code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import collections
from abc import ABC
from typing import Tuple, List, Dict
import numpy as np
class StepInformationProvider(ABC):
    """
    This class calculates certain values which are used frequently in reward generators.
    A single instance of this class can be shared between a set of (sub)generators
    to prevent multiple calculations of costly intermediate results.
    :param maze: (np.ndarray) two dimensional array defining the maze where 0 indicates passable terrain and 1 indicates an obstacle
    :param goal: (list) A point coordinate in form [x, y] ([column, row]) defining the goal.
    :param goal_range: (int) Range around the goal position which should be treated as 'goal reached'.
    :param n_particles: (int) Total number of robots.
    :param action_map: (dict) Map containing allowed actions.
    :param relative: (bool) If True, start-cost values are recomputed from the
        actual particle positions at every reset() instead of from the full
        cost map.
    """
    def __init__(
        self,
        maze: np.ndarray,
        goal: Tuple[int, int],
        goal_range: int,
        n_particles: int,
        action_map: Dict[int, Tuple[int, int]],
        relative: bool = False,
    ):
        self.goal_range = goal_range
        self.n_particles = n_particles
        # Set by reset(); copy of the robots' start locations.
        self.initial_robot_locations = None
        self.action_map = action_map
        self.maze = maze
        self.goal = goal
        self.relative = relative
        # State recorded by the most recent step()/reset() call.
        self.last_locations = None
        self.last_action = None
        # Lazily computed caches; None (or False / 0.0) means "not computed yet".
        self._cost = None
        self._max_start_cost = None
        self._max_cost = None
        self._particle_cost = None
        self._total_start_cost = None
        self._total_cost = None
        self._unique_particles = None
        self._done = False
        self._step_reward = 0.0
        self._mean_cost = None
        self._ep_len_estimate = None
        self._convex_corners = None
    def reset(self, locations):
        """Reset the per-episode state with the robots' start ``locations``."""
        self.initial_robot_locations = np.copy(locations)
        self.last_locations = locations
        self._done = False
        if self.relative:
            # In relative mode the start costs depend on the new start
            # positions, so the cached values must be recomputed.
            self._ep_len_estimate = None
            self._total_start_cost = None
            self._max_start_cost = None
    def step(self, action, locations):
        """Record ``action`` and the new ``locations`` and reset per-step caches."""
        self._step_reward = 0.0
        self.last_locations = locations
        self.last_action = action
        self._step_reset()
    def stepped_generator(self, done, reward):
        """Accumulate a (done, reward) contribution reported by one generator."""
        self._step_reward += reward
        if done:
            # 'done' latches: once any generator signals termination, the
            # episode stays finished for the remainder of the step.
            self._done = True
    def _step_reset(self):
        # Invalidate every cache that depends on the current particle locations.
        self._max_cost = None
        self._particle_cost = None
        self._total_cost = None
        self._unique_particles = None
    def _calculate_cost_map(self, maze, goal) -> np.ndarray:
        """
        Calculates the cost map based on a given goal position via bfs
        """
        queue = collections.deque([goal]) # [x, y] pairs in point notation order!
        seen = np.zeros(maze.shape, dtype=int)
        seen[goal[1], goal[0]] = 1
        cost = np.zeros(maze.shape, dtype=int)
        height, width = maze.shape
        while queue:
            x, y = queue.popleft()
            for action in self.action_map.values():
                # Actions are stored as (row, col) offsets while positions
                # are handled as (x, y) here, hence the swapped indices.
                x2, y2 = x + action[1], y + action[0]
                if (
                    0 <= x2 < width
                    and 0 <= y2 < height
                    and maze[y2, x2] != 1
                    and seen[y2, x2] != 1
                ):
                    queue.append([x2, y2])
                    seen[y2, x2] = 1
                    # BFS guarantees the first visit is along a shortest path.
                    cost[y2, x2] = cost[y, x] + 1
        return cost
    def _count_convex_corners(self) -> Tuple[int, int, int, int]:
        """
        Calculates the number of convex corners
        :return: (Tuple[int, int, int, int]) Tuple containing the number of convex corners for nw, ne, sw and se convex corners.
        """
        # NOTE(review): the ix+1 / iy+1 accesses below are unguarded and
        # ix-1 / iy-1 wrap around via negative indexing -- this assumes free
        # cells never lie on the outer border of the maze. TODO confirm.
        nw = ne = sw = se = 0
        for ix, iy in np.ndindex(self.maze.shape):
            if self.maze[ix, iy] == 0:
                if self.maze[ix + 1, iy] == 1 and self.maze[ix, iy + 1] == 1:
                    sw += 1
                elif self.maze[ix + 1, iy] == 1 and self.maze[ix, iy - 1] == 1:
                    se += 1
                elif self.maze[ix - 1, iy] == 1 and self.maze[ix, iy + 1] == 1:
                    nw += 1
                elif self.maze[ix - 1, iy] == 1 and self.maze[ix, iy - 1] == 1:
                    ne += 1
        return (nw, ne, sw, se)
    def _count_freespace(self):
        """Count free cells whose local neighbourhood is also entirely free."""
        free = 0
        idxes = np.argwhere(self.maze == 0)
        for iy, ix in idxes:
            # NOTE(review): the slice stop is exclusive, so this is a 2x2
            # window; a full 3x3 neighbourhood would need iy+2 / ix+2.
            # Confirm which was intended.
            if (self.maze[iy - 1 : iy + 1, ix - 1 : ix + 1] == 0).all():
                free += 1
        return free
    def set_particle_count(self, n_particles):
        """Change the robot count and invalidate values derived from it."""
        self.n_particles = n_particles
        self._total_start_cost = None
    @property
    def convex_corners(self) -> Tuple[int, int, int, int]:
        # Cached (nw, ne, sw, se) convex-corner counts of the maze.
        if self._convex_corners is None:
            self._convex_corners = self._count_convex_corners()
        return self._convex_corners
    @property
    def costmap(self) -> np.ndarray:
        # BFS distance to the goal for every reachable cell (0 elsewhere).
        if self._cost is None:
            self._cost = self._calculate_cost_map(self.maze, self.goal)
        return self._cost
    @property
    def max_start_cost(self) -> float:
        # Worst start distance; per-episode in relative mode, global otherwise.
        if self._max_start_cost is None:
            if self.relative:
                self._max_start_cost = np.max(self.particle_cost)
            else:
                self._max_start_cost = np.max(self.costmap)
        return self._max_start_cost
    @property
    def particle_cost(self) -> np.ndarray:
        # Cost-map value under each particle.
        # Flat index = locations[:, 0] * width + locations[:, 1], i.e. this
        # assumes last_locations rows are (row, col) pairs -- TODO confirm,
        # since the class docstring describes goal as [x, y] = [column, row].
        if self._particle_cost is None:
            self._particle_cost = self.costmap.ravel()[
                (
                    self.last_locations[:, 1]
                    + self.last_locations[:, 0] * self.costmap.shape[1]
                )
            ]
        return self._particle_cost
    @property
    def episode_length_estimate(self) -> int:
        # Heuristic horizon: distance from the goal to a far free extreme,
        # scaled by a log factor of maze complexity.
        if self._ep_len_estimate is None:
            points = np.argwhere(self.maze == 0)
            extremes = np.argmax(points, axis=0)
            # Of the two per-axis extreme free cells, keep the one with the
            # larger coordinate sum.
            if np.sum(points[extremes[0]]) > np.sum(points[extremes[1]]):
                extreme = points[extremes[0]]
            else:
                extreme = points[extremes[1]]
            # argwhere yields (row, col); _calculate_cost_map expects (x, y).
            costmap = self._calculate_cost_map(self.maze, (extreme[1], extreme[0]))
            # NOTE(review): np.log(...) is -inf when any convex-corner count
            # is 0, which makes int() raise -- assumes every corner type
            # occurs at least once in the maze.
            self._ep_len_estimate = int(
                0.75
                * np.max(costmap)
                * np.log(self.mean_cost * np.min(self.convex_corners))
            )
        return self._ep_len_estimate
    @property
    def mean_cost(self) -> int:
        # Mean of the non-zero cost-map entries (zeros are masked out).
        if self._mean_cost is None:
            self._mean_cost = np.ma.masked_equal(self.costmap, 0).mean()
        return self._mean_cost
    @property
    def total_start_cost(self) -> float:
        # Aggregate start cost: exact sum in relative mode, otherwise the
        # mean cell cost scaled by the particle count.
        if self._total_start_cost is None:
            if self.relative:
                self._total_start_cost = np.sum(self.particle_cost)
            else:
                self._total_start_cost = self.mean_cost * self.n_particles
        return self._total_start_cost
    @property
    def total_cost(self) -> float:
        # Sum of the current per-particle costs.
        if self._total_cost is None:
            self._total_cost = np.sum(self.particle_cost)
        return self._total_cost
    @property
    def max_cost(self) -> float:
        # Largest current per-particle cost.
        if self._max_cost is None:
            self._max_cost = np.max(self.particle_cost)
        return self._max_cost
    @property
    def unique_particles(self) -> np.ndarray:
        # Row-wise deduplicated particle locations.
        if self._unique_particles is None:
            self._unique_particles = np.unique(self.last_locations, axis=0)
        return self._unique_particles
    @property
    def is_relative(self):
        # True when start costs are measured from actual start positions.
        return self.relative
    @property
    def is_done(self):
        # True once any generator reported termination this episode.
        return self._done
    @property
    def step_reward(self):
        # Reward accumulated across all generators during the current step.
        return self._step_reward
class RewardGenerator(ABC):
    """
    Base Class for reward generators for the maze environments
    """
    def __init__(
        self, information_provider: StepInformationProvider = None, scale: float = 1.0
    ):
        # Shared calculator; may also be attached later via
        # set_information_provider().
        self.calculator = None
        self.generators = []  # type: List[RewardGenerator]
        self.scale = scale
        if information_provider:
            self.set_information_provider(information_provider)
    def set_information_provider(self, calculator: StepInformationProvider):
        """Attach *calculator* to this generator and all of its children."""
        self.calculator = calculator
        for child in self.generators:
            child.set_information_provider(calculator)
    def set_particle_count(self, n_particles):
        """Propagate a changed robot count to the shared provider."""
        self.calculator.set_particle_count(n_particles=n_particles)
    def add_sub_generator(self, generator):
        """Register *generator* as a child sharing this provider."""
        generator.set_information_provider(self.calculator)
        self.generators.append(generator)
    def reset(self, locations):
        """Reset the provider, this generator and every sub-generator."""
        self.calculator.reset(locations)
        self._reset(locations)
        self._reset_generators(locations)
    def _reset_generators(self, locations):
        # Forward the reset to every registered child generator.
        for child in self.generators:
            child._reset(locations)
    def _reset(self, locations):
        # Hook for subclasses; the base generator keeps no episode state.
        pass
    def step(self, action, locations) -> Tuple[bool, float]:
        """Advance one environment step and collect all reward contributions.

        :return: (done, accumulated step reward)
        """
        calc = self.calculator
        calc.step(action, locations)
        calc.stepped_generator(*self._step(action, locations))
        self._step_generators(action, locations)
        if calc.is_done:
            # Give every generator a chance to add a terminal reward.
            calc.stepped_generator(False, self._on_done())
            self._on_done_generators()
        return calc.is_done, calc.step_reward
    def _step(self, action, locations) -> Tuple[bool, float]:
        # Hook for subclasses: (done, reward) for this step.
        return False, 0.0
    def _on_done(self) -> float:
        # Hook for subclasses: extra reward granted at episode end.
        return 0.0
    def _step_generators(self, action, locations) -> None:
        # Collect the per-step contribution of every child generator.
        for child in self.generators:
            self.calculator.stepped_generator(*child._step(action, locations))
    def _on_done_generators(self) -> None:
        # Collect the terminal contribution of every child generator.
        for child in self.generators:
            self.calculator.stepped_generator(False, child._on_done())
| [
"numpy.copy",
"numpy.ma.masked_equal",
"collections.deque",
"numpy.unique",
"numpy.ndindex",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"numpy.min"
] | [((1843, 1861), 'numpy.copy', 'np.copy', (['locations'], {}), '(locations)\n', (1850, 1861), True, 'import numpy as np\n'), ((2719, 2744), 'collections.deque', 'collections.deque', (['[goal]'], {}), '([goal])\n', (2736, 2744), False, 'import collections\n'), ((2801, 2832), 'numpy.zeros', 'np.zeros', (['maze.shape'], {'dtype': 'int'}), '(maze.shape, dtype=int)\n', (2809, 2832), True, 'import numpy as np\n'), ((2883, 2914), 'numpy.zeros', 'np.zeros', (['maze.shape'], {'dtype': 'int'}), '(maze.shape, dtype=int)\n', (2891, 2914), True, 'import numpy as np\n'), ((3784, 3811), 'numpy.ndindex', 'np.ndindex', (['self.maze.shape'], {}), '(self.maze.shape)\n', (3794, 3811), True, 'import numpy as np\n'), ((4380, 4407), 'numpy.argwhere', 'np.argwhere', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (4391, 4407), True, 'import numpy as np\n'), ((5863, 5890), 'numpy.argwhere', 'np.argwhere', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (5874, 5890), True, 'import numpy as np\n'), ((5914, 5939), 'numpy.argmax', 'np.argmax', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (5923, 5939), True, 'import numpy as np\n'), ((7059, 7085), 'numpy.sum', 'np.sum', (['self.particle_cost'], {}), '(self.particle_cost)\n', (7065, 7085), True, 'import numpy as np\n'), ((7230, 7256), 'numpy.max', 'np.max', (['self.particle_cost'], {}), '(self.particle_cost)\n', (7236, 7256), True, 'import numpy as np\n'), ((7428, 7466), 'numpy.unique', 'np.unique', (['self.last_locations'], {'axis': '(0)'}), '(self.last_locations, axis=0)\n', (7437, 7466), True, 'import numpy as np\n'), ((5241, 5267), 'numpy.max', 'np.max', (['self.particle_cost'], {}), '(self.particle_cost)\n', (5247, 5267), True, 'import numpy as np\n'), ((5325, 5345), 'numpy.max', 'np.max', (['self.costmap'], {}), '(self.costmap)\n', (5331, 5345), True, 'import numpy as np\n'), ((5955, 5982), 'numpy.sum', 'np.sum', (['points[extremes[0]]'], {}), '(points[extremes[0]])\n', (5961, 5982), True, 'import numpy as np\n'), ((5985, 
6012), 'numpy.sum', 'np.sum', (['points[extremes[1]]'], {}), '(points[extremes[1]])\n', (5991, 6012), True, 'import numpy as np\n'), ((6783, 6809), 'numpy.sum', 'np.sum', (['self.particle_cost'], {}), '(self.particle_cost)\n', (6789, 6809), True, 'import numpy as np\n'), ((6539, 6574), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['self.costmap', '(0)'], {}), '(self.costmap, 0)\n', (6557, 6574), True, 'import numpy as np\n'), ((6288, 6303), 'numpy.max', 'np.max', (['costmap'], {}), '(costmap)\n', (6294, 6303), True, 'import numpy as np\n'), ((6346, 6373), 'numpy.min', 'np.min', (['self.convex_corners'], {}), '(self.convex_corners)\n', (6352, 6373), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
@author: <NAME>
@create date: 2020/10/26
@description: This parameter are ready to prepartion the VASP inputs files for different calculational situation.
'''
import os
import src.system_cmd
import src.get_path as gp
import numpy as np
def save_diele(imag, real):
    """Write the imaginary and real parts of the dielectric function to
    DPT.IMAG.dat and DPT.REAL.dat as fixed-width columns (5 decimals)."""
    def _as_text(table):
        # One fixed-width row per input line, rows joined by newlines.
        rows = [''.join('{0:10.5f}'.format(item) for item in line) for line in table]
        return '\n'.join(rows)
    with open('DPT.IMAG.dat', 'w') as obj:
        obj.write(_as_text(imag))
    with open('DPT.REAL.dat', 'w') as obj:
        obj.write(_as_text(real))
    src.system_cmd.systemEcho(' [DPT] - Files DPT.IMAG.dat (image part of dielectric function) and DPT.REAL.dat (image part of dielectric function) Be Saved!')
def get_absorption_coefficient(E, imag, real):
    """Return the optical absorption coefficient for photon energy E.

    alpha(E) = sqrt(2) * omega * sqrt(sqrt(imag**2 + real**2) - real)

    NOTE(review): the conventional expression additionally divides by the
    speed of light c -- confirm the intended units with the caller.

    :param E: photon energy in eV (scalar or np.ndarray)
    :param imag: imaginary part of the dielectric function
    :param real: real part of the dielectric function
    :return: absorption coefficient (same shape as the inputs)
    """
    h = 4.1356676969e-15  # Planck constant in eV*s
    omega = 2*np.pi*E/h  # angular frequency of the photon
    alpha = np.sqrt(2.0)*omega*np.sqrt(np.sqrt(imag**2+real**2)-real)
    # Bug fix: the original computed alpha but never returned it, so the
    # function always yielded None.
    return alpha
def process_absorption():
    """Read the dielectric function from vasprun.xml, save its imaginary and
    real parts, and compute the absorption coefficient.

    NOTE(review): two apparent defects are flagged inline below -- the call
    to get_absorption_coefficient() is missing its first argument, and the
    computed `absorp` is never used or returned.
    """
    # find the file vasprun.xml
    if not os.path.exists('vasprun.xml'):
        src.system_cmd.systemError(' File vasprun.xml Not Found!')
    # read the vasprun.xml
    with open('vasprun.xml', 'r') as obj:
        ct = obj.readlines()
    # NOTE(review): `ct` is read but never used afterwards.
    # Extract seven columns from the <imag> block via awk (fields $2..$8;
    # presumably energy plus six tensor components -- confirm layout).
    imag = os.popen('''awk 'BEGIN{i=1} /imag/,\
                    /\/imag/ \
                    {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} \
                    END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}' vasprun.xml''').readlines()
    real = os.popen('''awk 'BEGIN{i=1} /real/,\
                    /\/real/ \
                    {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} \
                    END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}' vasprun.xml''').readlines()
    imag = np.array([[float(item) for item in line.split()] for line in imag])
    real = np.array([[float(item) for item in line.split()] for line in real])
    # save imag and real part of dielectric function
    save_diele(imag, real)
    # calculate the absorption coefficient
    # NOTE(review): get_absorption_coefficient(E, imag, real) takes three
    # arguments -- this call passes only two and will raise TypeError.
    absorp = get_absorption_coefficient(imag, real)
| [
"os.path.exists",
"os.popen",
"numpy.sqrt"
] | [((1028, 1057), 'os.path.exists', 'os.path.exists', (['"""vasprun.xml"""'], {}), "('vasprun.xml')\n", (1042, 1057), False, 'import os\n'), ((900, 912), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (907, 912), True, 'import numpy as np\n'), ((1235, 1482), 'os.popen', 'os.popen', (['"""awk \'BEGIN{i=1} /imag/, /\\\\/imag/ {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}\' vasprun.xml"""'], {}), '(\n "awk \'BEGIN{i=1} /imag/, /\\\\/imag/ {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}\' vasprun.xml"\n )\n', (1243, 1482), False, 'import os\n'), ((1505, 1752), 'os.popen', 'os.popen', (['"""awk \'BEGIN{i=1} /real/, /\\\\/real/ {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}\' vasprun.xml"""'], {}), '(\n "awk \'BEGIN{i=1} /real/, /\\\\/real/ {a[i]=$2 ; b[i]=$3 ; c[i]=$4; d[i]=$5 ; e[i]=$6 ; f[i]=$7; g[i]=$8; i=i+1} END{for (j=12;j<i-3;j++) print a[j],b[j],c[j],d[j],e[j],f[j],g[j]}\' vasprun.xml"\n )\n', (1513, 1752), False, 'import os\n'), ((927, 957), 'numpy.sqrt', 'np.sqrt', (['(imag ** 2 + real ** 2)'], {}), '(imag ** 2 + real ** 2)\n', (934, 957), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from imodels.rule_list.greedy_rule_list import GreedyRuleListClassifier
class TestGRL(unittest.TestCase):
    def test_integration_stability(self):
        '''Test on synthetic dataset
        '''
        # Hand-crafted binary features; the first four rows belong to
        # class 0, the last four to class 1.
        features = [
            [0, 0, 1, 1, 0],
            [1, 0, 0, 0, 0],
            [0, 0, 1, 0, 0],
            [1, 0, 0, 0, 0],
            [1, 1, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [0, 1, 1, 1, 1],
            [1, 0, 1, 1, 1],
        ]
        X = np.array(features)
        y = np.array([0] * 4 + [1] * 4)
        model = GreedyRuleListClassifier()
        model.fit(X, y)
        predictions = model.predict(X)
        # Training accuracy in percent; the rule list should fit exactly.
        acc = 100 * np.mean(predictions == y)
        assert acc > 99, 'acc must be 100'
"numpy.array",
"numpy.mean",
"imodels.rule_list.greedy_rule_list.GreedyRuleListClassifier"
] | [((247, 398), 'numpy.array', 'np.array', (['[[0, 0, 1, 1, 0], [1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1,\n 0, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1], [1, 0, 1, 1, 1]]'], {}), '([[0, 0, 1, 1, 0], [1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0\n ], [1, 1, 0, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1], [1, 0, 1, 1, 1]])\n', (255, 398), True, 'import numpy as np\n'), ((510, 544), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (518, 544), True, 'import numpy as np\n'), ((557, 583), 'imodels.rule_list.greedy_rule_list.GreedyRuleListClassifier', 'GreedyRuleListClassifier', ([], {}), '()\n', (581, 583), False, 'from imodels.rule_list.greedy_rule_list import GreedyRuleListClassifier\n'), ((646, 664), 'numpy.mean', 'np.mean', (['(y == yhat)'], {}), '(y == yhat)\n', (653, 664), True, 'import numpy as np\n')] |
from astropy.io import fits
import numpy as np
def readfrom(filename, lower_case=False, upper_case=False, **kwargs):
    """Read a column-oriented FITS table into a plain dict of column data.

    :param filename: path of the FITS file to open
    :param lower_case: force all column names to lower case
    :param upper_case: force all column names to upper case
    :param kwargs: forwarded verbatim to :func:`astropy.io.fits.open`
    :return: dict mapping column name -> column data
    :raises RuntimeError: if the file has no extension HDU
    """
    hdulist = fits.open(filename, **kwargs)
    # Column-oriented FITS tables have an empty primary HDU; the actual
    # data lives in the first extension.
    if len(hdulist) == 1:
        raise RuntimeError('this is not a column-oriented FITS table')
    # All columns are stored in the single row of the extension table.
    # (Fixes: the original also read the header into an unused local, and
    # re-evaluated the casing branch on every loop iteration.)
    data = hdulist[1].data[0]
    # NOTE(review): the HDU list is deliberately left open because the
    # returned arrays may reference memory-mapped file data.
    if lower_case:
        transform = str.lower
    elif upper_case:
        transform = str.upper
    else:
        transform = lambda name: name
    return {transform(n): data[n] for n in hdulist[1].data.dtype.names}
def _get_format_code(v):
    """Map a Python/numpy value to the FITS (TFORM, TDIM) pair describing it."""
    if isinstance(v, str):
        # Plain Python strings carry no numpy dtype; fake an 'S<len>' code.
        dims = ()
        tdtype = 'S' + str(len(v))
    else:
        dims = v.shape if isinstance(v, np.ndarray) else ()
        # Drop the byte-order prefix from the numpy type code.
        tdtype = v.dtype.str.replace('>', '').replace('<', '').replace('|', '')
    if tdtype[0] == 'S':
        # Strings are handled in a peculiar way in FITS: they are stored as
        # multi-dimensional arrays of bytes whose last dimension is the
        # maximum length of all strings in the array.
        dims = dims + (int(tdtype[1:]),)
        tdtype = 'a'
    if dims:
        repeat = str(np.prod(dims))
    else:
        repeat = ''
        dims = (1,)
    tform = repeat + fits.column.NUMPY2FITS[tdtype]
    # TDIM lists the axes in reverse (FITS is column-major).
    tdim = '(' + ','.join(str(d) for d in reversed(dims)) + ')'
    return tform, tdim
def writeto(filename, data, lower_case=False, upper_case=False, **kwargs):
    """Write a dict of columns as a single-row, column-oriented FITS table.

    :param filename: destination path
    :param data: dict mapping column name -> column value/array
    :param lower_case: force all column names to lower case
    :param upper_case: force all column names to upper case
    :param kwargs: forwarded verbatim to ``BinTableHDU.writeto``
    """
    def _case(name):
        # Single place for the name-casing rule (was duplicated in two loops).
        if lower_case:
            return name.lower()
        if upper_case:
            return name.upper()
        return name
    # Build the ColDef array
    cols = fits.ColDefs([])
    # Bug fix: dict.iteritems() exists only on Python 2; items() works on
    # both Python 2 and 3.
    for colname, value in data.items():
        # Figure out which FITS format to use to store this column
        tform, tdim = _get_format_code(value)
        cols.add_col(fits.Column(
            name=_case(colname),
            format=tform,
            dim=tdim))
    # Create the FITS table in memory, then fill the single row.
    hdu = fits.BinTableHDU.from_columns(cols, nrows=1, fill=True)
    for colname, value in data.items():
        hdu.data[_case(colname)] = value
    # Write it on the disk
    hdu.writeto(filename, **kwargs)
| [
"astropy.io.fits.ColDefs",
"numpy.prod",
"astropy.io.fits.Column",
"astropy.io.fits.open",
"astropy.io.fits.BinTableHDU.from_columns"
] | [((169, 198), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename, **kwargs)\n', (178, 198), False, 'from astropy.io import fits\n'), ((1832, 1848), 'astropy.io.fits.ColDefs', 'fits.ColDefs', (['[]'], {}), '([])\n', (1844, 1848), False, 'from astropy.io import fits\n'), ((2363, 2418), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['cols'], {'nrows': '(1)', 'fill': '(True)'}), '(cols, nrows=1, fill=True)\n', (2392, 2418), False, 'from astropy.io import fits\n'), ((1566, 1579), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (1573, 1579), True, 'import numpy as np\n'), ((2228, 2275), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': 'cname', 'format': 'tform', 'dim': 'tdim'}), '(name=cname, format=tform, dim=tdim)\n', (2239, 2275), False, 'from astropy.io import fits\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Load the x / mean / standard-error columns of a semicolon-separated run.
xs, means, errs = np.loadtxt('data/seir_1000_100.0_FR_.data', delimiter=';', unpack=True)

plt.plot(xs, means, label='Loaded from file!')
# Shade a one-standard-error band around the mean curve.
plt.fill_between(xs, means - errs, means + errs)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Fraction of Infected Agents')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((63, 134), 'numpy.loadtxt', 'np.loadtxt', (['"""data/seir_1000_100.0_FR_.data"""'], {'delimiter': '""";"""', 'unpack': '(True)'}), "('data/seir_1000_100.0_FR_.data', delimiter=';', unpack=True)\n", (73, 134), True, 'import numpy as np\n'), ((135, 176), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Loaded from file!"""'}), "(x, y, label='Loaded from file!')\n", (143, 176), True, 'import matplotlib.pyplot as plt\n'), ((177, 212), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y - se)', '(y + se)'], {}), '(x, y - se, y + se)\n', (193, 212), True, 'import matplotlib.pyplot as plt\n'), ((209, 224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (219, 224), True, 'import matplotlib.pyplot as plt\n'), ((225, 240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (235, 240), True, 'import matplotlib.pyplot as plt\n'), ((241, 281), 'matplotlib.pyplot.title', 'plt.title', (['"""Fraction of Infected Agents"""'], {}), "('Fraction of Infected Agents')\n", (250, 281), True, 'import matplotlib.pyplot as plt\n'), ((282, 294), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (292, 294), True, 'import matplotlib.pyplot as plt\n'), ((295, 305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (303, 305), True, 'import matplotlib.pyplot as plt\n')] |
# ./examples/load_data.py
# Required libraries:
#   pip install sdfpy
#   pip install thingking
from sdfpy import load_sdf
from thingking import loadtxt
import numpy as np
import math
import matplotlib.cm as cm
import matplotlib.pylab as pl

prefix = "http://darksky.slac.stanford.edu/scivis2015/data/ds14_scivis_0128/"
# Load N-body particles from a = 1.0 dataset. Particles have positions with
# units of proper kpc, and velocities with units of km/s.
particles = load_sdf(prefix+"ds14_scivis_0128_e4_dt04_1.0000")
# Load the a=1 Rockstar hlist file. The header of the file lists the useful
# units/information.
scale, id, desc_scale, desc_id, num_prog, pid, upid, desc_pid, phantom, \
    sam_mvir, mvir, rvir, rs, vrms, mmp, scale_of_last_MM, vmax, x, y, z, \
    vx, vy, vz, Jx, Jy, Jz, Spin, Breadth_first_ID, Depth_first_ID, \
    Tree_root_ID, Orig_halo_ID, Snap_num, Next_coprogenitor_depthfirst_ID, \
    Last_progenitor_depthfirst_ID, Rs_Klypin, M_all, M200b, M200c, M500c, \
    M2500c, Xoff, Voff, Spin_Bullock, b_to_a, c_to_a, A_x, A_y, A_z, \
    b_to_a_500c, c_to_a_500c, A_x_500c, A_y_500c, A_z_500c, T_over_U, \
    M_pe_Behroozi, M_pe_Diemer, Macc, Mpeak, Vacc, Vpeak, Halfmass_Scale, \
    Acc_Rate_Inst, Acc_Rate_100Myr, Acc_Rate_Tdyn = \
    loadtxt(prefix+"rockstar/hlists/hlist_1.00000.list", unpack=True)
# Now we want to convert the proper kpc of the particle position to comoving
# Mpc/h, a common unit used in computational cosmology in general, but
# specifically is used as the output unit in the merger tree halo list loaded
# in above. First we get the Hubble parameter, here stored as 'h_100' in the
# SDF parameters. Then we load the simulation width, L0, which is also in
# proper kpc. Finally we load the scale factor, a, which for this particular
# snapshot is equal to 1 since we are loading the final snapshot from the
# simulation.
h_100 = particles.parameters['h_100']
width = particles.parameters['L0']
cosmo_a = particles.parameters['a']
kpc_to_Mpc = 1./1000
sl = slice(0, None)

def convert_to_cMpc(proper):
    """Convert proper kpc to comoving Mpc/h (box re-centred at the origin)."""
    return (proper + width/2.) * h_100 * kpc_to_Mpc / cosmo_a

# Save the halo catalog columns. (Refactor: the original repeated the same
# three-line np.save block nine times; a data-driven loop saves the exact
# same arrays to the exact same file names.)
for fn, column in (("halox.npy", x), ("haloy.npy", y),
                   ("haloz.npy", z), ("halorvir.npy", rvir)):
    np.save(fn, np.array(column))

# Save particle positions and velocities.
# NOTE(review): as in the original script, the velocities are passed through
# the *position* unit conversion -- confirm this is intended.
for name in ("x", "y", "z", "vx", "vy", "vz"):
    np.save("part" + name + ".npy", np.array(convert_to_cMpc(particles[name][sl])))
#
# # Plot all the particles, adding a bit of alpha so that we see the density of
# # points.
# import matplotlib.pylab as pl
# pl.figure(figsize=[10,10])
#
# pl.scatter(convert_to_cMpc(particles['x'][sl]),
# convert_to_cMpc(particles['y'][sl]), color='b', s=1.0, alpha=0.05)
#
# # Plot all the halos in red.
# pl.scatter(x, y, color='r', alpha=0.1)
#
# # Add some labels
# pl.xlabel('x [cMpc/h]')
# pl.ylabel('y [cMpc/h]')
# pl.savefig("halos_and_particles.png", bbox_inches='tight')
#
# # Could now consider coloring halos by any of the various quantities above.
# # Perhaps mvir would be nice to show the virial Mass of the halo, or we could
# # scale the points by the virial radius, rvir.
"numpy.array",
"sdfpy.load_sdf",
"thingking.loadtxt",
"numpy.save"
] | [((463, 515), 'sdfpy.load_sdf', 'load_sdf', (["(prefix + 'ds14_scivis_0128_e4_dt04_1.0000')"], {}), "(prefix + 'ds14_scivis_0128_e4_dt04_1.0000')\n", (471, 515), False, 'from sdfpy import load_sdf\n'), ((1262, 1329), 'thingking.loadtxt', 'loadtxt', (["(prefix + 'rockstar/hlists/hlist_1.00000.list')"], {'unpack': '(True)'}), "(prefix + 'rockstar/hlists/hlist_1.00000.list', unpack=True)\n", (1269, 1329), False, 'from thingking import loadtxt\n'), ((2180, 2191), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2188, 2191), True, 'import numpy as np\n'), ((2211, 2229), 'numpy.save', 'np.save', (['fn', 'halox'], {}), '(fn, halox)\n', (2218, 2229), True, 'import numpy as np\n'), ((2240, 2251), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2248, 2251), True, 'import numpy as np\n'), ((2271, 2289), 'numpy.save', 'np.save', (['fn', 'haloy'], {}), '(fn, haloy)\n', (2278, 2289), True, 'import numpy as np\n'), ((2300, 2311), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (2308, 2311), True, 'import numpy as np\n'), ((2331, 2349), 'numpy.save', 'np.save', (['fn', 'haloz'], {}), '(fn, haloz)\n', (2338, 2349), True, 'import numpy as np\n'), ((2363, 2377), 'numpy.array', 'np.array', (['rvir'], {}), '(rvir)\n', (2371, 2377), True, 'import numpy as np\n'), ((2400, 2421), 'numpy.save', 'np.save', (['fn', 'halorvir'], {}), '(fn, halorvir)\n', (2407, 2421), True, 'import numpy as np\n'), ((2499, 2517), 'numpy.save', 'np.save', (['fn', 'partx'], {}), '(fn, partx)\n', (2506, 2517), True, 'import numpy as np\n'), ((2593, 2611), 'numpy.save', 'np.save', (['fn', 'party'], {}), '(fn, party)\n', (2600, 2611), True, 'import numpy as np\n'), ((2687, 2705), 'numpy.save', 'np.save', (['fn', 'partz'], {}), '(fn, partz)\n', (2694, 2705), True, 'import numpy as np\n'), ((2784, 2803), 'numpy.save', 'np.save', (['fn', 'partvx'], {}), '(fn, partvx)\n', (2791, 2803), True, 'import numpy as np\n'), ((2882, 2901), 'numpy.save', 'np.save', (['fn', 'partvy'], {}), '(fn, partvy)\n', (2889, 2901), 
True, 'import numpy as np\n'), ((2980, 2999), 'numpy.save', 'np.save', (['fn', 'partvz'], {}), '(fn, partvz)\n', (2987, 2999), True, 'import numpy as np\n')] |
import numpy as np
import scipy.linalg as spla
from embedding import convert_to_graph
def get_degree_matrix(A):
    """Return the diagonal degree matrix of adjacency matrix ``A``."""
    row_sums = A.sum(axis=1)
    return np.diag(row_sums, 0)
def get_laplacian(A):
    """Return the (unnormalized) graph Laplacian L = D - A."""
    degree = np.diag(np.sum(A, axis=1), 0)
    return degree - A
def _spectral_clustering_by_connected_components(L):
n,_ = L.shape[0]
u,v = spla.eigh(L)
number_of_components = 1 + np.where(np.abs(u)<1e-14)[0][-1]
clusters = np.zeros(n)
for x in range(number_of_components):
mask = np.abs(v[:,x]) > 1e-14
clusters[mask] = x
shift = number_of_components // 2
return clusters - shift
# Spectral clustering using the Fiedler vector (second-smallest eigenvector)
# Relaxing the integer conditions in minimizing the number of edges between partitions
# leads to the solution of the second-smallest eigenvector. (The eigenvector for the smallest
# eigenvalue assigns all nodes to the same partition, assuming the graph is connected).
#
# See www.blog.shriphani.com/2015/04/06/the-smallest-eigenvalues-of-a-graph-laplacian
#
def _spectral_clustering_by_fiedler_vector(L,kernel='sgn',explore=False):
u,v = spla.eigh(L,eigvals=(0,1))
assert u[1] > 1e-14, "Multiplicity of 0 eigenvalues is > 1. Multiple connected components exist."
clusters = v[:,1]
if explore:
return clusters
if kernel == 'sgn':
return np.sign(clusters)
elif kernel == 'mean':
return(v[:,1] > np.mean(v[:,1])).astype(int)*2 - 1
elif kernel == 'median':
return(v[:,1] > np.median(v[:,1])).astype(int)*2 - 1
def spectral_clustering(X, method='fiedler', affinity_measure='euclidean', epsilon=1, truncate=False, threshold=0.1, kernel='sgn', explore=False):
    """Cluster the samples in X by spectral partitioning of their affinity graph.

    :param X: sample matrix handed to convert_to_graph
    :param method: 'fiedler' (bipartition) or 'components' (connected components)
    :param affinity_measure: affinity metric used to build the graph
    :param epsilon: affinity scale parameter
    :param truncate: drop edges below ``threshold`` when building the graph
    :param threshold: edge-truncation threshold
    :param kernel: thresholding rule forwarded to the Fiedler method
    :param explore: forward the raw Fiedler vector instead of labels
    :return: cluster assignment array
    :raises ValueError: if ``method`` is unknown
    """
    # Bug fix: the original passed the hard-coded value epsilon=1 here,
    # silently ignoring the caller-supplied epsilon parameter.
    A = convert_to_graph(X, affinity_measure=affinity_measure, epsilon=epsilon, truncate=truncate, threshold=threshold)
    L = get_laplacian(A)
    if method == 'fiedler':
        return _spectral_clustering_by_fiedler_vector(L, kernel=kernel, explore=explore)
    if method == 'components':
        return _spectral_clustering_by_connected_components(L)
    # Bug fix: an unknown method previously fell through to a confusing
    # NameError on an undefined local; fail explicitly instead.
    raise ValueError("unknown method: %r" % (method,))
# shows histogram of affinity measurements to help tune epsilon and to help set an appropriate threshold if truncating
def explore_graph_formation(X, affinity_measure='euclidean', epsilon=1):
    """Show a histogram of raw affinity measurements to help tune epsilon
    and, when truncating, to pick a sensible threshold."""
    # Bug fix: this module never imports matplotlib, so `plt` was an
    # undefined name; import it locally where it is needed.
    import matplotlib.pyplot as plt
    As = convert_to_graph(X, affinity_measure=affinity_measure, epsilon=epsilon, explore=True)
    plt.figure(figsize=(12, 8))
    plt.hist(As, bins=30, color='b')
    plt.xlabel('affinity measurement')
    plt.ylabel('frequency')
    plt.title('Distribution of affinity measurements with epsilon=%.3f' % epsilon)
    plt.show()
# returns histogram of pre-bucketed clusters to aid in deciding which kernel to use
# if distribution is not centered about 0, mean may be best
# if distribution is skewed, median may be best
def explore_spectral_clustering(X, affinity_measure='euclidean', epsilon=1, truncate=False, threshold=0.1):
    """Show a histogram of the pre-discretized Fiedler-vector values to help
    decide which thresholding kernel to use: if the distribution is not
    centred about 0, 'mean' may be best; if it is skewed, 'median' may be."""
    # Bug fix: this module never imports matplotlib, so `plt` was an
    # undefined name; import it locally where it is needed.
    import matplotlib.pyplot as plt
    vec = spectral_clustering(X, method='fiedler', affinity_measure=affinity_measure, epsilon=epsilon, truncate=truncate, threshold=threshold, explore=True)
    plt.figure(figsize=(12, 8))
    plt.hist(vec, bins=30, color='b')
    plt.xlabel('raw cluster assignment')
    plt.ylabel('frequency')
    plt.title('Distribution of pre-discretized cluster assignments with epsilon=%.3f and threshold=%.3f' % (epsilon, threshold))
    plt.show()
| [
"scipy.linalg.eigh",
"numpy.abs",
"numpy.mean",
"numpy.median",
"numpy.sum",
"numpy.zeros",
"numpy.sign",
"embedding.convert_to_graph"
] | [((296, 308), 'scipy.linalg.eigh', 'spla.eigh', (['L'], {}), '(L)\n', (305, 308), True, 'import scipy.linalg as spla\n'), ((388, 399), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (396, 399), True, 'import numpy as np\n'), ((1099, 1127), 'scipy.linalg.eigh', 'spla.eigh', (['L'], {'eigvals': '(0, 1)'}), '(L, eigvals=(0, 1))\n', (1108, 1127), True, 'import scipy.linalg as spla\n'), ((1678, 1788), 'embedding.convert_to_graph', 'convert_to_graph', (['X'], {'affinity_measure': 'affinity_measure', 'epsilon': '(1)', 'truncate': 'truncate', 'threshold': 'threshold'}), '(X, affinity_measure=affinity_measure, epsilon=1, truncate=\n truncate, threshold=threshold)\n', (1694, 1788), False, 'from embedding import convert_to_graph\n'), ((2245, 2334), 'embedding.convert_to_graph', 'convert_to_graph', (['X'], {'affinity_measure': 'affinity_measure', 'epsilon': 'epsilon', 'explore': '(True)'}), '(X, affinity_measure=affinity_measure, epsilon=epsilon,\n explore=True)\n', (2261, 2334), False, 'from embedding import convert_to_graph\n'), ((132, 149), 'numpy.sum', 'np.sum', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (138, 149), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.sign', 'np.sign', (['clusters'], {}), '(clusters)\n', (1342, 1352), True, 'import numpy as np\n'), ((457, 472), 'numpy.abs', 'np.abs', (['v[:, x]'], {}), '(v[:, x])\n', (463, 472), True, 'import numpy as np\n'), ((349, 358), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (355, 358), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.mean', 'np.mean', (['v[:, 1]'], {}), '(v[:, 1])\n', (1411, 1420), True, 'import numpy as np\n'), ((1492, 1510), 'numpy.median', 'np.median', (['v[:, 1]'], {}), '(v[:, 1])\n', (1501, 1510), True, 'import numpy as np\n')] |
# Copyright (c) 2015-2016 <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from sklearn.linear_model import SGDClassifier, SGDRegressor
from storlets.agent.daemon import files
from mlstorlets.utils.serialize_model import\
classifier_from_string, regressor_from_string,\
classifier_to_string, regressor_to_string
def data_file_create(path, X, Y):
    """Write samples X with their labels Y to *path* as a whitespace table.

    Y is appended as the last column of the saved matrix.
    """
    labels_column = Y[np.newaxis].T
    combined = np.column_stack((X, labels_column))
    np.savetxt(path, combined)
def data_storlet_file_open(path):
    """Open *path* read-only and wrap the raw descriptor in a StorletInputFile."""
    descriptor = os.open(path, os.O_RDONLY)
    return files.StorletInputFile({}, descriptor)
def data_file_read(path, num_features, num_labels):
    """Read a whitespace-delimited sample file back into (X, Y).

    :param path: path of a file previously written by data_file_create
    :param num_features: number of feature columns in the file
    :param num_labels: number of label columns in the file
    :returns: tuple of (X feature matrix, flattened Y label array)
    """
    sif = data_storlet_file_open(path)
    try:
        loadedX_Y = np.loadtxt(sif)
    finally:
        # Close the storlet file even if parsing fails
        sif.close()
    num_columns = num_features + num_labels
    # Integer division: under Python 3 '/' yields a float, which np.reshape
    # rejects as a dimension (the original code broke here on Python 3).
    num_samples = loadedX_Y.size // num_columns
    loadedX_Y = np.reshape(loadedX_Y, (num_samples, num_columns))
    # Split columns into features, labels, and an (empty) trailing remainder.
    X, Y, _ = np.hsplit(loadedX_Y, np.array((num_features, num_columns)))
    return X, Y.ravel()
def data_file_destroy(path):
    """Delete the data file at *path*."""
    os.remove(path)
def estimator_from_string(est_type, sest):
    """Deserialize an estimator of the given type from its string form.

    Returns None for unrecognised estimator types (matching the original
    fall-through behaviour).
    """
    deserializers = {
        'SGDRegressor': regressor_from_string,
        'SGDClassifier': classifier_from_string,
    }
    loader = deserializers.get(est_type)
    if loader is not None:
        return loader(sest)
def estimator_to_string(est_type, est):
    """Serialize an estimator of the given type to its string form.

    Returns None for unrecognised estimator types (matching the original
    fall-through behaviour).
    """
    serializers = {
        'SGDRegressor': regressor_to_string,
        'SGDClassifier': classifier_to_string,
    }
    dumper = serializers.get(est_type)
    if dumper is not None:
        return dumper(est)
def get_estimator(est_type):
    """Construct a fresh, unshuffled estimator of the requested type.

    Returns None for unrecognised estimator types (matching the original
    fall-through behaviour).
    """
    factories = {
        'SGDRegressor': SGDRegressor,
        'SGDClassifier': SGDClassifier,
    }
    factory = factories.get(est_type)
    if factory is not None:
        return factory(shuffle=False)
| [
"mlstorlets.utils.serialize_model.regressor_from_string",
"sklearn.linear_model.SGDClassifier",
"numpy.reshape",
"mlstorlets.utils.serialize_model.classifier_to_string",
"sklearn.linear_model.SGDRegressor",
"os.open",
"mlstorlets.utils.serialize_model.regressor_to_string",
"numpy.column_stack",
"num... | [((903, 940), 'numpy.column_stack', 'np.column_stack', (['(X, Y[np.newaxis].T)'], {}), '((X, Y[np.newaxis].T))\n', (918, 940), True, 'import numpy as np\n'), ((945, 966), 'numpy.savetxt', 'np.savetxt', (['path', 'X_Y'], {}), '(path, X_Y)\n', (955, 966), True, 'import numpy as np\n'), ((1012, 1038), 'os.open', 'os.open', (['path', 'os.O_RDONLY'], {}), '(path, os.O_RDONLY)\n', (1019, 1038), False, 'import os\n'), ((1208, 1223), 'numpy.loadtxt', 'np.loadtxt', (['sif'], {}), '(sif)\n', (1218, 1223), True, 'import numpy as np\n'), ((1345, 1393), 'numpy.reshape', 'np.reshape', (['loadedX_Y', '(num_samples, num_colums)'], {}), '(loadedX_Y, (num_samples, num_colums))\n', (1355, 1393), True, 'import numpy as np\n'), ((1529, 1544), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (1538, 1544), False, 'import os\n'), ((1432, 1468), 'numpy.array', 'np.array', (['(num_features, num_colums)'], {}), '((num_features, num_colums))\n', (1440, 1468), True, 'import numpy as np\n'), ((1640, 1667), 'mlstorlets.utils.serialize_model.regressor_from_string', 'regressor_from_string', (['sest'], {}), '(sest)\n', (1661, 1667), False, 'from mlstorlets.utils.serialize_model import classifier_from_string, regressor_from_string, classifier_to_string, regressor_to_string\n'), ((1719, 1747), 'mlstorlets.utils.serialize_model.classifier_from_string', 'classifier_from_string', (['sest'], {}), '(sest)\n', (1741, 1747), False, 'from mlstorlets.utils.serialize_model import classifier_from_string, regressor_from_string, classifier_to_string, regressor_to_string\n'), ((1840, 1864), 'mlstorlets.utils.serialize_model.regressor_to_string', 'regressor_to_string', (['est'], {}), '(est)\n', (1859, 1864), False, 'from mlstorlets.utils.serialize_model import classifier_from_string, regressor_from_string, classifier_to_string, regressor_to_string\n'), ((1916, 1941), 'mlstorlets.utils.serialize_model.classifier_to_string', 'classifier_to_string', (['est'], {}), '(est)\n', (1936, 1941), False, 'from 
mlstorlets.utils.serialize_model import classifier_from_string, regressor_from_string, classifier_to_string, regressor_to_string\n'), ((2023, 2050), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'shuffle': '(False)'}), '(shuffle=False)\n', (2035, 2050), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor\n'), ((2102, 2130), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'shuffle': '(False)'}), '(shuffle=False)\n', (2115, 2130), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor\n')] |
#!/usr/bin/env python
"""
Implementation of the template for XIMEA XiB cameras
"""
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__maintainer__ = "not assigned"
__email__ = ""
__status__ = "Development"
import abc
import logging
import numpy as np
from pyXimea import xiapi, xidefs
from pyCameras.cameraTemplate import CameraTemplate
# class CameraControllerTemplate(object):
# """
# Template class for spectrometer controllers to inherit from if they are
# necessary to use the camera. The controller for camera devices should
# be opened and closed from the Camera.openController and
# Camera.closeController static methods. If the controller is only used to
# detect available camera devices, that logic could also be easily
# implemented in Camera.listDevices().
# """
#
# def __init__(self):
# self.logger = logging.getLogger(__name__)
# self.device_handles = []
#
# def listDevices(self):
# """
# Returns a list of available devices. One of these entries should be
# used as parameter for self.getDevice to open the corresponding device
#
# Returns
# -------
# device_handles : list
# List of available capture devices
# """
# self.updateDeviceHandles()
# return self.device_handles
#
# def updateDeviceHandles(self):
# """
# Update the list of available device handles
# """
# raise NotImplementedError
#
# def getDevice(self, device_handle):
# raise NotImplementedError
#
# def closeController(self):
# raise NotImplementedError
#
# def __del__(self):
# self.logger.debug('Deleting cameracontroller {self}'
# ''.format(self=self))
# self.closeController()
#
# def __repr__(self):
# return "<CameraController Template: OVERLOAD THIS FUNCTION>"
class CameraXimeaXIB(CameraTemplate):
    """
    Class to access XIMEA XiB cameras.

    The device_handle is the serial number STRING
    """

    def __init__(self, device_handle):
        """
        Camera object for a single XIMEA XiB device.

        Parameters
        ----------
        device_handle : str
            Serial number string identifying the camera; used to open the
            device connection
        """
        super(CameraXimeaXIB, self).__init__(device_handle)
        self.logger = logging.getLogger(__name__)
        self.device_handle = device_handle  # serial number used to open the device
        self.device = xiapi.Camera()  # the underlying xiapi device
        self.triggerMode = "off"

    @staticmethod
    def listDevices():
        """
        List the serial numbers of all attached XIMEA cameras.

        Returns
        -------
        list of str
            One serial number string per detected device
        """
        cam = xiapi.Camera()
        numCameras = cam.get_number_devices()
        result = []
        for i in range(numCameras):
            # Open each device briefly just to read its serial number
            cam = xiapi.Camera(i)
            cam.open_device()
            result.append(cam.get_device_sn().decode("utf-8"))
            cam.close_device()
        return result

    def openDevice(self):
        """
        Open the device by using self.device_handle and store the device in
        self.device
        """
        self.device.open_device_by_SN(self.device_handle)

    def closeDevice(self):
        """
        Close the connection to the device.
        """
        self.device.close_device()

    def isOpen(self):
        """
        Check if the device for this instance is currently open and ready to
        communicate

        Returns
        -------
        bool
            True if the camera connection is open, False if it is not
        """
        return self.device.CAM_OPEN

    def getImage(self, *args, **kwargs):
        """
        Acquire and return a single image as a numpy array.

        *args and **kwargs are accepted for interface compatibility and
        ignored.
        """
        self.device.start_acquisition()
        try:
            img = xiapi.Image()
            self.device.get_image(img)
            nImg = self.getNumpyData(img)
        finally:
            # Always stop acquisition, even when grabbing/conversion failed
            self.device.stop_acquisition()
        return nImg

    def getNumpyData(self, img):
        """
        Convert a xiapi.Image into a numpy array.

        Parameters
        ----------
        img : xiapi.Image
            Image object whose raw buffer should be converted

        Returns
        -------
        numpy.ndarray
            Image data shaped (height, width[, channels]), squeezed

        Raises
        ------
        ValueError
            If the bytes-per-pixel value is not recognised (the original
            code silently left the dtype undefined in that case).
        """
        bpp = img.get_bytes_per_pixel()
        if bpp in [1, 3, 4]:
            npType = np.uint8
        elif bpp in [2, 6, 8]:
            npType = np.uint16
        else:
            raise ValueError('Unsupported bytes per pixel: {}'.format(bpp))
        raw = img.get_image_data_raw()
        # np.fromstring is deprecated; frombuffer is the modern equivalent.
        # Copy so the returned array is writable (frombuffer views are read-only).
        nImg = np.squeeze(
            np.frombuffer(raw, dtype=npType).copy().reshape((img.height, img.width, -1)))
        return nImg

    def getImages(self, num=None):
        """
        Record and return *num* images.

        Parameters
        ----------
        num : int
            number of images to record and return

        Returns
        -------
        list of numpy.ndarray
            The recorded images in acquisition order
        """
        # NOTE(review): num=None still raises (range(None)); reading "all
        # buffered images" is not implemented -- confirm intended semantics.
        buffers = [xiapi.Image() for _ in range(num)]
        resultNp = []
        self.device.start_acquisition()
        try:
            for img in buffers:
                self.device.get_image(img)
                resultNp.append(self.getNumpyData(img))
        finally:
            self.device.stop_acquisition()
        # The original omitted this return (and referenced an unimported time())
        return resultNp

    def grabStart(self):
        """
        Start grabbing images
        """
        self.device.start_acquisition()

    def grabStop(self):
        """
        Stop grabbing images
        """
        self.device.stop_acquisition()

    def listFeatures(self):
        """
        Helper function to return the available camera feature names
        """
        return xidefs.ASSOC_ENUM.keys()

    def registerSharedFeatures(self):
        """
        Registration of shared features that should be the same for all camera
        implementations. E.g. ExposureMicrons, Resolution, Gain, Format and
        TriggerMode
        """
        self.logger.debug('Registering shared camera features')
        # Several aliases map onto the same setter for caller convenience
        self.registerFeature('ExposureMicrons', self.setExposureMicrons)
        self.registerFeature('ExposureTime', self.setExposureMicrons)
        self.registerFeature('Exposure', self.setExposureMicrons)
        self.registerFeature('Resolution', self.setResolution)
        self.registerFeature('Gain', self.setGain)
        self.registerFeature('Format', self.setPixelFormat)
        self.registerFeature('TriggerMode', self.setTriggerMode)
        self.registerFeature('Trigger', self.setTriggerMode)

    def setExposureMicrons(self, microns=None):
        """
        Set the exposure time to the given value in microseconds or read the
        current value by passing None

        Parameters
        ----------
        microns : int
            Desired exposure time in microseconds that should be set, or None
            to read the current exposure time

        Returns
        -------
        microns : int
            The exposure time in microseconds after applying the passed value
        """
        if microns is None:
            # Original passed None straight to set_exposure; read instead,
            # as the docstring promises
            return self.device.get_exposure()
        self.device.set_exposure(microns)
        return self.device.get_exposure()

    def setResolution(self, resolution=None):
        """
        Set the resolution of the camera to the given values in pixels or read
        the current resolution by passing None

        Parameters
        ----------
        resolution : tuple
            Desired camera resolution in the form (width, height), or None to
            read the current resolution

        Returns
        -------
        resolution : tuple
            The set camera resolution after applying the passed value
        """
        raise NotImplementedError

    def setGain(self, gain=None):
        """
        Set the gain of the camera to the given value or read the current value
        by passing None

        Parameters
        ----------
        gain : int
            Desired gain value to be set, or None to read the current gain
            value

        Returns
        -------
        gain : int
            The gain value after applying the passed value
        """
        self.device.set_gain_selector("XI_GAIN_SELECTOR_ANALOG_ALL")
        if gain is None:
            return self.device.get_gain()
        self.device.set_gain(gain)
        return self.device.get_gain()

    def setPixelFormat(self, format=None):
        """
        Set the image format to the passed setting or read the current format
        by passing None

        Parameters
        ----------
        format : str
            String describing the desired image format (e.g. "mono8"), or None
            to read the current image format

        Returns
        -------
        format : str
            The image format after applying the passed value
        """
        raise NotImplementedError

    def setTriggerMode(self, mode=None):
        """
        Set the trigger mode of the camera to either "in", "out" or "off", or
        read the current trigger setting by passing None

        Parameters
        ----------
        mode : str
            The desired trigger mode. "in" means the camera receives a trigger
            signal, "out" means the camera sends a trigger signal, "off" means
            the camera does not react to triggers. To read the current trigger
            setting pass None

        Returns
        -------
        mode : str
            The trigger mode after applying the passed value
        """
        if mode is None:
            return self.triggerMode
        elif mode == "off":
            # Disable both GPIO ports and the trigger source
            self.device.set_gpo_selector("XI_GPO_PORT1")
            self.device.set_gpo_mode("XI_GPO_OFF")
            self.device.set_gpi_selector("XI_GPI_PORT1")
            self.device.set_gpi_mode("XI_GPI_OFF")
            self.device.set_trigger_source("XI_TRG_OFF")
        elif mode == "in":
            # Hardware trigger input on rising edge of GPI port 1
            self.device.set_gpo_selector("XI_GPO_PORT1")
            self.device.set_gpo_mode("XI_GPO_OFF")
            self.device.set_gpi_selector("XI_GPI_PORT1")
            self.device.set_gpi_mode("XI_GPI_TRIGGER")
            self.device.set_trigger_source("XI_TRG_EDGE_RISING")
        elif mode == "out":
            # Emit the exposure-active signal on GPO port 1
            self.device.set_gpo_selector("XI_GPO_PORT1")
            self.device.set_gpo_mode("XI_GPO_EXPOSURE_ACTIVE")
            self.device.set_gpi_selector("XI_GPI_PORT1")
            self.device.set_gpi_mode("XI_GPI_OFF")
            self.device.set_trigger_source("XI_TRG_OFF")
        # NOTE(review): unknown mode strings are still stored unvalidated,
        # matching original behaviour -- confirm whether that is intended
        self.triggerMode = mode
        return self.triggerMode

    def __del__(self):
        # Best-effort cleanup; device may already be gone at interpreter exit
        if self.device is not None:
            self.closeDevice()
if __name__ == "__main__":
    # Hardware smoke test: enumerate cameras, exercise trigger modes, gain
    # and exposure, grab two images and benchmark multi-image acquisition.
    # Requires an attached XIMEA camera; timings are printed relative to t.
    from time import time
    t = time()
    print ("start", time()-t)
    devices = CameraXimeaXIB.listDevices()
    print ("listed", time()-t)
    print ("Devices: ", devices)
    # Use the first detected camera for all subsequent tests
    cam = CameraXimeaXIB(devices[0])
    print ("constructed", time()-t)
    cam.openDevice()
    print (cam.setTriggerMode())
    cam.setTriggerMode("in")
    cam.setTriggerMode("out")
    cam.setTriggerMode("off")
    print ("opened", time()-t)
    print( "Gain: ", cam.setGain())
    cam.setGain(-9)
    print( "Gain: ", cam.setGain())
    cam.setExposureMicrons(10000)
    print ("set", time()-t)
    from matplotlib import pyplot as plt
    img = cam.getImage()
    print ("got image", time()-t)
    print ("Image 1", img, img.shape, img.dtype)
    plt.imshow(img, cmap="gray")
    plt.show()
    # Second grab with higher gain to compare brightness visually
    cam.setGain(12)
    img = cam.getImage()
    print ("Image 2", img, img.shape, img.dtype)
    plt.imshow(img, cmap="gray")
    plt.show()
    def testMultipleImages(num):
        # Benchmark: acquire `num` frames and report the achieved frame rate
        print ("getting %d images ..."%(num))
        from time import time
        t = time()
        cam.getImages(num)
        res = time()-t
        print ("done in %f seconds with %f fps"%(res, float(num)/res))
        return res
    testMultipleImages(10)
    testMultipleImages(20)
    testMultipleImages(200)
    #testMultipleImages(90)
| [
"matplotlib.pyplot.imshow",
"logging.getLogger",
"pyXimea.xiapi.Image",
"pyXimea.xidefs.ASSOC_ENUM.keys",
"pyXimea.xiapi.Camera",
"numpy.fromstring",
"time.time",
"matplotlib.pyplot.show"
] | [((10883, 10889), 'time.time', 'time', ([], {}), '()\n', (10887, 10889), False, 'from time import time\n'), ((11592, 11620), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (11602, 11620), True, 'from matplotlib import pyplot as plt\n'), ((11625, 11635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11633, 11635), True, 'from matplotlib import pyplot as plt\n'), ((11734, 11762), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (11744, 11762), True, 'from matplotlib import pyplot as plt\n'), ((11767, 11777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11775, 11777), True, 'from matplotlib import pyplot as plt\n'), ((2582, 2609), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2599, 2609), False, 'import logging\n'), ((2765, 2779), 'pyXimea.xiapi.Camera', 'xiapi.Camera', ([], {}), '()\n', (2777, 2779), False, 'from pyXimea import xiapi, xidefs\n'), ((3016, 3030), 'pyXimea.xiapi.Camera', 'xiapi.Camera', ([], {}), '()\n', (3028, 3030), False, 'from pyXimea import xiapi, xidefs\n'), ((5451, 5457), 'time.time', 'time', ([], {}), '()\n', (5455, 5457), False, 'from time import time\n'), ((6021, 6045), 'pyXimea.xidefs.ASSOC_ENUM.keys', 'xidefs.ASSOC_ENUM.keys', ([], {}), '()\n', (6043, 6045), False, 'from pyXimea import xiapi, xidefs\n'), ((11904, 11910), 'time.time', 'time', ([], {}), '()\n', (11908, 11910), False, 'from time import time\n'), ((3172, 3187), 'pyXimea.xiapi.Camera', 'xiapi.Camera', (['i'], {}), '(i)\n', (3184, 3187), False, 'from pyXimea import xiapi, xidefs\n'), ((4272, 4285), 'pyXimea.xiapi.Image', 'xiapi.Image', ([], {}), '()\n', (4283, 4285), False, 'from pyXimea import xiapi, xidefs\n'), ((5342, 5355), 'pyXimea.xiapi.Image', 'xiapi.Image', ([], {}), '()\n', (5353, 5355), False, 'from pyXimea import xiapi, xidefs\n'), ((10910, 10916), 'time.time', 'time', ([], {}), '()\n', (10914, 10916), 
False, 'from time import time\n'), ((10990, 10996), 'time.time', 'time', ([], {}), '()\n', (10994, 10996), False, 'from time import time\n'), ((11096, 11102), 'time.time', 'time', ([], {}), '()\n', (11100, 11102), False, 'from time import time\n'), ((11270, 11276), 'time.time', 'time', ([], {}), '()\n', (11274, 11276), False, 'from time import time\n'), ((11424, 11430), 'time.time', 'time', ([], {}), '()\n', (11428, 11430), False, 'from time import time\n'), ((11524, 11530), 'time.time', 'time', ([], {}), '()\n', (11528, 11530), False, 'from time import time\n'), ((11952, 11958), 'time.time', 'time', ([], {}), '()\n', (11956, 11958), False, 'from time import time\n'), ((4841, 4873), 'numpy.fromstring', 'np.fromstring', (['raw'], {'dtype': 'npType'}), '(raw, dtype=npType)\n', (4854, 4873), True, 'import numpy as np\n')] |
import numpy, itertools
class FourierBasis:
    """Fourier basis for linear function approximation over a box domain.

    Each feature is cos(pi * c . s), where s is the input scaled to
    [0, 1]^nvars and c ranges over every integer coefficient vector in
    {0, ..., order}^nvars.
    """

    def __init__(self, nvars, order, ranges):
        """
        :param nvars: number of input variables
        :param order: maximum Fourier coefficient per variable
        :param ranges: (nvars, 2) array-like of [min, max] per variable
        """
        # (order + 1)^nvars coefficient vectors in total; keep it an int
        # (the original pow(order + 1.0, nvars) produced a float).
        self.numTerms = (order + 1) ** nvars
        self.order = order
        self.ranges = numpy.array(ranges)
        # Enumerate coefficient vectors directly over integers.  The original
        # iterated over the characters of a joined digit string, which breaks
        # for order >= 10 (multi-digit numbers split into single characters).
        coefficients = itertools.product(range(order + 1), repeat=nvars)
        self.multipliers = numpy.array([list(c) for c in coefficients])

    def scale(self, value, pos):
        """Scale raw variable *value* to [0, 1] using the range of variable *pos*."""
        return (value - self.ranges[pos, 0]) / (self.ranges[pos, 1] - self.ranges[pos, 0])

    def computeFeatures(self, features):
        """Return the Fourier basis feature vector for the raw input *features*."""
        basisFeatures = numpy.array([self.scale(features[i], i) for i in range(len(features))])
        return numpy.cos(numpy.pi * numpy.dot(self.multipliers, basisFeatures))
| [
"numpy.array",
"numpy.dot"
] | [((211, 230), 'numpy.array', 'numpy.array', (['ranges'], {}), '(ranges)\n', (222, 230), False, 'import numpy, itertools\n'), ((681, 723), 'numpy.dot', 'numpy.dot', (['self.multipliers', 'basisFeatures'], {}), '(self.multipliers, basisFeatures)\n', (690, 723), False, 'import numpy, itertools\n')] |
#Make a heat map of all teams passes during a tournament
#Set match id in match_id_required.
import json

import matplotlib.pyplot as plt
import numpy as np
from pandas.io.json import json_normalize

from FCPython import createPitch

#Size of the pitch in yards (!!!)
pitchLengthX = 120
pitchWidthY = 80

#The team we are interested in. Swap in one of the alternatives to change team.
#team_required = "United States Women's"
#team_required = "England Women's"
#team_required = "Sweden Women's"
team_required = "Germany Women's"

#Find the matches they played.
#NOTE(review): `matches` must already be loaded (e.g. from the Statsbomb
#matches JSON) before this script runs -- confirm against the source notebook.
match_id_required = []
for match in matches:
    home_team_name = match['home_team']['home_team_name']
    away_team_name = match['away_team']['away_team_name']
    if (home_team_name == team_required) or (away_team_name == team_required):
        match_id_required.append(match['match_id'])
print(match_id_required)

# Load in the data
# I took this from https://znstrider.github.io/2018-11-11-Getting-Started-with-StatsBomb-Data/
for i, match_id in enumerate(match_id_required):
    file_name = str(match_id) + '.json'
    #Load in all match events
    with open('Statsbomb/data/events/' + file_name) as data_file:
        data = json.load(data_file)
    #get the nested structure into a dataframe
    #tag each row with the match id (remove '.json' from the file name)
    df = json_normalize(data, sep="_").assign(match_id=file_name[:-5])
    team_passes = (df['team_name'] == team_required)
    df = df[team_passes]
    #A dataframe of passes
    passes_match = df.loc[df['type_name'] == 'Pass'].set_index('id')
    if i == 0:
        passes = passes_match
    else:
        #DataFrame.append returns a new frame; the original discarded the
        #result, so only the first match's passes were ever accumulated.
        passes = passes.append(passes_match)
    print('Match: ' + str(match_id) + '. Number of passes is: ' + str(len(passes_match)))

#Set number of matches (enumerate index of the last processed match)
number_of_matches = i + 1

#Plot every pass origin as a translucent circle on the pitch
(fig, ax) = createPitch(pitchLengthX, pitchWidthY, 'yards', 'gray')
for _, thepass in passes.iterrows():
    x = thepass['location'][0]
    #Flip the y axis: Statsbomb measures from the top, matplotlib from the bottom
    y = pitchWidthY - thepass['location'][1]
    passCircle = plt.Circle((x, y), 1, color="blue")
    passCircle.set_alpha(.2)
    ax.add_patch(passCircle)
fig.set_size_inches(10, 7)
fig.savefig('Output/passes.pdf', dpi=100)
plt.show()

#2D histogram of pass origins -> heat map of passes per match
x = []
y = []
for _, apass in passes.iterrows():
    x.append(apass['location'][0])
    y.append(pitchWidthY - apass['location'][1])
H_Pass = np.histogram2d(y, x, bins=10, range=[[0, pitchWidthY], [0, pitchLengthX]])

(fig, ax) = createPitch(pitchLengthX, pitchWidthY, 'yards', 'gray')
pos = ax.imshow(H_Pass[0] / number_of_matches, extent=[0, 120, 0, 80], aspect='auto', cmap=plt.cm.Reds)
fig.colorbar(pos, ax=ax)
ax.set_title('Number of passes per match')
plt.xlim((-1, 121))
plt.ylim((-3, 83))
plt.tight_layout()
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
| [
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.gca",
"json.load",
"FCPython.createPitch",
"matplotlib.pyplot.tight_layout",
"numpy.histogram2d",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"pandas.io.json.json_normalize",
"matplotlib.pyplot.show"
] | [((1957, 2012), 'FCPython.createPitch', 'createPitch', (['pitchLengthX', 'pitchWidthY', '"""yards"""', '"""gray"""'], {}), "(pitchLengthX, pitchWidthY, 'yards', 'gray')\n", (1968, 2012), False, 'from FCPython import createPitch\n'), ((2473, 2483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2481, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2620, 2694), 'numpy.histogram2d', 'np.histogram2d', (['y', 'x'], {'bins': '(10)', 'range': '[[0, pitchWidthY], [0, pitchLengthX]]'}), '(y, x, bins=10, range=[[0, pitchWidthY], [0, pitchLengthX]])\n', (2634, 2694), True, 'import numpy as np\n'), ((2737, 2792), 'FCPython.createPitch', 'createPitch', (['pitchLengthX', 'pitchWidthY', '"""yards"""', '"""gray"""'], {}), "(pitchLengthX, pitchWidthY, 'yards', 'gray')\n", (2748, 2792), False, 'from FCPython import createPitch\n'), ((2954, 2973), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1, 121)'], {}), '((-1, 121))\n', (2962, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2973, 2991), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 83)'], {}), '((-3, 83))\n', (2981, 2991), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3009), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3007, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3066, 3068), True, 'import matplotlib.pyplot as plt\n'), ((2131, 2166), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x, y)', '(1)'], {'color': '"""blue"""'}), "((x, y), 1, color='blue')\n", (2141, 2166), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1224), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1213, 1224), False, 'import json\n'), ((3010, 3019), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3017, 3019), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1411), 'pandas.io.json.json_normalize', 'json_normalize', (['data'], {'sep': '"""_"""'}), "(data, sep='_')\n", (1396, 1411), False, 'from 
pandas.io.json import json_normalize\n')] |
from typing import Dict, Union, Tuple, Iterable, Callable, NoReturn, Optional, List, Sequence
import geopandas as gpd
import joblib as jl
import numpy as np
import shapely.geometry as sg
from holoviews import Overlay, Element
from holoviews.element import Geometry
from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans, reproj_bounds
from seedpod_ground_risk.layers.annotation_layer import AnnotationLayer
from seedpod_ground_risk.layers.data_layer import DataLayer
from seedpod_ground_risk.layers.fatality_risk_layer import FatalityRiskLayer
from seedpod_ground_risk.layers.layer import Layer
class PlotServer:
data_layers: List[DataLayer]
annotation_layers: List[AnnotationLayer]
plot_size: Tuple[int, int]
_cached_area: sg.Polygon
_generated_data_layers: Dict[str, Geometry]
# noinspection PyTypeChecker
    def __init__(self, tiles: str = 'Wikipedia', tools: Optional[Iterable[str]] = None,
                 active_tools: Optional[Iterable[str]] = None,
                 cmap: str = 'CET_L18',
                 raster_resolution: float = 40,
                 plot_size: Tuple[int, int] = (760, 735),
                 progress_callback: Optional[Callable[[str], None]] = None,
                 update_callback: Optional[Callable[[str], None]] = None,
                 progress_bar_callback: Optional[Callable[[int], None]] = None):
        """
        Initialise a Plot Server

        :param str tiles: a geoviews.tile_sources attribute string from http://geoviews.org/gallery/bokeh/tile_sources.html#bokeh-gallery-tile-sources
        :param List[str] tools: the bokeh tools to make available for the plot from https://docs.bokeh.org/en/latest/docs/user_guide/tools.html
        :param List[str] active_tools: the subset of `tools` that should be enabled by default
        :param cmap: a colorcet attribute string for the colourmap to use from https://colorcet.holoviz.org/user_guide/Continuous.html
        :param raster_resolution: resolution of a single square of the raster pixel grid in metres
        :param Tuple[int, int] plot_size: the plot size in (width, height) order
        :param progress_callback: an optional callable that takes a string updating progress
        :param update_callback: an optional callable that is called before an plot is rendered
        :param progress_bar_callback: an optional callback that takes an integer updating the progress bar
        """
        self.tools = ['crosshair'] if tools is None else tools
        self.active_tools = ['wheel_zoom'] if active_tools is None else active_tools
        # Resolve string parameters to the actual colourmap/tile objects
        import colorcet
        self.cmap = getattr(colorcet, cmap)
        from geoviews import tile_sources as gvts
        self._base_tiles = getattr(gvts, tiles)
        self._time_idx = 0
        self._generated_data_layers = {}
        self.data_layer_order = []
        self.data_layers = [
            # TemporalPopulationEstimateLayer('Temporal Pop. Est'),
            # RoadsLayer('Road Traffic Population/Hour')
            FatalityRiskLayer('Fatality Risk'),
            # ResidentialLayer('Residential Layer')
        ]
        self.annotation_layers = []
        self.plot_size = plot_size
        # No-op fallbacks so the rest of the class never has to guard callback calls
        self._progress_callback = progress_callback if progress_callback is not None else lambda *args: None
        self._update_callback = update_callback if update_callback is not None else lambda *args: None
        self._progress_bar_callback = progress_bar_callback if progress_bar_callback is not None else lambda *args: None
        # Initial view bounds in EPSG:4326 (lon, lat) -- presumably the
        # Southampton, UK area given the coordinates; TODO confirm
        self._x_range, self._y_range = [-1.45, -1.35], [50.85, 50.95]
        self.raster_resolution_m = raster_resolution
        # pyproj transformers are built lazily (see _reproject_ranges)
        self._epsg4326_to_epsg3857_proj = None
        self._epsg3857_to_epsg4326_proj = None
        self._preload_started = False
        self._preload_complete = False
        from bokeh.io import curdoc
        from bokeh.server.server import Server
        self._current_plot = curdoc()
        self._server_thread = None
        self.server = Server({'/': self.plot}, num_procs=1)
        # Kick off background preloading of layer data once the io_loop runs
        self.server.io_loop.spawn_callback(self._preload_layers)
        self.url = 'http://localhost:{port}/{prefix}'.format(port=self.server.port, prefix=self.server.prefix) \
            if self.server.address is None else self.server.address
    async def _preload_layers(self):
        """Preload every data/annotation layer's data on a thread pool.

        Runs once on the server's io_loop; sets ``_preload_complete`` so
        ``compose_overlay_plot`` knows layer generation can begin.
        """
        from concurrent.futures.thread import ThreadPoolExecutor
        from tornado.gen import multi
        from itertools import chain
        with ThreadPoolExecutor() as pool:
            # Tornado's multi() can await concurrent.futures Futures directly
            await multi([pool.submit(layer.preload_data) for layer in chain(self.data_layers, self.annotation_layers)])
        self._preload_complete = True
        self._progress_callback('Preload complete. First generation will take a minute longer')
        self._progress_bar_callback(0)
def start(self) -> NoReturn:
"""
Start the plot server in a daemon thread
"""
assert self.server is not None
import threading
self._progress_callback('Plot Server starting...')
self.server.start()
self._server_thread = threading.Thread(target=self.server.io_loop.start, daemon=True)
self._server_thread.start()
self._progress_callback('Preloading data')
def stop(self) -> NoReturn:
"""
Stop the plot server if running
"""
assert self.server is not None
if self._server_thread is not None:
if self._server_thread.is_alive():
self._server_thread.join()
self._progress_callback('Plot Server stopped')
def _reproject_ranges(self):
import pyproj
if self._epsg3857_to_epsg4326_proj is None:
self._epsg3857_to_epsg4326_proj = pyproj.Transformer.from_crs(pyproj.CRS.from_epsg('3857'),
pyproj.CRS.from_epsg('4326'),
always_xy=True)
self._x_range[0], self._y_range[0] = self._epsg3857_to_epsg4326_proj.transform(self._x_range[0],
self._y_range[0])
self._x_range[1], self._y_range[1] = self._epsg3857_to_epsg4326_proj.transform(self._x_range[1],
self._y_range[1])
    def plot(self, doc):
        """Bokeh application entry point: render the overlay plot into *doc*.

        Rebuilds the figure from scratch on every call and wires range-change
        callbacks so panning/zooming keeps the stored view bounds in sync.
        """
        import holoviews as hv
        if doc.roots:
            # Re-render from a clean document
            doc.clear()
        self._reproject_ranges()
        # NOTE(review): 10 looks like it was meant for _progress_bar_callback
        # (the progress callback elsewhere receives strings) -- confirm
        self._progress_callback(10)
        hvPlot = self.compose_overlay_plot(self._x_range, self._y_range)
        if self._preload_complete:
            self._progress_bar_callback(100)
            self._progress_callback("Plotting complete")
        fig = hv.render(hvPlot, backend='bokeh')
        fig.output_backend = 'webgl'
        def update_range(n, val):
            # Track the currently-viewed bounds so re-generation uses them
            if n == 'x0':
                self._x_range[0] = round(val, 2)
            elif n == 'x1':
                self._x_range[1] = round(val, 2)
            elif n == 'y0':
                self._y_range[0] = round(val, 2)
            elif n == 'y1':
                self._y_range[1] = round(val, 2)
        fig.x_range.on_change('start', lambda attr, old, new: update_range('x0', new))
        fig.x_range.on_change('end', lambda attr, old, new: update_range("x1", new))
        fig.y_range.on_change('start', lambda attr, old, new: update_range("y0", new))
        fig.y_range.on_change('end', lambda attr, old, new: update_range("y1", new))
        doc.add_root(fig)
        self._current_plot = doc
def generate_map(self):
self._current_plot.add_next_tick_callback(lambda *args: self.plot(self._current_plot))
    def compose_overlay_plot(self, x_range: Optional[Sequence[float]] = (-1.6, -1.2),
                             y_range: Optional[Sequence[float]] = (50.8, 51.05)) \
            -> Union[Overlay, Element]:
        """
        Compose all generated HoloViews layers in self.data_layers into a single overlay plot.
        Overlaid in a first-on-the-bottom manner.

        If plot bounds has moved outside of data bounds, generate more as required.

        :param tuple x_range: (min, max) longitude range in EPSG:4326 coordinates
        :param tuple y_range: (min, max) latitude range in EPSG:4326 coordinates
        :returns: overlay plot of stored layers
        """
        try:
            if not self._preload_complete:
                # If layers aren't preloaded yet just return the map tiles
                self._progress_callback('Still preloading layer data...')
                plot = self._base_tiles
            else:
                # Construct box around requested bounds
                bounds_poly = make_bounds_polygon(x_range, y_range)
                raster_shape = self._get_raster_dimensions(bounds_poly, self.raster_resolution_m)
                # Ensure bounds are small enough to render without OOM or heat death of universe
                if (raster_shape[0] * raster_shape[1]) < 7e5:
                    from time import time
                    t0 = time()
                    self._progress_bar_callback(10)
                    # One fatality-risk data layer per annotated aircraft
                    # TODO: This will give multiple data layers, these need to be able to fed into their relevent pathfinding layers
                    for annlayer in self.annotation_layers:
                        new_layer = FatalityRiskLayer('Fatality Risk', ac=annlayer.aircraft['name'])
                        self.add_layer(new_layer)
                    self.remove_duplicate_layers()
                    self._progress_bar_callback(20)
                    self.generate_layers(bounds_poly, raster_shape)
                    self._progress_bar_callback(50)
                    # Plot only the first generated layer's geometry
                    plt_lyr = list(self._generated_data_layers)[0]
                    plot = Overlay([self._generated_data_layers[plt_lyr][0]])
                    print("Generated all layers in ", time() - t0)
                    if self.annotation_layers:
                        plot = Overlay([self._generated_data_layers[plt_lyr][0]])
                        res = []
                        prog_bar = 50
                        for dlayer in self.data_layers:
                            # Geographic coordinate axes for the raster grid
                            raster_indices = dict(Longitude=np.linspace(x_range[0], x_range[1], num=raster_shape[0]),
                                                  Latitude=np.linspace(y_range[0], y_range[1], num=raster_shape[1]))
                            raw_data = [self._generated_data_layers[dlayer.key][2]]
                            raster_grid = np.sum(
                                [remove_raster_nans(self._generated_data_layers[dlayer.key][1])],
                                axis=0)
                            # Flip vertically so grid rows run north-to-south with the axis
                            raster_grid = np.flipud(raster_grid)
                            raster_indices['Latitude'] = np.flip(raster_indices['Latitude'])
                            for alayer in self.annotation_layers:
                                # Pathfind only on the data layer matching this aircraft
                                if alayer.aircraft == dlayer.ac_dict:
                                    self._progress_bar_callback(prog_bar)
                                    prog_bar += 40 / len(self.annotation_layers)
                                    self._progress_callback(f'Finding a path for {alayer.aircraft["name"]}')
                                    res.append(alayer.annotate(raw_data, (raster_indices, raster_grid)))
                        self._progress_callback('Plotting paths')
                        self._progress_bar_callback(90)
                        # res = jl.Parallel(n_jobs=1, verbose=1, backend='threading')(
                        #     jl.delayed(layer.annotate)(raw_datas, (raster_indices, raster_grid)) for layer in
                        #     self.annotation_layers )
                        plot = Overlay(
                            [self._base_tiles, plot, *[annot for annot in res if annot is not None]]).collate()
                    else:
                        plot = Overlay([self._base_tiles, plot]).collate()
                        self._progress_bar_callback(90)
                else:
                    self._progress_callback('Area too large to render!')
                    if not self._generated_data_layers:
                        plot = self._base_tiles
                    else:
                        plot = Overlay([self._base_tiles, *list(self._generated_data_layers.values())])
            self._update_layer_list()
            self._progress_callback("Rendering new map...")
        except Exception as e:
            # Catch-all to prevent plot blanking out and/or crashing app
            # Just display map tiles in case this was transient
            import traceback
            traceback.print_exc()
            self._progress_callback(
                f'Plotting failed with the following error: {e}. Please attempt to re-generate the plot')
            print(e)
            plot = self._base_tiles
        return plot.opts(width=self.plot_size[0], height=self.plot_size[1],
                         tools=self.tools, active_tools=self.active_tools)
tools=self.tools, active_tools=self.active_tools)
def _update_layer_list(self):
from itertools import chain
layers = []
for layer in chain(self.data_layers, self.annotation_layers):
d = {'key': layer.key}
if hasattr(layer, '_colour'):
d.update(colour=layer._colour)
if hasattr(layer, '_osm_tag'):
d.update(dataTag=layer._osm_tag)
layers.append(d)
self._update_callback(list(chain(self.data_layers, self.annotation_layers)))
    def generate_layers(self, bounds_poly: sg.Polygon, raster_shape: Tuple[int, int]) -> NoReturn:
        """
        Generate static layers of map.

        Each data layer is generated concurrently, then the successful results
        are inserted into ``self._generated_data_layers`` either in reverse
        generation order (default) or following ``self.data_layer_order``.

        :param raster_shape: shape of raster grid
        :param shapely.geometry.Polygon bounds_poly: the bounding polygon for which to generate the map
        """
        layers = {}
        self._progress_callback('Generating layer data')
        # Thread-based parallelism so layers can share in-memory state.
        res = jl.Parallel(n_jobs=-1, verbose=1, prefer='threads')(
            jl.delayed(self.generate_layer)(layer, bounds_poly, raster_shape, self._time_idx,
                                            self.raster_resolution_m) for layer in self.data_layers)
        # generate_layer returns (key, None) on failure; keep successes only.
        for key, result in res:
            if result:
                layers[key] = result
        # Remove layers with explicit ordering
        # so they can be reinserted in the correct order instead of updated in place
        self._generated_data_layers.clear()
        if not self.data_layer_order:
            self._generated_data_layers.update(dict(list(layers.items())[::-1]))
        else:
            # Add layers in order
            self._generated_data_layers.update({k: layers[k] for k in self.data_layer_order if k in layers})
            # Add any new layers not mentioned in data_layer_order last
            self._generated_data_layers.update(
                {k: layers[k] for k in layers.keys() if k not in self._generated_data_layers})
    @staticmethod
    def generate_layer(layer: DataLayer, bounds_poly: sg.Polygon, raster_shape: Tuple[int, int], hour: int,
                       resolution: float) -> Union[
        Tuple[str, Tuple[Geometry, np.ndarray, gpd.GeoDataFrame]], Tuple[str, None]]:
        """
        Generate a single data layer.

        Best-effort: returns ``(layer.key, generated_data)`` on success, or
        ``(layer.key + ' FAILED', None)`` if generation raised, so one bad
        layer cannot abort the whole map generation.

        :param layer: the data layer to generate
        :param bounds_poly: bounding polygon to generate within
        :param raster_shape: shape of the raster grid
        :param hour: hour-of-day index passed through to the layer
        :param resolution: raster resolution in metres
        """
        try:
            if isinstance(layer, FatalityRiskLayer):
                # Disambiguate the key per aircraft and wind condition.
                layer.key = f'{layer.key} {layer.ac} {layer.wind_dir:03d}@{layer.wind_vel}kts'
            result = layer.key, layer.generate(bounds_poly, raster_shape, from_cache=False, hour=hour,
                                               resolution=resolution)
            return result
        except Exception as e:
            import traceback
            traceback.print_tb(e.__traceback__)
            print(e)
            return layer.key + ' FAILED', None
def set_rasterise(self, val: bool) -> None:
self.rasterise = val
for layer in self.data_layers:
layer.rasterise = val
    def set_time(self, hour: int) -> None:
        """Set the hour-of-day index used when generating time-varying layers."""
        self._time_idx = hour
    def add_layer(self, layer: Layer):
        """
        Register *layer*, routing it to the data- or annotation-layer list by
        its type.  ``preload_data`` is called immediately so the layer is
        ready when the map is next generated.  Layers of any other type are
        silently ignored.
        """
        layer.preload_data()
        if isinstance(layer, DataLayer):
            self.data_layers.append(layer)
        elif isinstance(layer, AnnotationLayer):
            self.annotation_layers.append(layer)
def remove_layer(self, layer):
if layer in self.data_layers:
self.data_layers.remove(layer)
elif layer in self.annotation_layers:
self.annotation_layers.remove(layer)
    def set_layer_order(self, layer_order):
        """Set the explicit key ordering used when regenerating data layers."""
        self.data_layer_order = layer_order
    def export_path_geojson(self, layer, filepath):
        """
        Export an annotation layer's path dataframe as GeoJSON to
        ``<filepath>/path.geojson``.  No-op if *layer* is not a registered
        annotation layer.
        """
        import os
        if layer in self.annotation_layers:
            # NOTE(review): joining with os.sep first forces an absolute path,
            # so a relative `filepath` would resolve from the filesystem root
            # -- confirm this is intended.
            layer.dataframe.to_file(os.path.join(os.sep, f'{filepath}', 'path.geojson'), driver='GeoJSON')
    def generate_path_data_popup(self, layer):
        """
        Show a popup window with the risk grid underlying *layer*'s path.

        Uses the generated raster of the first FatalityRiskLayer whose
        aircraft matches the annotation layer's aircraft.
        """
        from seedpod_ground_risk.pathfinding.environment import GridEnvironment
        from seedpod_ground_risk.ui_resources.info_popups import DataWindow
        from seedpod_ground_risk.layers.fatality_risk_layer import FatalityRiskLayer
        for dlayer in self.data_layers:
            if isinstance(dlayer, FatalityRiskLayer) and layer.aircraft == dlayer.ac_dict:
                cur_layer = GridEnvironment(self._generated_data_layers[dlayer.key][1])
                grid = cur_layer.grid
                popup = DataWindow(layer, grid)
                popup.exec()
                # Only the first matching data layer is shown.
                break
def remove_duplicate_layers(self):
# TODO Make the list/set method work as the nested for loop is clunky
# self.data_layers = list(set(self.data_layers))
for i, layer1 in enumerate(self.data_layers):
for j, layer2 in enumerate(self.data_layers):
if layer1.ac_dict == layer2.ac_dict and i != j:
self.remove_layer(layer2)
    def _get_raster_dimensions(self, bounds_poly: sg.Polygon, raster_resolution_m: float) -> Tuple[int, int]:
        """
        Return the (x, y) shape of a raster grid given its EPSG4326 envelope and desired raster resolution
        :param bounds_poly: EPSG4326 Shapely Polygon specifying bounds
        :param raster_resolution_m: raster resolution in metres
        :return: 2-tuple of (width, height)
        """
        import pyproj
        # Building a Transformer is expensive; create it once and cache it.
        if self._epsg4326_to_epsg3857_proj is None:
            self._epsg4326_to_epsg3857_proj = pyproj.Transformer.from_crs(pyproj.CRS.from_epsg('4326'),
                                                                          pyproj.CRS.from_epsg('3857'),
                                                                          always_xy=True)
        return reproj_bounds(bounds_poly, self._epsg4326_to_epsg3857_proj, raster_resolution_m)
| [
"itertools.chain",
"seedpod_ground_risk.pathfinding.environment.GridEnvironment",
"holoviews.Overlay",
"concurrent.futures.thread.ThreadPoolExecutor",
"numpy.flip",
"seedpod_ground_risk.layers.fatality_risk_layer.FatalityRiskLayer",
"bokeh.io.curdoc",
"numpy.linspace",
"traceback.print_exc",
"seed... | [((3953, 3961), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (3959, 3961), False, 'from bokeh.io import curdoc\n'), ((4019, 4056), 'bokeh.server.server.Server', 'Server', (["{'/': self.plot}"], {'num_procs': '(1)'}), "({'/': self.plot}, num_procs=1)\n", (4025, 4056), False, 'from bokeh.server.server import Server\n'), ((5118, 5181), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.server.io_loop.start', 'daemon': '(True)'}), '(target=self.server.io_loop.start, daemon=True)\n', (5134, 5181), False, 'import threading\n'), ((6834, 6868), 'holoviews.render', 'hv.render', (['hvPlot'], {'backend': '"""bokeh"""'}), "(hvPlot, backend='bokeh')\n", (6843, 6868), True, 'import holoviews as hv\n'), ((13236, 13283), 'itertools.chain', 'chain', (['self.data_layers', 'self.annotation_layers'], {}), '(self.data_layers, self.annotation_layers)\n', (13241, 13283), False, 'from itertools import chain\n'), ((18659, 18744), 'seedpod_ground_risk.core.utils.reproj_bounds', 'reproj_bounds', (['bounds_poly', 'self._epsg4326_to_epsg3857_proj', 'raster_resolution_m'], {}), '(bounds_poly, self._epsg4326_to_epsg3857_proj, raster_resolution_m\n )\n', (18672, 18744), False, 'from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans, reproj_bounds\n'), ((3038, 3072), 'seedpod_ground_risk.layers.fatality_risk_layer.FatalityRiskLayer', 'FatalityRiskLayer', (['"""Fatality Risk"""'], {}), "('Fatality Risk')\n", (3055, 3072), False, 'from seedpod_ground_risk.layers.fatality_risk_layer import FatalityRiskLayer\n'), ((4494, 4514), 'concurrent.futures.thread.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (4512, 4514), False, 'from concurrent.futures.thread import ThreadPoolExecutor\n'), ((14024, 14075), 'joblib.Parallel', 'jl.Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(1)', 'prefer': '"""threads"""'}), "(n_jobs=-1, verbose=1, prefer='threads')\n", (14035, 14075), True, 'import joblib as jl\n'), ((5785, 5813), 'pyproj.CRS.from_epsg', 
'pyproj.CRS.from_epsg', (['"""3857"""'], {}), "('3857')\n", (5805, 5813), False, 'import pyproj\n'), ((5889, 5917), 'pyproj.CRS.from_epsg', 'pyproj.CRS.from_epsg', (['"""4326"""'], {}), "('4326')\n", (5909, 5917), False, 'import pyproj\n'), ((8801, 8838), 'seedpod_ground_risk.core.utils.make_bounds_polygon', 'make_bounds_polygon', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (8820, 8838), False, 'from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans, reproj_bounds\n'), ((12750, 12771), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12769, 12771), False, 'import traceback\n'), ((13565, 13612), 'itertools.chain', 'chain', (['self.data_layers', 'self.annotation_layers'], {}), '(self.data_layers, self.annotation_layers)\n', (13570, 13612), False, 'from itertools import chain\n'), ((15702, 15737), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (15720, 15737), False, 'import traceback\n'), ((16734, 16785), 'os.path.join', 'os.path.join', (['os.sep', 'f"""{filepath}"""', '"""path.geojson"""'], {}), "(os.sep, f'{filepath}', 'path.geojson')\n", (16746, 16785), False, 'import os\n'), ((17253, 17312), 'seedpod_ground_risk.pathfinding.environment.GridEnvironment', 'GridEnvironment', (['self._generated_data_layers[dlayer.key][1]'], {}), '(self._generated_data_layers[dlayer.key][1])\n', (17268, 17312), False, 'from seedpod_ground_risk.pathfinding.environment import GridEnvironment\n'), ((17375, 17398), 'seedpod_ground_risk.ui_resources.info_popups.DataWindow', 'DataWindow', (['layer', 'grid'], {}), '(layer, grid)\n', (17385, 17398), False, 'from seedpod_ground_risk.ui_resources.info_popups import DataWindow\n'), ((18420, 18448), 'pyproj.CRS.from_epsg', 'pyproj.CRS.from_epsg', (['"""4326"""'], {}), "('4326')\n", (18440, 18448), False, 'import pyproj\n'), ((18524, 18552), 'pyproj.CRS.from_epsg', 'pyproj.CRS.from_epsg', (['"""3857"""'], {}), "('3857')\n", (18544, 18552), False, 
'import pyproj\n'), ((9164, 9170), 'time.time', 'time', ([], {}), '()\n', (9168, 9170), False, 'from time import time\n'), ((9884, 9934), 'holoviews.Overlay', 'Overlay', (['[self._generated_data_layers[plt_lyr][0]]'], {}), '([self._generated_data_layers[plt_lyr][0]])\n', (9891, 9934), False, 'from holoviews import Overlay, Element\n'), ((14089, 14120), 'joblib.delayed', 'jl.delayed', (['self.generate_layer'], {}), '(self.generate_layer)\n', (14099, 14120), True, 'import joblib as jl\n'), ((9452, 9516), 'seedpod_ground_risk.layers.fatality_risk_layer.FatalityRiskLayer', 'FatalityRiskLayer', (['"""Fatality Risk"""'], {'ac': "annlayer.aircraft['name']"}), "('Fatality Risk', ac=annlayer.aircraft['name'])\n", (9469, 9516), False, 'from seedpod_ground_risk.layers.fatality_risk_layer import FatalityRiskLayer\n'), ((10080, 10130), 'holoviews.Overlay', 'Overlay', (['[self._generated_data_layers[plt_lyr][0]]'], {}), '([self._generated_data_layers[plt_lyr][0]])\n', (10087, 10130), False, 'from holoviews import Overlay, Element\n'), ((4594, 4641), 'itertools.chain', 'chain', (['self.data_layers', 'self.annotation_layers'], {}), '(self.data_layers, self.annotation_layers)\n', (4599, 4641), False, 'from itertools import chain\n'), ((9989, 9995), 'time.time', 'time', ([], {}), '()\n', (9993, 9995), False, 'from time import time\n'), ((10807, 10829), 'numpy.flipud', 'np.flipud', (['raster_grid'], {}), '(raster_grid)\n', (10816, 10829), True, 'import numpy as np\n'), ((10887, 10922), 'numpy.flip', 'np.flip', (["raster_indices['Latitude']"], {}), "(raster_indices['Latitude'])\n", (10894, 10922), True, 'import numpy as np\n'), ((11837, 11922), 'holoviews.Overlay', 'Overlay', (['[self._base_tiles, plot, *[annot for annot in res if annot is not None]]'], {}), '([self._base_tiles, plot, *[annot for annot in res if annot is not\n None]])\n', (11844, 11922), False, 'from holoviews import Overlay, Element\n'), ((12015, 12048), 'holoviews.Overlay', 'Overlay', (['[self._base_tiles, plot]'], 
{}), '([self._base_tiles, plot])\n', (12022, 12048), False, 'from holoviews import Overlay, Element\n'), ((10318, 10374), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]'], {'num': 'raster_shape[0]'}), '(x_range[0], x_range[1], num=raster_shape[0])\n', (10329, 10374), True, 'import numpy as np\n'), ((10435, 10491), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]'], {'num': 'raster_shape[1]'}), '(y_range[0], y_range[1], num=raster_shape[1])\n', (10446, 10491), True, 'import numpy as np\n'), ((10660, 10722), 'seedpod_ground_risk.core.utils.remove_raster_nans', 'remove_raster_nans', (['self._generated_data_layers[dlayer.key][1]'], {}), '(self._generated_data_layers[dlayer.key][1])\n', (10678, 10722), False, 'from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans, reproj_bounds\n')] |
from artemis.general.should_be_builtins import all_equal
__author__ = 'peter'
import numpy as np
def is_pareto_efficient_dumb(costs):
    """
    Brute-force Pareto-efficiency check, O(n_points**2 * n_costs).

    A point is flagged inefficient only when some other point is strictly
    smaller in every cost dimension.

    :param costs: An (n_points, n_costs) array
    :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
    """
    n_points = costs.shape[0]
    flags = np.empty(n_points, dtype=bool)
    for idx in range(n_points):
        point = costs[idx]
        # Efficient iff every row (including itself) has some cost >= point's.
        flags[idx] = bool(np.all(np.any(costs >= point, axis=1)))
    return flags
def is_pareto_efficient(costs):
    """
    Faster Pareto-efficiency check that shrinks the candidate set as it goes.

    :param costs: An (n_points, n_costs) array
    :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
    """
    efficient_mask = np.ones(costs.shape[0], dtype=bool)
    for idx, point in enumerate(costs):
        if not efficient_mask[idx]:
            continue
        # Keep only surviving candidates that are <= this point in at least
        # one dimension; anything strictly worse everywhere is dropped.
        efficient_mask[efficient_mask] = np.any(costs[efficient_mask] <= point, axis=1)
    return efficient_mask
def is_pareto_efficient_ixs(costs):
    """
    Pareto-efficiency check maintaining a shrinking sorted array of candidate
    indices.

    :param costs: An (n_points, n_costs) array
    :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
    """
    candidates = np.arange(costs.shape[0])
    for i, c in enumerate(costs):
        pos = np.searchsorted(candidates, i)
        # Let point i prune the candidate set only if it is itself still a
        # candidate.  (The previous `0 < pos < len(candidates)` test wrongly
        # skipped the first remaining candidate, so a dominating point at
        # index 0 never eliminated anything.)
        if pos < len(candidates) and candidates[pos] == i:
            candidates = candidates[np.any(costs[candidates] <= c, axis=1)]
    is_efficient = np.zeros(costs.shape[0], dtype=bool)
    is_efficient[candidates] = True
    return is_efficient
def find_pareto_ixs(cost_arrays):
    """
    :param cost_arrays: A collection of nd-arrays representing a grid of costs for different indices.
    :return: A tuple of index arrays which can be used to index the pareto-efficient points.
    """
    assert all_equal([c.shape for c in cost_arrays])
    # Flatten the stacked cost grids into an (n_points, n_costs) matrix,
    # one row per grid cell, and find the efficient rows.
    flat_ixs, = np.nonzero(is_pareto_efficient(np.reshape(cost_arrays, (len(cost_arrays), -1)).T), )
    # BUG FIX: the `dims=` keyword of np.unravel_index was deprecated in
    # NumPy 1.13 and removed in 1.16; pass the shape positionally.
    ixs = np.unravel_index(flat_ixs, cost_arrays[0].shape)
    return ixs
| [
"artemis.general.should_be_builtins.all_equal",
"numpy.ones",
"numpy.searchsorted",
"numpy.any",
"numpy.zeros",
"numpy.unravel_index",
"numpy.arange"
] | [((312, 347), 'numpy.ones', 'np.ones', (['costs.shape[0]'], {'dtype': 'bool'}), '(costs.shape[0], dtype=bool)\n', (319, 347), True, 'import numpy as np\n'), ((676, 711), 'numpy.ones', 'np.ones', (['costs.shape[0]'], {'dtype': 'bool'}), '(costs.shape[0], dtype=bool)\n', (683, 711), True, 'import numpy as np\n'), ((963, 988), 'numpy.arange', 'np.arange', (['costs.shape[0]'], {}), '(costs.shape[0])\n', (972, 988), True, 'import numpy as np\n'), ((1228, 1264), 'numpy.zeros', 'np.zeros', (['costs.shape[0]'], {'dtype': 'bool'}), '(costs.shape[0], dtype=bool)\n', (1236, 1264), True, 'import numpy as np\n'), ((1580, 1621), 'artemis.general.should_be_builtins.all_equal', 'all_equal', (['[c.shape for c in cost_arrays]'], {}), '([c.shape for c in cost_arrays])\n', (1589, 1621), False, 'from artemis.general.should_be_builtins import all_equal\n'), ((1733, 1786), 'numpy.unravel_index', 'np.unravel_index', (['flat_ixs'], {'dims': 'cost_arrays[0].shape'}), '(flat_ixs, dims=cost_arrays[0].shape)\n', (1749, 1786), True, 'import numpy as np\n'), ((417, 443), 'numpy.any', 'np.any', (['(costs >= c)'], {'axis': '(1)'}), '(costs >= c, axis=1)\n', (423, 443), True, 'import numpy as np\n'), ((817, 857), 'numpy.any', 'np.any', (['(costs[is_efficient] <= c)'], {'axis': '(1)'}), '(costs[is_efficient] <= c, axis=1)\n', (823, 857), True, 'import numpy as np\n'), ((1038, 1068), 'numpy.searchsorted', 'np.searchsorted', (['candidates', 'i'], {}), '(candidates, i)\n', (1053, 1068), True, 'import numpy as np\n'), ((1171, 1209), 'numpy.any', 'np.any', (['(costs[candidates] <= c)'], {'axis': '(1)'}), '(costs[candidates] <= c, axis=1)\n', (1177, 1209), True, 'import numpy as np\n')] |
# Code is based on scikit-learns permutation importance.
import numpy as np
from sklearn.inspection._permutation_importance import check_random_state, \
_weights_scorer
def _calculate_permutation_scores(estimator, X, y, sample_weight, col_idx,
                                  random_state, n_repeats, scorer):
    """Calculate score when `col_idx` is permuted.

    The selected column(s) are shuffled ``n_repeats`` times and the estimator
    is re-scored after each shuffle; returns an array of ``n_repeats`` scores.
    """
    random_state = check_random_state(random_state)
    # Work on a copy of X to ensure thread-safety in case of threading based
    # parallelism. Furthermore, making a copy is also useful when the joblib
    # backend is 'loky' (default) or the old 'multiprocessing': in those cases,
    # if X is large it will be automatically be backed by a readonly memory map
    # (memmap). X.copy() on the other hand is always guaranteed to return a
    # writable data-structure whose columns can be shuffled inplace.
    X_permuted = X.copy()
    scores = np.zeros(n_repeats)
    shuffling_idx = np.arange(X.shape[0])
    for n_round in range(n_repeats):
        random_state.shuffle(shuffling_idx)
        if hasattr(X_permuted, "iloc"):
            raise NotImplementedError("DataFrames not yet implemented.")
        else:
            # The (n, 1) row-index array broadcasts against col_idx, so every
            # selected column is shuffled with the same row permutation.
            X_permuted[:, col_idx] = X_permuted[[[x] for x in shuffling_idx],
                                                col_idx]
        feature_score = _weights_scorer(
            scorer, estimator, X_permuted, y, sample_weight
        )
        scores[n_round] = feature_score
    return scores
| [
"sklearn.inspection._permutation_importance.check_random_state",
"sklearn.inspection._permutation_importance._weights_scorer",
"numpy.zeros",
"numpy.arange"
] | [((391, 423), 'sklearn.inspection._permutation_importance.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (409, 423), False, 'from sklearn.inspection._permutation_importance import check_random_state, _weights_scorer\n'), ((926, 945), 'numpy.zeros', 'np.zeros', (['n_repeats'], {}), '(n_repeats)\n', (934, 945), True, 'import numpy as np\n'), ((966, 987), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (975, 987), True, 'import numpy as np\n'), ((1355, 1419), 'sklearn.inspection._permutation_importance._weights_scorer', '_weights_scorer', (['scorer', 'estimator', 'X_permuted', 'y', 'sample_weight'], {}), '(scorer, estimator, X_permuted, y, sample_weight)\n', (1370, 1419), False, 'from sklearn.inspection._permutation_importance import check_random_state, _weights_scorer\n')] |
import os, shutil
import numpy as np
import time
import datetime
import torch
import torchvision
from torch import optim
from torch.autograd import Variable
import torch.nn.functional as F
from utils.mask_functions import write_txt
from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net
from models.linknet import LinkNet34
from models.deeplabv3.deeplabv3plus import DeepLabV3Plus
import csv
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import tqdm
from backboned_unet import Unet
from utils.loss import GetLoss, RobustFocalLoss2d, BCEDiceLoss, SoftBCEDiceLoss, SoftBceLoss, LovaszLoss
from torch.utils.tensorboard import SummaryWriter
import segmentation_models_pytorch as smp
from models.Transpose_unet.unet.model import Unet as Unet_t
from models.octave_unet.unet.model import OctaveUnet
import pandas as pd
class Train(object):
    def __init__(self, config, train_loader, valid_loader):
        """
        Build a three-stage training driver.

        :param config: namespace of hyper-parameters (model type, LRs, epoch
            counts, paths, etc.).
        :param train_loader: DataLoader yielding (images, masks) for training.
        :param valid_loader: DataLoader for validation.
        """
        # Data loader
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        # Models
        self.unet = None
        self.optimizer = None
        self.img_ch = config.img_ch
        self.output_ch = config.output_ch
        self.criterion = SoftBCEDiceLoss(weight=[0.25, 0.75])
        # self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(50))
        self.criterion_stage2 = SoftBCEDiceLoss(weight=[0.25, 0.75])
        self.criterion_stage3 = SoftBCEDiceLoss(weight=[0.25, 0.75])
        self.model_type = config.model_type
        self.t = config.t
        self.mode = config.mode
        self.resume = config.resume
        # Hyper-parameters
        self.lr = config.lr
        self.lr_stage2 = config.lr_stage2
        self.lr_stage3 = config.lr_stage3
        self.start_epoch, self.max_dice = 0, 0
        self.weight_decay = config.weight_decay
        # NOTE(review): stage 2/3 reuse config.weight_decay rather than
        # per-stage values -- confirm this is intentional.
        self.weight_decay_stage2 = config.weight_decay
        self.weight_decay_stage3 = config.weight_decay
        # Save settings: a timestamped tensorboard writer, except when only
        # choosing a threshold (no training happens then).
        self.save_path = config.save_path
        if 'choose_threshold' not in self.mode:
            TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
            self.writer = SummaryWriter(log_dir=self.save_path+'/'+TIMESTAMP)
        # Stage configuration (epoch counts, gradient accumulation)
        self.epoch_stage1 = config.epoch_stage1
        self.epoch_stage1_freeze = config.epoch_stage1_freeze
        self.epoch_stage2 = config.epoch_stage2
        self.epoch_stage2_accumulation = config.epoch_stage2_accumulation
        self.accumulation_steps = config.accumulation_steps
        self.epoch_stage3 = config.epoch_stage3
        self.epoch_stage3_accumulation = config.epoch_stage3_accumulation
        # Model initialisation
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.build_model()
    def build_model(self):
        """
        Instantiate the segmentation network named by ``self.model_type``,
        wrap it in ``DataParallel`` and move losses to GPU when CUDA is
        available, then move the model onto ``self.device``.
        """
        print("Using model: {}".format(self.model_type))
        """Build generator and discriminator."""
        # Dispatch on the configured architecture name.
        # NOTE(review): an unrecognised model_type leaves self.unet as None
        # and will fail later at DataParallel/.to() -- consider raising here.
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=3, output_ch=self.output_ch)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=3, output_ch=self.output_ch, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=3, output_ch=self.output_ch)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=3, output_ch=self.output_ch, t=self.t)
        elif self.model_type == 'unet_resnet34':
            # self.unet = Unet(backbone_name='resnet34', pretrained=True, classes=self.output_ch)
            self.unet = smp.Unet('resnet34', encoder_weights='imagenet', activation=None)
        elif self.model_type == 'unet_resnet50':
            self.unet = smp.Unet('resnet50', encoder_weights='imagenet', activation=None)
        elif self.model_type == 'unet_se_resnext50_32x4d':
            self.unet = smp.Unet('se_resnext50_32x4d', encoder_weights='imagenet', activation=None)
        elif self.model_type == 'unet_densenet121':
            self.unet = smp.Unet('densenet121', encoder_weights='imagenet', activation=None)
        elif self.model_type == 'unet_resnet34_t':
            self.unet = Unet_t('resnet34', encoder_weights='imagenet', activation=None, use_ConvTranspose2d=True)
        elif self.model_type == 'unet_resnet34_oct':
            self.unet = OctaveUnet('resnet34', encoder_weights='imagenet', activation=None)
        elif self.model_type == 'linknet':
            self.unet = LinkNet34(num_classes=self.output_ch)
        elif self.model_type == 'deeplabv3plus':
            self.unet = DeepLabV3Plus(model_backbone='res50_atrous', num_classes=self.output_ch)
        elif self.model_type == 'pspnet_resnet34':
            self.unet = smp.PSPNet('resnet34', encoder_weights='imagenet', classes=1, activation=None)
        if torch.cuda.is_available():
            self.unet = torch.nn.DataParallel(self.unet)
            self.criterion = self.criterion.cuda()
            self.criterion_stage2 = self.criterion_stage2.cuda()
            self.criterion_stage3 = self.criterion_stage3.cuda()
        self.unet.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
    def reset_grad(self):
        """Zero the gradient buffers of the network before backprop."""
        self.unet.zero_grad()
def save_checkpoint(self, state, stage, index, is_best):
# 保存权重,每一epoch均保存一次,若为最优,则复制到最优权重;index可以区分不同的交叉验证
pth_path = os.path.join(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index))
torch.save(state, pth_path)
if is_best:
print('Saving Best Model.')
write_txt(self.save_path, 'Saving Best Model.')
shutil.copyfile(os.path.join(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index)), os.path.join(self.save_path, '%s_%d_%d_best.pth' % (self.model_type, stage, index)))
def load_checkpoint(self, load_optimizer=True):
# Load the pretrained Encoder
weight_path = os.path.join(self.save_path, self.resume)
if os.path.isfile(weight_path):
checkpoint = torch.load(weight_path)
# 加载模型的参数,学习率,优化器,开始的epoch,最小误差等
if torch.cuda.is_available:
self.unet.module.load_state_dict(checkpoint['state_dict'])
else:
self.unet.load_state_dict(checkpoint['state_dict'])
self.start_epoch = checkpoint['epoch']
self.max_dice = checkpoint['max_dice']
if load_optimizer:
self.lr = checkpoint['lr']
self.optimizer.load_state_dict(checkpoint['optimizer'])
print('%s is Successfully Loaded from %s' % (self.model_type, weight_path))
write_txt(self.save_path, '%s is Successfully Loaded from %s' % (self.model_type, weight_path))
else:
raise FileNotFoundError("Can not find weight file in {}".format(weight_path))
    def train(self, index):
        """
        Stage 1 training loop with cosine-annealed learning rate.

        Trains ``self.unet`` on ``self.train_loader`` for ``epoch_stage1``
        epochs, validating, checkpointing and logging to tensorboard each
        epoch.

        :param index: cross-validation fold index, used in checkpoint names.
        """
        # self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-4}, {'params': self.unet.encoder.parameters(), 'lr': 1e-6},])
        self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr, weight_decay=self.weight_decay)
        # If training was interrupted, reload the saved parameters and the
        # previous learning rate.  TODO: the resumed LR is not actually
        # re-applied, so resume is currently unusable.
        if self.resume:
            self.load_checkpoint(load_optimizer=True)
            '''
            CosineAnnealingLR:若存在['initial_lr'],则从initial_lr开始衰减;
            若不存在,则执行CosineAnnealingLR会在optimizer.param_groups中添加initial_lr键值,其值等于lr
            重置初始学习率,在load_checkpoint中会加载优化器,但其中的initial_lr还是之前的,所以需要覆盖为self.lr,让其从self.lr衰减
            '''
            # (Above: CosineAnnealingLR decays from 'initial_lr' if present;
            # load_checkpoint restores the old optimizer, so overwrite
            # initial_lr with self.lr to decay from the current LR.)
            self.optimizer.param_groups[0]['initial_lr'] = self.lr
        stage1_epoches = self.epoch_stage1 - self.start_epoch
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage1_epoches+10)
        # Offset the global step so tensorboard logs are not overwritten
        # when resuming part-way through.
        global_step_before = self.start_epoch*len(self.train_loader)
        for epoch in range(self.start_epoch, self.epoch_stage1):
            epoch += 1
            self.unet.train(True)
            # Learning-rate warm restart (disabled)
            # if epoch == 30:
            #     self.optimizer.param_groups[0]['initial_lr'] = 0.0001
            #     lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, 25)
            epoch_loss = 0
            tbar = tqdm.tqdm(self.train_loader)
            for i, (images, masks) in enumerate(tbar):
                # GT : Ground Truth
                images = images.to(self.device)
                masks = masks.to(self.device)
                # SR : Segmentation Result
                net_output = self.unet(images)
                net_output_flat = net_output.view(net_output.size(0), -1)
                masks_flat = masks.view(masks.size(0), -1)
                loss_set = self.criterion(net_output_flat, masks_flat)
                # The criterion may return either a single loss or a sequence
                # of (total, components...).
                # NOTE(review): bare except -- len() on a 0-dim tensor raises
                # TypeError; narrowing this would be safer.
                try:
                    loss_num = len(loss_set)
                except:
                    loss_num = 1
                # Handle according to how many losses were returned.
                if loss_num > 1:
                    for loss_index, loss_item in enumerate(loss_set):
                        if loss_index > 0:
                            loss_name = 'stage1_loss_%d' % loss_index
                            self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
                    loss = loss_set[0]
                else:
                    loss = loss_set
                epoch_loss += loss.item()
                # Backprop + optimize
                self.reset_grad()
                loss.backward()
                self.optimizer.step()
                params_groups_lr = str()
                for group_ind, param_group in enumerate(self.optimizer.param_groups):
                    params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
                # Log to tensorboard once per step.
                self.writer.add_scalar('Stage1_train_loss', loss.item(), global_step_before+i)
                descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
                tbar.set_description(desc=descript)
            # Update global_step_before ready for the next epoch.
            global_step_before += len(tbar)
            # Print the log info
            print('Finish Stage1 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage1, epoch_loss/len(tbar)))
            write_txt(self.save_path, 'Finish Stage1 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage1, epoch_loss/len(tbar)))
            # Validate, save weights and write logs.
            loss_mean, dice_mean = self.validation(stage=1)
            if dice_mean > self.max_dice:
                is_best = True
                self.max_dice = dice_mean
            else: is_best = False
            self.lr = lr_scheduler.get_lr()
            state = {'epoch': epoch,
                     'state_dict': self.unet.module.state_dict(),
                     'max_dice': self.max_dice,
                     'optimizer' : self.optimizer.state_dict(),
                     'lr' : self.lr}
            self.save_checkpoint(state, 1, index, is_best)
            self.writer.add_scalar('Stage1_val_loss', loss_mean, epoch)
            self.writer.add_scalar('Stage1_val_dice', dice_mean, epoch)
            self.writer.add_scalar('Stage1_lr', self.lr[0], epoch)
            # Learning-rate decay step.
            lr_scheduler.step()
    def train_stage2(self, index):
        """
        Stage 2 training loop (1024px inputs), with optional gradient
        accumulation over the final ``epoch_stage2_accumulation`` epochs.

        :param index: cross-validation fold index, used in checkpoint names.
        """
        # # Freeze BN layers, see https://zhuanlan.zhihu.com/p/65439075 and https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/100736591271 for more information
        # def set_bn_eval(m):
        #     classname = m.__class__.__name__
        #     if classname.find('BatchNorm') != -1:
        #       m.eval()
        # self.unet.apply(set_bn_eval)
        # self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-5}, {'params': self.unet.encoder.parameters(), 'lr': 1e-7},])
        self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr_stage2, weight_decay=self.weight_decay_stage2)
        # Two resume cases: stage 2 was interrupted and must continue, or
        # stage 2 has not started yet and we load the stage-1 weights.
        if self.resume:
            # Stage 2 was interrupted part-way; reload it TODO
            if self.resume.split('_')[2] == '2':
                self.load_checkpoint(load_optimizer=True) # load_optimizer=True also restores the LR and optimizer
                '''
                CosineAnnealingLR:若存在['initial_lr'],则从initial_lr开始衰减;
                若不存在,则执行CosineAnnealingLR会在optimizer.param_groups中添加initial_lr键值,其值等于lr
                重置初始学习率,在load_checkpoint中会加载优化器,但其中的initial_lr还是之前的,所以需要覆盖为self.lr,让其从self.lr衰减
                '''
                # (Above: overwrite initial_lr so CosineAnnealingLR decays
                # from the current self.lr rather than the stale value.)
                self.optimizer.param_groups[0]['initial_lr'] = self.lr
            # Stage 1 finished but stage 2 did not start immediately (paused in between).
            elif self.resume.split('_')[2] == '1':
                self.load_checkpoint(load_optimizer=False)
                self.start_epoch = 0
                self.max_dice = 0
        # Stage 2 follows stage 1 directly with no pause in between.
        else:
            self.start_epoch = 0
            self.max_dice = 0
        # Offset the global step so logs are not overwritten when resuming.
        global_step_before = self.start_epoch*len(self.train_loader)
        stage2_epoches = self.epoch_stage2 - self.start_epoch
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage2_epoches+5)
        for epoch in range(self.start_epoch, self.epoch_stage2):
            epoch += 1
            self.unet.train(True)
            epoch_loss = 0
            self.reset_grad() # needed when accumulating gradients
            tbar = tqdm.tqdm(self.train_loader)
            for i, (images, masks) in enumerate(tbar):
                # GT : Ground Truth
                images = images.to(self.device)
                masks = masks.to(self.device)
                assert images.size(2) == 1024
                # SR : Segmentation Result
                net_output = self.unet(images)
                net_output_flat = net_output.view(net_output.size(0), -1)
                masks_flat = masks.view(masks.size(0), -1)
                loss_set = self.criterion_stage2(net_output_flat, masks_flat)
                # NOTE(review): bare except -- len() on a 0-dim tensor raises
                # TypeError; narrowing this would be safer.
                try:
                    loss_num = len(loss_set)
                except:
                    loss_num = 1
                # Handle according to how many losses were returned.
                if loss_num > 1:
                    for loss_index, loss_item in enumerate(loss_set):
                        if loss_index > 0:
                            loss_name = 'stage2_loss_%d' % loss_index
                            self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
                    loss = loss_set[0]
                else:
                    loss = loss_set
                epoch_loss += loss.item()
                # Backprop + optimize, see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/20 for Accumulating Gradients
                if epoch <= self.epoch_stage2 - self.epoch_stage2_accumulation:
                    self.reset_grad()
                    loss.backward()
                    self.optimizer.step()
                else:
                    # loss = loss / self.accumulation_steps                # Normalize our loss (if averaged)
                    loss.backward()                                      # Backward pass
                    if (i+1) % self.accumulation_steps == 0:             # Wait for several backward steps
                        self.optimizer.step()                            # Now we can do an optimizer step
                        self.reset_grad()
                params_groups_lr = str()
                for group_ind, param_group in enumerate(self.optimizer.param_groups):
                    params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
                # Log to tensorboard once per step.
                self.writer.add_scalar('Stage2_train_loss', loss.item(), global_step_before+i)
                descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
                tbar.set_description(desc=descript)
            # Update global_step_before ready for the next epoch.
            global_step_before += len(tbar)
            # Print the log info
            print('Finish Stage2 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage2, epoch_loss/len(tbar)))
            write_txt(self.save_path, 'Finish Stage2 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage2, epoch_loss/len(tbar)))
            # Validate, save weights and write logs.
            loss_mean, dice_mean = self.validation(stage=2)
            if dice_mean > self.max_dice:
                is_best = True
                self.max_dice = dice_mean
            else: is_best = False
            self.lr = lr_scheduler.get_lr()
            state = {'epoch': epoch,
                     'state_dict': self.unet.module.state_dict(),
                     'max_dice': self.max_dice,
                     'optimizer' : self.optimizer.state_dict(),
                     'lr' : self.lr}
            self.save_checkpoint(state, 2, index, is_best)
            self.writer.add_scalar('Stage2_val_loss', loss_mean, epoch)
            self.writer.add_scalar('Stage2_val_dice', dice_mean, epoch)
            self.writer.add_scalar('Stage2_lr', self.lr[0], epoch)
            # Learning-rate decay step.
            lr_scheduler.step()
# stage3, 接着stage2的训练,只训练有mask的样本
def train_stage3(self, index):
    """Stage-3 training: continue from stage 2, using only samples that contain a mask.

    Args:
        index: fold index, used when naming checkpoints and logs.
    """
    # # Freeze BN layers, see https://zhuanlan.zhihu.com/p/65439075 and https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/100736591271 for more information
    # def set_bn_eval(m):
    #     classname = m.__class__.__name__
    #     if classname.find('BatchNorm') != -1:
    #         m.eval()
    # self.unet.apply(set_bn_eval)
    # self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-5}, {'params': self.unet.encoder.parameters(), 'lr': 1e-7},])
    self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr_stage3, weight_decay=self.weight_decay_stage3)
    # For 'train_stage23' the resume checkpoint only applies to the second stage.
    if self.mode == 'train_stage23':
        self.resume = None
    # Two resume situations: stage 3 has not been trained yet and the stage-2
    # weights must be loaded, or stage 3 was interrupted half-way and continues.
    if self.resume:
        # Stage 3 was interrupted half-way; reload everything. TODO
        if self.resume.split('_')[2] == '3':
            self.load_checkpoint(load_optimizer=True)  # load_optimizer=True also restores the lr and optimizer state
            # CosineAnnealingLR: if 'initial_lr' exists, decay starts from it;
            # otherwise CosineAnnealingLR adds an initial_lr key (equal to lr)
            # to optimizer.param_groups. load_checkpoint restores the optimizer,
            # whose initial_lr is stale, so overwrite it with self.lr to make
            # the schedule decay from self.lr.
            self.optimizer.param_groups[0]['initial_lr'] = self.lr
        # Training was paused between the end of stage 2 and the start of stage 3.
        elif self.resume.split('_')[2] == '2':
            self.load_checkpoint(load_optimizer=False)
            self.start_epoch = 0
            self.max_dice = 0
    # Stage 3 follows stage 2 directly without a pause.
    else:
        print('start stage3 after stage2 directly!')
        self.start_epoch = 0
        self.max_dice = 0
    # Keep the global step in sync so a resumed run does not overwrite earlier logs.
    global_step_before = self.start_epoch * len(self.train_loader)
    stage3_epoches = self.epoch_stage3 - self.start_epoch
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage3_epoches + 5)
    for epoch in range(self.start_epoch, self.epoch_stage3):
        epoch += 1
        self.unet.train(True)
        epoch_loss = 0
        self.reset_grad()  # required when accumulating gradients
        tbar = tqdm.tqdm(self.train_loader)
        for i, (images, masks) in enumerate(tbar):
            # GT : Ground Truth
            images = images.to(self.device)
            masks = masks.to(self.device)
            assert images.size(2) == 1024
            # SR : Segmentation Result
            net_output = self.unet(images)
            net_output_flat = net_output.view(net_output.size(0), -1)
            masks_flat = masks.view(masks.size(0), -1)
            loss_set = self.criterion_stage3(net_output_flat, masks_flat)
            try:
                loss_num = len(loss_set)
            except TypeError:  # a single scalar loss has no len(); don't mask other errors
                loss_num = 1
            # The criterion may return one loss, or a sequence whose first
            # element is the total loss and the rest are sub-losses to log.
            if loss_num > 1:
                for loss_index, loss_item in enumerate(loss_set):
                    if loss_index > 0:
                        loss_name = 'stage3_loss_%d' % loss_index
                        self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
                loss = loss_set[0]
            else:
                loss = loss_set
            epoch_loss += loss.item()
            # Backprop + optimize, see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/20 for Accumulating Gradients
            if epoch <= self.epoch_stage3 - self.epoch_stage3_accumulation:
                self.reset_grad()
                loss.backward()
                self.optimizer.step()
            else:
                # loss = loss / self.accumulation_steps # Normalize our loss (if averaged)
                loss.backward()  # Backward pass
                if (i + 1) % self.accumulation_steps == 0:  # Wait for several backward steps
                    self.optimizer.step()  # Now we can do an optimizer step
                    self.reset_grad()
            params_groups_lr = str()
            for group_ind, param_group in enumerate(self.optimizer.param_groups):
                params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
            # Log every training step to tensorboard.
            self.writer.add_scalar('Stage3_train_loss', loss.item(), global_step_before + i)
            descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
            tbar.set_description(desc=descript)
        # Advance global_step_before for the next epoch.
        global_step_before += len(tbar)
        # Print the log info
        print('Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss / len(tbar)))
        write_txt(self.save_path, 'Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss / len(tbar)))
        # Validate, checkpoint the model, and log.
        loss_mean, dice_mean = self.validation(stage=3)
        if dice_mean > self.max_dice:
            is_best = True
            self.max_dice = dice_mean
        else:
            is_best = False
        self.lr = lr_scheduler.get_lr()
        state = {'epoch': epoch,
                 'state_dict': self.unet.module.state_dict(),
                 'max_dice': self.max_dice,
                 'optimizer': self.optimizer.state_dict(),
                 'lr': self.lr}
        self.save_checkpoint(state, 3, index, is_best)
        self.writer.add_scalar('Stage3_val_loss', loss_mean, epoch)
        self.writer.add_scalar('Stage3_val_dice', dice_mean, epoch)
        self.writer.add_scalar('Stage3_lr', self.lr[0], epoch)
        # Learning-rate decay.
        lr_scheduler.step()
def validation(self, stage=1):
    """Evaluate the current model on the validation set.

    Args:
        stage: training stage (1, 2 or 3); selects the matching criterion.
    Returns: (mean loss, mean Dice) over the validation batches.
    """
    # eval() is required so that BN and dropout layers run in eval mode.
    # torch.no_grad() skips gradient bookkeeping, which speeds things up
    # and allows a larger batch size.
    self.unet.eval()
    tbar = tqdm.tqdm(self.valid_loader)
    loss_sum, dice_sum = 0, 0
    if stage == 1:
        criterion = self.criterion
    elif stage == 2:
        criterion = self.criterion_stage2
    elif stage == 3:
        criterion = self.criterion_stage3
    with torch.no_grad():
        for i, (images, masks) in enumerate(tbar):
            images = images.to(self.device)
            masks = masks.to(self.device)
            net_output = self.unet(images)
            net_output_flat = net_output.view(net_output.size(0), -1)
            masks_flat = masks.view(masks.size(0), -1)
            loss_set = criterion(net_output_flat, masks_flat)
            try:
                loss_num = len(loss_set)
            except TypeError:  # a single scalar loss has no len(); don't mask other errors
                loss_num = 1
            # The criterion may return one loss or a sequence whose first
            # element is the total loss.
            if loss_num > 1:
                loss = loss_set[0]
            else:
                loss = loss_set
            loss_sum += loss.item()
            # Dice: apply sigmoid to the logits and binarize at the default 0.5.
            net_output_flat_sign = (torch.sigmoid(net_output_flat) > 0.5).float()
            dice = self.dice_overall(net_output_flat_sign, masks_flat).mean()
            dice_sum += dice.item()
            descript = "Val Loss: {:.7f}, dice: {:.7f}".format(loss.item(), dice.item())
            tbar.set_description(desc=descript)
    loss_mean, dice_mean = loss_sum / len(tbar), dice_sum / len(tbar)
    print("Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
    write_txt(self.save_path, "Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
    return loss_mean, dice_mean
# dice for threshold selection
def dice_overall(self, preds, targs):
    """Per-sample Dice coefficient between binary prediction and target masks.

    Both inputs are flattened to (batch, -1) and moved to the CPU. A sample
    where prediction and target are both empty (union == 0) scores a perfect
    1.0: its intersection is forced to 1 and its union to 2 before the final
    2 * intersection / union.

    Args:
        preds: predicted binary masks, first dimension is the batch.
        targs: ground-truth binary masks, same shape as preds.
    Returns: 1-D float tensor of length batch with the Dice score per sample.
    """
    batch = preds.shape[0]
    flat_preds = preds.view(batch, -1).cpu()
    flat_targs = targs.view(batch, -1).cpu()
    # Element-wise product keeps only positions where both masks are 1.
    overlap = (flat_preds * flat_targs).sum(-1).float()
    # Element-wise sum counts every positive position in either mask.
    combined = (flat_preds + flat_targs).sum(-1).float()
    empty_pair = combined == 0
    overlap[empty_pair] = 1
    combined[empty_pair] = 2
    return 2. * overlap / combined
def classify_score(self, preds, targs):
    """Accuracy of the implied has-mask/no-mask classification.

    An image is treated as positive when its mask contains at least one
    positive pixel, negative otherwise. The score is the fraction of images
    for which prediction and ground truth agree on that label.

    Args:
        preds: predicted mask matrices, first dimension is the batch.
        targs: ground-truth mask matrices, same shape as preds.
    Returns: classification accuracy as a Python float.
    """
    batch = preds.shape[0]
    # Reduce each sample to a single boolean: does it contain any mask pixel?
    pred_has_mask = (preds.view(batch, -1).sum(1) > 0).cpu()
    targ_has_mask = (targs.view(batch, -1).sum(1) > 0).cpu()
    correct = torch.sum(pred_has_mask == targ_has_mask)
    return correct.item() / batch
def _mean_val_dice(self, prob_thr, pixel_thr=None):
    """Mean Dice over the validation set for one threshold combination.

    Args:
        prob_thr: probability threshold applied to the sigmoid output.
        pixel_thr: optional pixel-count threshold; predictions with fewer
            positive pixels than this are zeroed out (noise filtering).
    Returns: mean of the per-batch mean Dice scores.
    """
    scores = []
    tbar = tqdm.tqdm(self.valid_loader)
    for images, masks in tbar:
        images = images.to(self.device)
        net_output = torch.sigmoid(self.unet(images))
        preds = (net_output > prob_thr).to(self.device).float()  # binarize at prob_thr
        if pixel_thr is not None:
            preds[preds.view(preds.shape[0], -1).sum(-1) < pixel_thr, ...] = 0.0  # filter noisy predictions
        scores.append(self.dice_overall(preds, masks).mean())
    return sum(scores) / len(scores)

def choose_threshold(self, model_path, index):
    """Linear search for the best probability threshold and pixel threshold.

    Runs a coarse then a fine search over probability thresholds, followed by
    a search over pixel-count thresholds (skipped for stage 3), and saves a
    plot of all three searches.

    Args:
        model_path: path of the checkpoint to evaluate.
        index: current fold index (used in the plot file name).
    Returns: (best probability threshold, best pixel threshold, best score).
    """
    self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
    # Stage is encoded in the file name '<model>_<stage>_<fold>...'; parse
    # with int() instead of eval() to avoid executing file-name derived text.
    stage = int(model_path.split('/')[-1].split('_')[2])
    print('Loaded from %s, using choose_threshold!' % model_path)
    self.unet.eval()
    with torch.no_grad():
        # Coarse search over the probability threshold.
        thrs_big = np.arange(0.1, 1, 0.1)
        dices_big = np.array([self._mean_val_dice(th) for th in thrs_big])
        best_thrs_big = thrs_big[dices_big.argmax()]
        # Fine search around the coarse optimum.
        thrs_little = np.arange(best_thrs_big - 0.05, best_thrs_big + 0.05, 0.01)
        dices_little = np.array([self._mean_val_dice(th) for th in thrs_little])
        best_thr = thrs_little[dices_little.argmax()]
        # Search the pixel-count threshold; stage 3 trains only on samples
        # with a mask, so the noise filter is not used there.
        if stage != 3:
            pixel_thrs = np.arange(0, 2304, 256)
            dices_pixel = np.array([self._mean_val_dice(best_thr, pixel_thr) for pixel_thr in pixel_thrs])
            score = dices_pixel.max()
            best_pixel_thr = pixel_thrs[dices_pixel.argmax()]
        elif stage == 3:
            best_pixel_thr, score = 0, dices_little.max()
    print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
    plt.figure(figsize=(10.4, 4.8))
    plt.subplot(1, 3, 1)
    plt.title('Large-scale search')
    plt.plot(thrs_big, dices_big)
    plt.subplot(1, 3, 2)
    plt.title('Little-scale search')
    plt.plot(thrs_little, dices_little)
    plt.subplot(1, 3, 3)
    plt.title('pixel thrs search')
    if stage != 3:
        plt.plot(pixel_thrs, dices_pixel)
    plt.savefig(os.path.join(self.save_path, 'stage{}'.format(stage) + '_fold' + str(index)))
    plt.close()
    return float(best_thr), float(best_pixel_thr), float(score)
def pred_mask_count(self, model_path, masks_bool, val_index, best_thr, best_pixel_thr):
    '''Load a model and report mask/no-mask classification counts on the validation set.

    Intended for stage 2 after the thresholds were chosen by Dice, to inspect
    how the thresholded predictions behave as a classifier.

    Args:
        model_path: path of the checkpoint to load
        masks_bool: for every sample of the full dataset, whether it has a mask
        val_index: indices of the validation samples within the full dataset
        best_thr: chosen probability threshold
        best_pixel_thr: chosen pixel-count threshold
    Return: None. Prints how many positive samples exist and how many were
        predicted positive. Note these counts are only indicative; they are
        not a rigorous accuracy measure.
    '''
    count_true, count_pred = 0,0
    # Count the ground-truth positive samples in the validation split.
    for index1 in val_index:
        if masks_bool[index1]:
            count_true += 1
    self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
    print('Loaded from %s' % model_path)
    self.unet.eval()
    with torch.no_grad():
        tmp = []
        tbar = tqdm.tqdm(self.valid_loader)
        for i, (images, masks) in enumerate(tbar):
            # GT : Ground Truth
            images = images.to(self.device)
            net_output = torch.sigmoid(self.unet(images))
            preds = (net_output > best_thr).to(self.device).float()  # binarize at best_thr
            preds[preds.view(preds.shape[0],-1).sum(-1) < best_pixel_thr,...] = 0.0  # filter noisy predictions
            n = preds.shape[0]  # batch size
            preds = preds.view(n, -1)
            # A sample counts as predicted-positive when any pixel survives.
            for index2 in range(n):
                pred = preds[index2, ...]
                if torch.sum(pred) > 0:
                    count_pred += 1
            tmp.append(self.dice_overall(preds, masks).mean())
        print('score:', sum(tmp) / len(tmp))
    print('count_true:{}, count_pred:{}'.format(count_true, count_pred))
def grid_search(self, thrs_big, pixel_thrs):
    """Grid search over probability thresholds and pixel-count thresholds.

    Args:
        thrs_big: iterable of probability thresholds to try.
        pixel_thrs: iterable of pixel-count thresholds to try.
    Returns: (best threshold, best pixel threshold, best score, 2-D score
        matrix with one row per threshold and one column per pixel threshold).
    """
    with torch.no_grad():
        dices_big = []  # one row of pixel-threshold scores per probability threshold
        for th in thrs_big:
            dices_pixel = []
            for pixel_thr in pixel_thrs:
                tmp = []
                tbar = tqdm.tqdm(self.valid_loader)
                for i, (images, masks) in enumerate(tbar):
                    # GT : Ground Truth
                    images = images.to(self.device)
                    net_output = torch.sigmoid(self.unet(images))
                    preds = (net_output > th).to(self.device).float()  # binarize at th
                    preds[preds.view(preds.shape[0],-1).sum(-1) < pixel_thr,...] = 0.0  # filter noisy predictions
                    tmp.append(self.dice_overall(preds, masks).mean())
                dices_pixel.append(sum(tmp) / len(tmp))
            dices_big.append(dices_pixel)
        dices_big = np.array(dices_big)
        print('粗略挑选最优阈值和最优像素阈值,dices_big_shape:{}'.format(np.shape(dices_big)))
        # Locate the (row, col) of the best score. np.where returns parallel
        # row/col index arrays; with several equal maxima, take the first hit.
        # (The previous code collapsed the tuple to its row array and then
        # used a row index as a column index when maxima were tied.)
        rows, cols = np.where(dices_big == np.max(dices_big))
        best_thrs_big, best_pixel_thr = thrs_big[int(rows[0])], pixel_thrs[int(cols[0])]
        best_thr, score = best_thrs_big, dices_big.max()
    return best_thr, best_pixel_thr, score, dices_big
def choose_threshold_grid(self, model_path, index):
    '''Grid search for the best probability threshold and pixel threshold.

    Performs a coarse search followed by a fine search around the coarse
    optimum, and saves heatmaps of both search grids.

    Args:
        model_path: path of the checkpoint to evaluate
        index: current fold index (used in the heatmap file name)
    Return: best probability threshold, best pixel threshold, best score
    '''
    self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
    # Stage is encoded in the file name '<model>_<stage>_<fold>...'; parse
    # with int() instead of eval() to avoid executing file-name derived text.
    stage = int(model_path.split('/')[-1].split('_')[2])
    print('Loaded from %s, using choose_threshold_grid!' % model_path)
    self.unet.eval()
    thrs_big1 = np.arange(0.60, 0.81, 0.015)  # coarse probability thresholds
    pixel_thrs1 = np.arange(768, 2305, 256)  # coarse pixel-count thresholds
    best_thr1, best_pixel_thr1, score1, dices_big1 = self.grid_search(thrs_big1, pixel_thrs1)
    print('best_thr1:{}, best_pixel_thr1:{}, score1:{}'.format(best_thr1, best_pixel_thr1, score1))
    thrs_big2 = np.arange(best_thr1-0.015, best_thr1+0.015, 0.0075)  # fine probability thresholds
    pixel_thrs2 = np.arange(best_pixel_thr1-256, best_pixel_thr1+257, 128)  # fine pixel-count thresholds
    best_thr2, best_pixel_thr2, score2, dices_big2 = self.grid_search(thrs_big2, pixel_thrs2)
    print('best_thr2:{}, best_pixel_thr2:{}, score2:{}'.format(best_thr2, best_pixel_thr2, score2))
    # Keep whichever of the two passes scored best.
    if score1 < score2: best_thr, best_pixel_thr, score, dices_big = best_thr2, best_pixel_thr2, score2, dices_big2
    else: best_thr, best_pixel_thr, score, dices_big = best_thr1, best_pixel_thr1, score1, dices_big1
    print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
    f, (ax1, ax2) = plt.subplots(figsize=(14.4, 4.8), ncols=2)
    cmap = sns.cubehelix_palette(start = 1.5, rot = 3, gamma=0.8, as_cmap = True)
    data1 = pd.DataFrame(data=dices_big1, index=np.around(thrs_big1, 3), columns=pixel_thrs1)
    sns.heatmap(data1, linewidths = 0.05, ax = ax1, vmax=np.max(dices_big1), vmin=np.min(dices_big1), cmap=cmap, annot=True, fmt='.4f')
    ax1.set_title('Large-scale search')
    data2 = pd.DataFrame(data=dices_big2, index=np.around(thrs_big2, 3), columns=pixel_thrs2)
    sns.heatmap(data2, linewidths = 0.05, ax = ax2, vmax=np.max(dices_big2), vmin=np.min(dices_big2), cmap=cmap, annot=True, fmt='.4f')
    ax2.set_title('Little-scale search')
    f.savefig(os.path.join(self.save_path, 'stage{}'.format(stage)+'_fold'+str(index)))
    plt.close()
    return float(best_thr), float(best_pixel_thr), float(score)
def get_dice_onval(self, model_path, best_thr, pixel_thr):
    '''Evaluate an already-trained model on the validation set with chosen thresholds.

    Args:
        model_path: path of the checkpoint to load
        best_thr: chosen probability threshold
        pixel_thr: chosen pixel-count threshold
    Return: None (the resulting score is printed)
    '''
    self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
    # Stage is encoded in the file name '<model>_<stage>_<fold>...'; parse
    # with int() instead of eval() to avoid executing file-name derived text.
    stage = int(model_path.split('/')[-1].split('_')[2])
    print('Loaded from %s, using get_dice_onval!' % model_path)
    self.unet.eval()
    with torch.no_grad():
        tmp = []
        tbar = tqdm.tqdm(self.valid_loader)
        for i, (images, masks) in enumerate(tbar):
            # GT : Ground Truth
            images = images.to(self.device)
            net_output = torch.sigmoid(self.unet(images))
            preds = (net_output > best_thr).to(self.device).float()  # binarize at best_thr
            # Stage 3 trains only on masked samples, so no noise filter there.
            if stage != 3:
                preds[preds.view(preds.shape[0], -1).sum(-1) < pixel_thr, ...] = 0.0  # filter noisy predictions
            tmp.append(self.dice_overall(preds, masks).mean())
        score = sum(tmp) / len(tmp)
    print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, pixel_thr, score))
| [
"seaborn.cubehelix_palette",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"matplotlib.pyplot.switch_backend",
"models.network.U_Net",
"numpy.arange",
"segmentation_models_pytorch.PSPNet",
"torch.utils.tensorboard.SummaryWriter",
"models.network.R2AttU_Net",
"matplotlib.pyplot.plot",
... | [((448, 473), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (466, 473), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1290), 'utils.loss.SoftBCEDiceLoss', 'SoftBCEDiceLoss', ([], {'weight': '[0.25, 0.75]'}), '(weight=[0.25, 0.75])\n', (1269, 1290), False, 'from utils.loss import GetLoss, RobustFocalLoss2d, BCEDiceLoss, SoftBCEDiceLoss, SoftBceLoss, LovaszLoss\n'), ((1408, 1444), 'utils.loss.SoftBCEDiceLoss', 'SoftBCEDiceLoss', ([], {'weight': '[0.25, 0.75]'}), '(weight=[0.25, 0.75])\n', (1423, 1444), False, 'from utils.loss import GetLoss, RobustFocalLoss2d, BCEDiceLoss, SoftBCEDiceLoss, SoftBceLoss, LovaszLoss\n'), ((1478, 1514), 'utils.loss.SoftBCEDiceLoss', 'SoftBCEDiceLoss', ([], {'weight': '[0.25, 0.75]'}), '(weight=[0.25, 0.75])\n', (1493, 1514), False, 'from utils.loss import GetLoss, RobustFocalLoss2d, BCEDiceLoss, SoftBCEDiceLoss, SoftBceLoss, LovaszLoss\n'), ((4909, 4934), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4932, 4934), False, 'import torch\n'), ((5764, 5842), 'os.path.join', 'os.path.join', (['self.save_path', "('%s_%d_%d.pth' % (self.model_type, stage, index))"], {}), "(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index))\n", (5776, 5842), False, 'import os, shutil\n'), ((5852, 5879), 'torch.save', 'torch.save', (['state', 'pth_path'], {}), '(state, pth_path)\n', (5862, 5879), False, 'import torch\n'), ((6314, 6355), 'os.path.join', 'os.path.join', (['self.save_path', 'self.resume'], {}), '(self.save_path, self.resume)\n', (6326, 6355), False, 'import os, shutil\n'), ((6368, 6395), 'os.path.isfile', 'os.path.isfile', (['weight_path'], {}), '(weight_path)\n', (6382, 6395), False, 'import os, shutil\n'), ((8160, 8233), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', '(stage1_epoches + 10)'], {}), '(self.optimizer, stage1_epoches + 10)\n', (8196, 8233), False, 'from torch import optim\n'), 
((13765, 13837), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', '(stage2_epoches + 5)'], {}), '(self.optimizer, stage2_epoches + 5)\n', (13801, 13837), False, 'from torch import optim\n'), ((20058, 20130), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', '(stage3_epoches + 5)'], {}), '(self.optimizer, stage3_epoches + 5)\n', (20094, 20130), False, 'from torch import optim\n'), ((24489, 24517), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (24498, 24517), False, 'import tqdm\n'), ((27880, 27907), 'torch.sum', 'torch.sum', (['(preds_ == targs_)'], {}), '(preds_ == targs_)\n', (27889, 27907), False, 'import torch\n'), ((31731, 31762), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.4, 4.8)'}), '(figsize=(10.4, 4.8))\n', (31741, 31762), True, 'import matplotlib.pyplot as plt\n'), ((31772, 31792), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (31783, 31792), True, 'import matplotlib.pyplot as plt\n'), ((31802, 31833), 'matplotlib.pyplot.title', 'plt.title', (['"""Large-scale search"""'], {}), "('Large-scale search')\n", (31811, 31833), True, 'import matplotlib.pyplot as plt\n'), ((31843, 31872), 'matplotlib.pyplot.plot', 'plt.plot', (['thrs_big', 'dices_big'], {}), '(thrs_big, dices_big)\n', (31851, 31872), True, 'import matplotlib.pyplot as plt\n'), ((31882, 31902), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (31893, 31902), True, 'import matplotlib.pyplot as plt\n'), ((31912, 31944), 'matplotlib.pyplot.title', 'plt.title', (['"""Little-scale search"""'], {}), "('Little-scale search')\n", (31921, 31944), True, 'import matplotlib.pyplot as plt\n'), ((31954, 31989), 'matplotlib.pyplot.plot', 'plt.plot', (['thrs_little', 'dices_little'], {}), '(thrs_little, dices_little)\n', (31962, 31989), True, 'import matplotlib.pyplot as 
plt\n'), ((31999, 32019), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (32010, 32019), True, 'import matplotlib.pyplot as plt\n'), ((32029, 32059), 'matplotlib.pyplot.title', 'plt.title', (['"""pixel thrs search"""'], {}), "('pixel thrs search')\n", (32038, 32059), True, 'import matplotlib.pyplot as plt\n'), ((32257, 32268), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32266, 32268), True, 'import matplotlib.pyplot as plt\n'), ((36385, 36412), 'numpy.arange', 'np.arange', (['(0.6)', '(0.81)', '(0.015)'], {}), '(0.6, 0.81, 0.015)\n', (36394, 36412), True, 'import numpy as np\n'), ((36445, 36470), 'numpy.arange', 'np.arange', (['(768)', '(2305)', '(256)'], {}), '(768, 2305, 256)\n', (36454, 36470), True, 'import numpy as np\n'), ((36708, 36763), 'numpy.arange', 'np.arange', (['(best_thr1 - 0.015)', '(best_thr1 + 0.015)', '(0.0075)'], {}), '(best_thr1 - 0.015, best_thr1 + 0.015, 0.0075)\n', (36717, 36763), True, 'import numpy as np\n'), ((36791, 36851), 'numpy.arange', 'np.arange', (['(best_pixel_thr1 - 256)', '(best_pixel_thr1 + 257)', '(128)'], {}), '(best_pixel_thr1 - 256, best_pixel_thr1 + 257, 128)\n', (36800, 36851), True, 'import numpy as np\n'), ((37433, 37475), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14.4, 4.8)', 'ncols': '(2)'}), '(figsize=(14.4, 4.8), ncols=2)\n', (37445, 37475), True, 'import matplotlib.pyplot as plt\n'), ((37494, 37558), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'start': '(1.5)', 'rot': '(3)', 'gamma': '(0.8)', 'as_cmap': '(True)'}), '(start=1.5, rot=3, gamma=0.8, as_cmap=True)\n', (37515, 37558), True, 'import seaborn as sns\n'), ((38262, 38273), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38271, 38273), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2290), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': "(self.save_path + '/' + TIMESTAMP)"}), "(log_dir=self.save_path + '/' + TIMESTAMP)\n", 
(2248, 2290), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3060, 3101), 'models.network.U_Net', 'U_Net', ([], {'img_ch': '(3)', 'output_ch': 'self.output_ch'}), '(img_ch=3, output_ch=self.output_ch)\n', (3065, 3101), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((4961, 4993), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.unet'], {}), '(self.unet)\n', (4982, 4993), False, 'import torch\n'), ((5955, 6002), 'utils.mask_functions.write_txt', 'write_txt', (['self.save_path', '"""Saving Best Model."""'], {}), "(self.save_path, 'Saving Best Model.')\n", (5964, 6002), False, 'from utils.mask_functions import write_txt\n'), ((6423, 6446), 'torch.load', 'torch.load', (['weight_path'], {}), '(weight_path)\n', (6433, 6446), False, 'import torch\n'), ((7055, 7155), 'utils.mask_functions.write_txt', 'write_txt', (['self.save_path', "('%s is Successfully Loaded from %s' % (self.model_type, weight_path))"], {}), "(self.save_path, '%s is Successfully Loaded from %s' % (self.\n model_type, weight_path))\n", (7064, 7155), False, 'from utils.mask_functions import write_txt\n'), ((8740, 8768), 'tqdm.tqdm', 'tqdm.tqdm', (['self.train_loader'], {}), '(self.train_loader)\n', (8749, 8768), False, 'import tqdm\n'), ((14072, 14100), 'tqdm.tqdm', 'tqdm.tqdm', (['self.train_loader'], {}), '(self.train_loader)\n', (14081, 14100), False, 'import tqdm\n'), ((20365, 20393), 'tqdm.tqdm', 'tqdm.tqdm', (['self.train_loader'], {}), '(self.train_loader)\n', (20374, 20393), False, 'import tqdm\n'), ((24777, 24792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24790, 24792), False, 'import torch\n'), ((27720, 27739), 'torch.sum', 'torch.sum', (['preds', '(1)'], {}), '(preds, 1)\n', (27729, 27739), False, 'import torch\n'), ((27741, 27760), 'torch.sum', 'torch.sum', (['targs', '(1)'], {}), '(targs, 1)\n', (27750, 27760), False, 'import torch\n'), ((28484, 28499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28497, 28499), 
False, 'import torch\n'), ((28578, 28600), 'numpy.arange', 'np.arange', (['(0.1)', '(1)', '(0.1)'], {}), '(0.1, 1, 0.1)\n', (28587, 28600), True, 'import numpy as np\n'), ((29343, 29362), 'numpy.array', 'np.array', (['dices_big'], {}), '(dices_big)\n', (29351, 29362), True, 'import numpy as np\n'), ((29503, 29562), 'numpy.arange', 'np.arange', (['(best_thrs_big - 0.05)', '(best_thrs_big + 0.05)', '(0.01)'], {}), '(best_thrs_big - 0.05, best_thrs_big + 0.05, 0.01)\n', (29512, 29562), True, 'import numpy as np\n'), ((30310, 30332), 'numpy.array', 'np.array', (['dices_little'], {}), '(dices_little)\n', (30318, 30332), True, 'import numpy as np\n'), ((32097, 32130), 'matplotlib.pyplot.plot', 'plt.plot', (['pixel_thrs', 'dices_pixel'], {}), '(pixel_thrs, dices_pixel)\n', (32105, 32130), True, 'import matplotlib.pyplot as plt\n'), ((33124, 33139), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (33137, 33139), False, 'import torch\n'), ((33183, 33211), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (33192, 33211), False, 'import tqdm\n'), ((34356, 34371), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (34369, 34371), False, 'import torch\n'), ((35373, 35392), 'numpy.array', 'np.array', (['dices_big'], {}), '(dices_big)\n', (35381, 35392), True, 'import numpy as np\n'), ((38896, 38911), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (38909, 38911), False, 'import torch\n'), ((38978, 39006), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (38987, 39006), False, 'import tqdm\n'), ((2183, 2206), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2204, 2206), False, 'import datetime\n'), ((2791, 2816), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2814, 2816), False, 'import torch\n'), ((3171, 3224), 'models.network.R2U_Net', 'R2U_Net', ([], {'img_ch': '(3)', 'output_ch': 'self.output_ch', 't': 'self.t'}), '(img_ch=3, output_ch=self.output_ch, 
t=self.t)\n', (3178, 3224), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((6032, 6110), 'os.path.join', 'os.path.join', (['self.save_path', "('%s_%d_%d.pth' % (self.model_type, stage, index))"], {}), "(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index))\n", (6044, 6110), False, 'import os, shutil\n'), ((6112, 6199), 'os.path.join', 'os.path.join', (['self.save_path', "('%s_%d_%d_best.pth' % (self.model_type, stage, index))"], {}), "(self.save_path, '%s_%d_%d_best.pth' % (self.model_type, stage,\n index))\n", (6124, 6199), False, 'import os, shutil\n'), ((28262, 28284), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (28272, 28284), False, 'import torch\n'), ((28692, 28720), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (28701, 28720), False, 'import tqdm\n'), ((29653, 29681), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (29662, 29681), False, 'import tqdm\n'), ((30556, 30579), 'numpy.arange', 'np.arange', (['(0)', '(2304)', '(256)'], {}), '(0, 2304, 256)\n', (30565, 30579), True, 'import numpy as np\n'), ((31392, 31413), 'numpy.array', 'np.array', (['dices_pixel'], {}), '(dices_pixel)\n', (31400, 31413), True, 'import numpy as np\n'), ((32998, 33020), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (33008, 33020), False, 'import torch\n'), ((36151, 36173), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (36161, 36173), False, 'import torch\n'), ((37618, 37641), 'numpy.around', 'np.around', (['thrs_big1', '(3)'], {}), '(thrs_big1, 3)\n', (37627, 37641), True, 'import numpy as np\n'), ((37726, 37744), 'numpy.max', 'np.max', (['dices_big1'], {}), '(dices_big1)\n', (37732, 37744), True, 'import numpy as np\n'), ((37751, 37769), 'numpy.min', 'np.min', (['dices_big1'], {}), '(dices_big1)\n', (37757, 37769), True, 'import numpy as np\n'), ((37905, 37928), 'numpy.around', 'np.around', (['thrs_big2', 
'(3)'], {}), '(thrs_big2, 3)\n', (37914, 37928), True, 'import numpy as np\n'), ((38013, 38031), 'numpy.max', 'np.max', (['dices_big2'], {}), '(dices_big2)\n', (38019, 38031), True, 'import numpy as np\n'), ((38038, 38056), 'numpy.min', 'np.min', (['dices_big2'], {}), '(dices_big2)\n', (38044, 38056), True, 'import numpy as np\n'), ((38684, 38706), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (38694, 38706), False, 'import torch\n'), ((3295, 3339), 'models.network.AttU_Net', 'AttU_Net', ([], {'img_ch': '(3)', 'output_ch': 'self.output_ch'}), '(img_ch=3, output_ch=self.output_ch)\n', (3303, 3339), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((30692, 30720), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (30701, 30720), False, 'import tqdm\n'), ((34638, 34666), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_loader'], {}), '(self.valid_loader)\n', (34647, 34666), False, 'import tqdm\n'), ((35478, 35497), 'numpy.shape', 'np.shape', (['dices_big'], {}), '(dices_big)\n', (35486, 35497), True, 'import numpy as np\n'), ((35518, 35535), 'numpy.max', 'np.max', (['dices_big'], {}), '(dices_big)\n', (35524, 35535), True, 'import numpy as np\n'), ((35582, 35594), 'numpy.shape', 'np.shape', (['re'], {}), '(re)\n', (35590, 35594), True, 'import numpy as np\n'), ((3412, 3468), 'models.network.R2AttU_Net', 'R2AttU_Net', ([], {'img_ch': '(3)', 'output_ch': 'self.output_ch', 't': 'self.t'}), '(img_ch=3, output_ch=self.output_ch, t=self.t)\n', (3422, 3468), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((33827, 33842), 'torch.sum', 'torch.sum', (['pred'], {}), '(pred)\n', (33836, 33842), False, 'import torch\n'), ((3645, 3710), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', activation=None)\n", (3653, 3710), True, 'import segmentation_models_pytorch 
as smp\n'), ((25674, 25704), 'torch.sigmoid', 'torch.sigmoid', (['net_output_flat'], {}), '(net_output_flat)\n', (25687, 25704), False, 'import torch\n'), ((3786, 3851), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""resnet50"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet50', encoder_weights='imagenet', activation=None)\n", (3794, 3851), True, 'import segmentation_models_pytorch as smp\n'), ((3937, 4012), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""se_resnext50_32x4d"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('se_resnext50_32x4d', encoder_weights='imagenet', activation=None)\n", (3945, 4012), True, 'import segmentation_models_pytorch as smp\n'), ((4091, 4159), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""densenet121"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('densenet121', encoder_weights='imagenet', activation=None)\n", (4099, 4159), True, 'import segmentation_models_pytorch as smp\n'), ((4237, 4330), 'models.Transpose_unet.unet.model.Unet', 'Unet_t', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None', 'use_ConvTranspose2d': '(True)'}), "('resnet34', encoder_weights='imagenet', activation=None,\n use_ConvTranspose2d=True)\n", (4243, 4330), True, 'from models.Transpose_unet.unet.model import Unet as Unet_t\n'), ((4406, 4473), 'models.octave_unet.unet.model.OctaveUnet', 'OctaveUnet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', activation=None)\n", (4416, 4473), False, 'from models.octave_unet.unet.model import OctaveUnet\n'), ((4553, 4590), 'models.linknet.LinkNet34', 'LinkNet34', ([], {'num_classes': 'self.output_ch'}), '(num_classes=self.output_ch)\n', (4562, 4590), False, 'from models.linknet import LinkNet34\n'), ((4666, 4738), 'models.deeplabv3.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'model_backbone': '"""res50_atrous"""', 
'num_classes': 'self.output_ch'}), "(model_backbone='res50_atrous', num_classes=self.output_ch)\n", (4679, 4738), False, 'from models.deeplabv3.deeplabv3plus import DeepLabV3Plus\n'), ((4816, 4894), 'segmentation_models_pytorch.PSPNet', 'smp.PSPNet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'classes': '(1)', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', classes=1, activation=None)\n", (4826, 4894), True, 'import segmentation_models_pytorch as smp\n')] |
from socket_pair.socket_pair import SockPairs
import time
import cv2
import numpy as np

# Round-trip timing benchmark: sync with the 'Wrapper' peer, receive one
# payload per iteration, and report the per-iteration and average latency.
# NOTE(review): cv2 and N are never used below -- confirm before removing.
myself = 'RPI'
others = ['Wrapper']
sockObj = SockPairs(myself, others)
max_iter = 50
N = 40
times = []
# Initial barrier so both sides start the measurement loop together.
sockObj.sync_all()
for i in range(max_iter):
    start = time.time()
    sockObj.sync_all()
    # Receive the payload sent by the 'Wrapper' peer for this iteration.
    img_list = sockObj.listen(frm='Wrapper')
    sockObj.sync_all()
    time_passed = time.time() - start
    print(f'{i}/{max_iter} {time_passed}')
    times.append(time_passed)
print(f'Average time: {np.mean(times)}')
"numpy.mean",
"socket_pair.socket_pair.SockPairs",
"time.time"
] | [((136, 161), 'socket_pair.socket_pair.SockPairs', 'SockPairs', (['myself', 'others'], {}), '(myself, others)\n', (145, 161), False, 'from socket_pair.socket_pair import SockPairs\n'), ((254, 265), 'time.time', 'time.time', ([], {}), '()\n', (263, 265), False, 'import time\n'), ((379, 390), 'time.time', 'time.time', ([], {}), '()\n', (388, 390), False, 'import time\n'), ((496, 510), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (503, 510), True, 'import numpy as np\n')] |
import numpy as np
import sys
import math
from itertools import combinations
class Sphere:
    """A rigid sphere with a name, mass, radius, position and velocity."""

    def __init__(self, name, mass, radius, xpos, ypos, zpos, xvel, yvel, zvel):
        self.name = name
        self.radius = radius
        self.mass = mass
        self.pos = np.array((xpos, ypos, zpos))
        self.vel = np.array((xvel, yvel, zvel))

    def overlaps(self, other):
        """Return True when this sphere touches or penetrates ``other``."""
        gap = np.linalg.norm(self.pos - other.pos)
        return gap <= self.radius + other.radius

    def move(self, dt):
        """Advance the position by one explicit-Euler step of size ``dt``."""
        self.pos += self.vel * dt
class World:
    """Spherical universe that steps a set of Spheres forward in time,
    handling sphere-sphere elastic collisions and boundary reflections,
    and logging every event to stdout."""
    def __init__(self, spheres, radius, TimeLimit, dt):
        self.spheres = spheres
        self.dt = dt
        self.radius = radius
        self.energy = 0
        self.momentum = np.array((0.0, 0.0, 0.0))
        self.time = 0.0
        self.TimeLimit = TimeLimit
        # Print initial conditions and seed energy/momentum.
        self.initial()
    def getenergy(self):
        """Recompute the total kinetic energy (sum of 1/2 m |v|^2)."""
        self.energy = 0
        for sphere in self.spheres:
            self.energy = self.energy+0.5 * sphere.mass * sum((sphere.vel ** 2))
    def getmomentum(self):
        """Recompute the total linear momentum (sum of m v)."""
        self.momentum = np.array((0.0, 0.0, 0.0))
        for sphere in self.spheres:
            self.momentum += sphere.mass * sphere.vel
    def new_vel(self, sphere1, sphere2):
        # Standard two-body elastic collision update:
        #   v1' = v1 - 2 m2/(m1+m2) * <v1-v2, r1-r2>/|r1-r2|^2 * (r1-r2)
        # and symmetrically for sphere2.
        m1=sphere1.mass
        m2=sphere2.mass
        mtotal = m1 + m2
        r1=sphere1.pos
        r2=sphere2.pos
        v1=sphere1.vel
        v2=sphere2.vel
        sphere1.vel = v1 - 2*(m2 / mtotal)*(np.dot(v1-v2, r1-r2)/np.dot((r1-r2),(r1-r2)))* (r1 - r2)
        sphere2.vel = v2 - 2*(m1 / mtotal)*(np.dot(v2-v1, r2-r1)/np.dot((r2-r1),(r2-r1)))* (r2 - r1)
    def collisions(self):
        # Test every unordered pair once; on overlap, exchange momentum and
        # log the event.
        # NOTE(review): overlapping spheres are not separated after the
        # velocity update, so a pair may re-collide on consecutive steps
        # while still overlapping -- confirm this is acceptable for dt.
        for i,j in combinations(self.spheres, 2):
            if i.overlaps(j):
                self.new_vel(i, j)
                self.collide(i, j)
    def boundary_collisions(self, p):
        # Reflect the velocity about the radial direction once the sphere
        # touches or crosses the universe boundary.
        if math.sqrt(np.dot(p.pos, p.pos.T)) + p.radius < self.radius:
            pass
        else:
            p.vel = p.vel - 2 * (np.dot(p.vel, p.pos) * (p.pos / np.sum(p.pos ** 2)))
            self.reflect(p)
    def run(self):
        """Main loop: step until TimeLimit is reached (returns 0) or the
        total kinetic energy drops to zero."""
        while self.energy > 0.0 :
            self.time = self.time+self.dt
            if self.time>=self.TimeLimit:
                return 0
            for i, p in enumerate(self.spheres):
                p.move(self.dt)
                self.boundary_collisions(p)
            self.collisions()
            self.getenergy()
    def initial(self):
        """Print the initial conditions, then total energy and momentum."""
        if len(self.spheres) > 0:
            print("Here are the initial conditions.")
            print("universe radius {}".format(self.radius))
            print("end simulation {}".format(self.TimeLimit))
            for sphere in self.spheres:
                s_str = "{} m={} R={} p=({:g},{:g},{:g}) v=({:g},{:g},{:g})".format(sphere.name, sphere.mass, sphere.radius, sphere.pos[0], sphere.pos[1], sphere.pos[2], sphere.vel[0], sphere.vel[1], sphere.vel[2])
                print(s_str)
            self.getenergy()
            self.getmomentum()
            print("energy: {:g}".format(self.energy))
            print("momemtum: ({:g},{:g},{:g})".format(self.momentum[0], self.momentum[1], self.momentum[2]))
            print('\n')
            print("Here are the events.")
            print('\n')
    def reflect(self, sphere):
        """Log a boundary-reflection event plus the current system state."""
        if len(self.spheres) > 0:
            print("time of event:{:g}".format(self.time))
            print("reflecting %s" %sphere.name)
            for sphere in self.spheres:
                sphere_info="{} m={} R={} p=({:g},{:g},{:g}) v=({:g},{:g},{:g})".format(sphere.name, sphere.mass, sphere.radius, sphere.pos[0], sphere.pos[1], sphere.pos[2], sphere.vel[0], sphere.vel[1], sphere.vel[2])
                print(sphere_info)
            self.getenergy()
            self.getmomentum()
            print("energy: {:g}".format(self.energy))
            print("momemtum: ({:g},{:g},{:g})".format(self.momentum[0], self.momentum[1], self.momentum[2]))
            print('\n')
    def collide(self, sphere1, sphere2):
        """Log a sphere-sphere collision event plus the current state."""
        if len(self.spheres) > 0:
            print("time of event: {:g}".format(self.time))
            print("colliding %s %s" %(sphere1.name, sphere2.name))
            for sphere in self.spheres:
                s_str = "{} m={} R={} p=({:g},{:g},{:g}) v=({:g},{:g},{:g})".format(sphere.name, sphere.mass, sphere.radius, sphere.pos[0], sphere.pos[1], sphere.pos[2], sphere.vel[0], sphere.vel[1], sphere.vel[2])
                print(s_str)
            self.getenergy()
            self.getmomentum()
            print("energy: {:g}".format(self.energy))
            print("momemtum: ({:g},{:g},{:g})".format(self.momentum[0], self.momentum[1], self.momentum[2]))
            print('\n')
def Read():
    """Parse the universe radius and time limit from argv, read sphere
    definitions from stdin (9 whitespace-separated fields per line:
    mass radius x y z vx vy vz name), then build and run the World.

    Returns 0 on malformed arguments or input; otherwise runs the
    simulation to completion.
    """
    if len(sys.argv) != 3:
        return 0
    radius = float(sys.argv[1])
    TimeLimit = int(sys.argv[2])
    spheres = []
    for raw_line in sys.stdin:
        fields = raw_line.split()
        if len(fields) != 9:
            # Malformed sphere line: abort without running the simulation.
            return 0
        mass, rad, xp, yp, zp, xv, yv, zv = (float(v) for v in fields[:8])
        spheres.append(Sphere(fields[8], mass, rad, xp, yp, zp, xv, yv, zv))
    sim = World(spheres, radius, TimeLimit, 0.0001)
    sim.run()
def main():
    """Script entry point: delegate to Read()."""
    Read()
if __name__ == '__main__':
    main()
| [
"itertools.combinations",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.linalg.norm"
] | [((271, 299), 'numpy.array', 'np.array', (['(xpos, ypos, zpos)'], {}), '((xpos, ypos, zpos))\n', (279, 299), True, 'import numpy as np\n'), ((319, 347), 'numpy.array', 'np.array', (['(xvel, yvel, zvel)'], {}), '((xvel, yvel, zvel))\n', (327, 347), True, 'import numpy as np\n'), ((437, 457), 'numpy.linalg.norm', 'np.linalg.norm', (['dist'], {}), '(dist)\n', (451, 457), True, 'import numpy as np\n'), ((777, 802), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (785, 802), True, 'import numpy as np\n'), ((1104, 1129), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (1112, 1129), True, 'import numpy as np\n'), ((1677, 1706), 'itertools.combinations', 'combinations', (['self.spheres', '(2)'], {}), '(self.spheres, 2)\n', (1689, 1706), False, 'from itertools import combinations\n'), ((1870, 1892), 'numpy.dot', 'np.dot', (['p.pos', 'p.pos.T'], {}), '(p.pos, p.pos.T)\n', (1876, 1892), True, 'import numpy as np\n'), ((1473, 1497), 'numpy.dot', 'np.dot', (['(v1 - v2)', '(r1 - r2)'], {}), '(v1 - v2, r1 - r2)\n', (1479, 1497), True, 'import numpy as np\n'), ((1494, 1518), 'numpy.dot', 'np.dot', (['(r1 - r2)', '(r1 - r2)'], {}), '(r1 - r2, r1 - r2)\n', (1500, 1518), True, 'import numpy as np\n'), ((1574, 1598), 'numpy.dot', 'np.dot', (['(v2 - v1)', '(r2 - r1)'], {}), '(v2 - v1, r2 - r1)\n', (1580, 1598), True, 'import numpy as np\n'), ((1595, 1619), 'numpy.dot', 'np.dot', (['(r2 - r1)', '(r2 - r1)'], {}), '(r2 - r1, r2 - r1)\n', (1601, 1619), True, 'import numpy as np\n'), ((1985, 2005), 'numpy.dot', 'np.dot', (['p.vel', 'p.pos'], {}), '(p.vel, p.pos)\n', (1991, 2005), True, 'import numpy as np\n'), ((2017, 2035), 'numpy.sum', 'np.sum', (['(p.pos ** 2)'], {}), '(p.pos ** 2)\n', (2023, 2035), True, 'import numpy as np\n')] |
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
import pylab as plt
import matplotlib as mpl
from matplotlib.image import NonUniformImage
from multiprocessing import Pool
import h5py
import matplotlib.gridspec as gridspec
from scipy.interpolate import RectBivariateSpline
from mpl_toolkits.axes_grid1 import make_axes_locatable
#==============================================================================
#
#==============================================================================
def check():
    """Compare the latest Orszag-Tang plot file against the Londrillo
    reference line-out along y = 0.4277, save a comparison figure, and
    return 1 if the RMS error exceeds 0.05, else 0."""
    plt_file = "Orszag-Tang"
    component = "mhd"
    window = [[0, 0], [1,1]]
    # get a list of all the files in this directory
    files = get_files('.', include=[plt_file], exclude=["input", "temp", ".png"], get_all=True)
    # get tracer particle data
    # get fluid variables
    rh5 = ReadBoxLib(files[-1], max_level=-1, limits=window)
    xn, z = rh5.get("p-%s"%component, grid='node')
    xn, yn = xn
    # Cell-centre coordinates from the node coordinates.
    # NOTE(review): (xn[1] + xn[0])/2.0 equals dx/2 only when xn[0] == 0
    # (true for this [0,1] window) -- confirm if the window ever changes.
    x = xn[0:-1]+(xn[1] + xn[0])/2.0
    y = yn[0:-1]+(yn[1] + yn[0])/2.0
    y, x = np.meshgrid(y, x)
    # =============================================================================
    #
    # =============================================================================
    # read in reference values
    # HIGH-ORDER UPWIND SCHEMES FOR MULTIDIMENSIONAL MAGNETOHYDRODYNAMICS
    # THE ASTROPHYSICAL JOURNAL, 530 : 508-524, 2000 February 10
    Londrillo_1D_hi = h5py.File("y=0-4277_t=0-5.hdf5", "r")
    # =============================================================================
    #
    # =============================================================================
    plt.rc("font", family="serif")
    plt.rc("font", size=8)
    plt.rc("mathtext", fontset="cm")
    # matplotlib.rc('text', usetex = True)
    params= {'text.latex.preamble' : [r'\usepackage{amsmath}']}
    plt.rcParams.update(params)
    axes = []
    fig = plt.figure(figsize=(5,2.5))
    gs = gridspec.GridSpec(nrows=1, ncols=3, width_ratios=[0.6, 1, 0.05], wspace=0.01, bottom=0.14, top=0.97, left=0.1, right=0.88)
    #==== 2-D pressure field with the sampling line marked
    ax = fig.add_subplot(gs[0,1]); axes.append(ax)
    ax_cb = fig.add_subplot(gs[0,2])
    # plot the contour
    im = NonUniformImage(ax, interpolation='bilinear', extent=[xn[0], yn[0], xn[-1], yn[-1]],
        cmap="viridis")
    im.set_data(x[:,0], y[0,:], z.T)
    ax.images.append(im)
    # divider = make_axes_locatable(ax)
    # ax_cb = divider.new_horizontal(size="5%", pad=0.05)
    plt.colorbar(im, cax=ax_cb, label=r"$p$")
    # Red dashed line marks the y = 0.4277 line-out used for the comparison.
    ax.plot([0,1],2*[0.4277], "r--", lw=1)
    ax.set_aspect(1)
    ax.set_xlim(0,1)
    ax.set_ylim(0,1)
    ax.set_xticks([])
    ax.set_yticks([])
    #====
    xx = x[:,0]
    yy = y[0,:]
    #==== 1-D comparison against the published reference points
    ax = fig.add_subplot(gs[0,0]); axes.append(ax)
    # plot reference points
    cx = []
    cy = []
    for key, value in Londrillo_1D_hi.items():
        if "Line" not in key:
            continue
        if "CX" not in value:
            continue
        cx.append(value["CX"][()])
        cy.append(value["CY"][()])
    cx = np.array(cx)
    cy = np.array(cy)
    # Sort the reference samples by x before plotting/interpolating.
    I = np.argsort(cx)
    cx = cx[I]
    cy = cy[I]
    ax.plot(cx, cy, "-ok", lw=0.25, ms=2.0, mfc='none', mew=0.3)
    # Interpolate the simulated field onto the reference x locations.
    p = RectBivariateSpline(xx, yy, z)
    sy = p(cx, cx.size*[0.4277], grid=False)
    ax.plot(cx, sy, 'k-', lw=0.5)
    rms = np.sqrt(np.mean(np.square(cy - sy)))
    # print("RMS = ",rms)
    ax.set_xlim(0,1)
    ax.set_xlabel(r"$x$")
    ax.set_ylabel(r"$p$")
    #====
    fig.savefig(plt_file+".png", dpi=300)
    plt.close(fig)
    # Exit status convention: nonzero means the check failed.
    if rms > 0.05:
        return 1
    else:
        return 0
if __name__ == "__main__":
    sys.exit(check())
"sys.path.insert",
"pylab.rc",
"scipy.interpolate.RectBivariateSpline",
"get_boxlib.get_files",
"matplotlib.image.NonUniformImage",
"h5py.File",
"pylab.figure",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"numpy.argsort",
"pylab.colorbar",
"pylab.close",
"numpy.square",
"pylab.rcParams.... | [((75, 105), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (90, 105), False, 'import sys\n'), ((781, 868), 'get_boxlib.get_files', 'get_files', (['"""."""'], {'include': '[plt_file]', 'exclude': "['input', 'temp', '.png']", 'get_all': '(True)'}), "('.', include=[plt_file], exclude=['input', 'temp', '.png'],\n get_all=True)\n", (790, 868), False, 'from get_boxlib import ReadBoxLib, get_files\n'), ((934, 984), 'get_boxlib.ReadBoxLib', 'ReadBoxLib', (['files[-1]'], {'max_level': '(-1)', 'limits': 'window'}), '(files[-1], max_level=-1, limits=window)\n', (944, 984), False, 'from get_boxlib import ReadBoxLib, get_files\n'), ((1144, 1161), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (1155, 1161), True, 'import numpy as np\n'), ((1532, 1569), 'h5py.File', 'h5py.File', (['"""y=0-4277_t=0-5.hdf5"""', '"""r"""'], {}), "('y=0-4277_t=0-5.hdf5', 'r')\n", (1541, 1569), False, 'import h5py\n'), ((1752, 1782), 'pylab.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (1758, 1782), True, 'import pylab as plt\n'), ((1787, 1809), 'pylab.rc', 'plt.rc', (['"""font"""'], {'size': '(8)'}), "('font', size=8)\n", (1793, 1809), True, 'import pylab as plt\n'), ((1814, 1846), 'pylab.rc', 'plt.rc', (['"""mathtext"""'], {'fontset': '"""cm"""'}), "('mathtext', fontset='cm')\n", (1820, 1846), True, 'import pylab as plt\n'), ((1958, 1985), 'pylab.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (1977, 1985), True, 'import pylab as plt\n'), ((2012, 2040), 'pylab.figure', 'plt.figure', ([], {'figsize': '(5, 2.5)'}), '(figsize=(5, 2.5))\n', (2022, 2040), True, 'import pylab as plt\n'), ((2050, 2177), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(3)', 'width_ratios': '[0.6, 1, 0.05]', 'wspace': '(0.01)', 'bottom': '(0.14)', 'top': '(0.97)', 'left': '(0.1)', 'right': '(0.88)'}), '(nrows=1, ncols=3, width_ratios=[0.6, 1, 
0.05], wspace=\n 0.01, bottom=0.14, top=0.97, left=0.1, right=0.88)\n', (2067, 2177), True, 'import matplotlib.gridspec as gridspec\n'), ((2306, 2410), 'matplotlib.image.NonUniformImage', 'NonUniformImage', (['ax'], {'interpolation': '"""bilinear"""', 'extent': '[xn[0], yn[0], xn[-1], yn[-1]]', 'cmap': '"""viridis"""'}), "(ax, interpolation='bilinear', extent=[xn[0], yn[0], xn[-1],\n yn[-1]], cmap='viridis')\n", (2321, 2410), False, 'from matplotlib.image import NonUniformImage\n'), ((2596, 2636), 'pylab.colorbar', 'plt.colorbar', (['im'], {'cax': 'ax_cb', 'label': '"""$p$"""'}), "(im, cax=ax_cb, label='$p$')\n", (2608, 2636), True, 'import pylab as plt\n'), ((3180, 3192), 'numpy.array', 'np.array', (['cx'], {}), '(cx)\n', (3188, 3192), True, 'import numpy as np\n'), ((3202, 3214), 'numpy.array', 'np.array', (['cy'], {}), '(cy)\n', (3210, 3214), True, 'import numpy as np\n'), ((3224, 3238), 'numpy.argsort', 'np.argsort', (['cx'], {}), '(cx)\n', (3234, 3238), True, 'import numpy as np\n'), ((3344, 3374), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['xx', 'yy', 'z'], {}), '(xx, yy, z)\n', (3363, 3374), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((3662, 3676), 'pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (3671, 3676), True, 'import pylab as plt\n'), ((3482, 3500), 'numpy.square', 'np.square', (['(cy - sy)'], {}), '(cy - sy)\n', (3491, 3500), True, 'import numpy as np\n')] |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import time
import uuid
from functools import partial
import gevent
from mars import promise
from mars.actors import FunctionActor, create_actor_pool
from mars.cluster_info import ClusterInfoActor
from mars.compat import six
from mars.config import options
from mars.errors import StoreFull
from mars.scheduler.kvstore import KVStoreActor
from mars.utils import get_next_port, calc_data_size
from mars.worker import *
from mars.worker.distributor import WorkerDistributor
from mars.worker.chunkstore import PlasmaChunkStore
from mars.worker.tests.base import WorkerCase
from mars.worker.utils import WorkerActor
from pyarrow import plasma
class HolderActor(FunctionActor):
    """One-shot latch actor: obtain() reports whether trigger() has fired."""

    def __init__(self):
        super(HolderActor, self).__init__()
        self._fired = 0

    def trigger(self):
        """Flip the latch on."""
        self._fired = 1

    def obtain(self):
        """Return 1 once trigger() has been called, otherwise 0."""
        return self._fired
class WorkerRegistrationTestActor(WorkerActor):
    """Test actor that stores a batch of chunks into the local chunk store,
    spilling on StoreFull, and flags completion once all are registered."""
    def __init__(self):
        super(WorkerRegistrationTestActor, self).__init__()
        self._finished = False
    def get_finished(self):
        """Return True once every chunk has been stored and registered."""
        return self._finished
    def register(self, session_id, chunk_keys):
        import numpy as np
        cache_ref = self.promise_ref(ChunkHolderActor.default_name())
        left_keys = set(chunk_keys)
        def _put_chunk(chunk_key, data, spill_times=1):
            # Try to put the chunk; on StoreFull ask the holder to spill
            # (doubling the requested spill size each retry) and try again.
            try:
                refs = self._chunk_store.put(session_id, chunk_key, data)
                cache_ref.register_chunk(session_id, chunk_key)
                del refs
                left_keys.remove(chunk_key)
            except StoreFull:
                return cache_ref.spill_size(2 * spill_times * calc_data_size(data), _promise=True) \
                    .then(partial(_put_chunk, chunk_key, data, 2 * spill_times))
        promises = []
        for idx, chunk_key in enumerate(chunk_keys):
            # Distinct fill value per chunk so payloads are distinguishable.
            data = np.ones((640 * 1024,), dtype=np.int16) * idx
            promises.append(promise.Promise(done=True) \
                            .then(partial(_put_chunk, chunk_key, data)))
        promise.all_(promises).then(lambda *_: setattr(self, '_finished', True))
def run_transfer_worker(pool_address, session_id, plasma_socket, chunk_keys,
                        spill_dir, msg_queue):
    """Child-process entry point: start a plasma store and a worker actor
    pool at ``pool_address``, register ``chunk_keys`` into the store, signal
    readiness via ``msg_queue``, then wait for the HolderActor latch before
    tearing down.

    :param pool_address: address the remote actor pool listens on
    :param session_id: session the chunks are registered under
    :param plasma_socket: unix socket path for the plasma store
    :param chunk_keys: chunk keys to create and register
    :param spill_dir: directory used for spilled chunks
    :param msg_queue: multiprocessing queue used to signal readiness (puts 1)
    """
    from mars.config import options
    from mars.utils import PlasmaProcessHelper
    options.worker.plasma_socket = plasma_socket
    options.worker.spill_directory = spill_dir
    plasma_helper = PlasmaProcessHelper(size=1024 * 1024 * 10, socket=options.worker.plasma_socket)
    try:
        plasma_helper.run()
        with create_actor_pool(n_process=2, backend='gevent', distributor=WorkerDistributor(2),
                               address=pool_address) as pool:
            try:
                pool.create_actor(ClusterInfoActor, schedulers=[pool_address],
                                  uid=ClusterInfoActor.default_name())
                pool.create_actor(KVStoreActor, uid=KVStoreActor.default_name())
                pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
                pool.create_actor(QuotaActor, 1024 * 1024 * 20, uid=MemQuotaActor.default_name())
                holder_ref = pool.create_actor(HolderActor, uid='HolderActor')
                chunk_holder_ref = pool.create_actor(ChunkHolderActor, plasma_helper._size,
                                                     uid=ChunkHolderActor.default_name())
                pool.create_actor(SpillActor)
                # Two senders and two receivers with random uids.
                pool.create_actor(SenderActor, uid='%s' % str(uuid.uuid4()))
                pool.create_actor(SenderActor, uid='%s' % str(uuid.uuid4()))
                pool.create_actor(ReceiverActor, uid='%s' % str(uuid.uuid4()))
                pool.create_actor(ReceiverActor, uid='%s' % str(uuid.uuid4()))
                register_actor = pool.create_actor(WorkerRegistrationTestActor)
                register_actor.register(session_id, chunk_keys)
                # Poll until all chunks are registered (60 s budget).
                check_time = time.time()
                while not register_actor.get_finished():
                    gevent.sleep(0.5)
                    if time.time() - check_time > 60:
                        raise SystemError('Wait result timeout')
                register_actor.destroy()
                # Tell the parent process we are ready to serve transfers.
                msg_queue.put(1)
                # Stay alive until the parent triggers the holder latch
                # (60 s budget).
                check_time = time.time()
                while not holder_ref.obtain():
                    gevent.sleep(1)
                    if time.time() - check_time > 60:
                        raise SystemError('Wait result timeout')
            finally:
                # NOTE(review): if actor creation fails before
                # chunk_holder_ref is assigned, this raises NameError and
                # masks the original error -- confirm acceptable for a test.
                pool.destroy_actor(chunk_holder_ref)
    finally:
        plasma_helper.stop()
class TransferTestActor(WorkerActor):
    """Actor living in the local pool that asks a remote sender to push a
    chunk here, then verifies local and remote copies are identical.

    The outcome is stored in ``_finish_info`` as ``(chunk_key, None)`` on
    success or ``(chunk_key, exc_info)`` on failure.
    """
    def __init__(self, local_pool_addr, remote_pool_addr, remote_plasma_socket, remote_spill_dir):
        super(TransferTestActor, self).__init__()
        self._local_pool_addr = local_pool_addr
        self._remote_pool_addr = remote_pool_addr
        self._remote_plasma_socket = remote_plasma_socket
        self._remote_spill_dir = remote_spill_dir
        self._remote_plasma_client = None
        self._remote_store = None
        self._finish_info = (None, None)
    def post_create(self):
        super(TransferTestActor, self).post_create()
        # NOTE(review): the 3-argument plasma.connect signature is the
        # legacy pyarrow API -- confirm against the pinned pyarrow version.
        self._remote_plasma_client = plasma.connect(self._remote_plasma_socket, '', 0)
        self._remote_store = PlasmaChunkStore(
            self._remote_plasma_client, self.ctx.actor_ref(KVStoreActor.default_name()))
    def pre_destroy(self):
        self._remote_plasma_client.disconnect()
    def get_results(self):
        """Return (chunk_key, exc_info-or-None) for the last transfer."""
        return self._finish_info
    def do_transfer_test(self, session_id, chunk_key):
        from mars.worker.spill import build_spill_file_name
        from mars.serialize import dataserializer
        from numpy.testing import assert_array_equal
        remote_dispatch_ref = self.promise_ref(DispatchActor.default_name(), address=self._remote_pool_addr)
        def _call_send_data(sender_uid):
            # Ask the chosen remote sender to push the chunk to this pool.
            sender_ref = self.promise_ref(sender_uid, address=self._remote_pool_addr)
            return sender_ref.send_data(session_id, chunk_key, self._local_pool_addr, _promise=True)
        def _test_data_exist(*_):
            # Fetch each copy from its plasma store, falling back to the
            # spill file when the chunk has been evicted, then compare.
            try:
                local_data = self._chunk_store.get(session_id, chunk_key)
            except KeyError:
                with open(build_spill_file_name(chunk_key), 'rb') as spill_file:
                    local_data = dataserializer.load(spill_file)
            try:
                remote_data = self._remote_store.get(session_id, chunk_key)
            except KeyError:
                with open(build_spill_file_name(chunk_key, self._remote_spill_dir), 'rb') as spill_file:
                    remote_data = dataserializer.load(spill_file)
            assert_array_equal(local_data, remote_data)
            del local_data, remote_data
        remote_dispatch_ref.get_free_slot('sender', _promise=True) \
            .then(_call_send_data) \
            .then(_test_data_exist) \
            .then(
            lambda *_: setattr(self, '_finish_info', (chunk_key, None)),
            lambda *exc: setattr(self, '_finish_info', (chunk_key, exc)),
        )
class Test(WorkerCase):
    """Integration test: transfer chunks between a local worker pool and a
    remote worker pool running in a separate process."""

    def setUp(self):
        # Shrink the transfer block size so that even small test chunks are
        # sent in multiple blocks, exercising the chunked-transfer path.
        super(Test, self).setUp()
        self._old_block_size = options.worker.transfer_block_size
        options.worker.transfer_block_size = 4 * 1024

    def tearDown(self):
        super(Test, self).tearDown()
        options.worker.transfer_block_size = self._old_block_size

    def testSimpleTransfer(self):
        import tempfile
        session_id = str(uuid.uuid4())
        local_pool_addr = 'localhost:%d' % get_next_port()
        remote_pool_addr = 'localhost:%d' % get_next_port()
        remote_chunk_keys = [str(uuid.uuid4()) for _ in range(9)]
        msg_queue = multiprocessing.Queue()

        remote_plasma_socket = '/tmp/plasma_%d_%d.sock' % (os.getpid(), id(run_transfer_worker))
        remote_spill_dir = os.path.join(tempfile.gettempdir(),
                                        'mars_spill_%d_%d' % (os.getpid(), id(run_transfer_worker)))

        # Launch the remote worker in its own process; it puts 1 on
        # msg_queue once its chunks are registered and it is ready.
        proc = multiprocessing.Process(
            target=run_transfer_worker,
            args=(remote_pool_addr, session_id, remote_plasma_socket,
                  remote_chunk_keys, remote_spill_dir, msg_queue)
        )
        proc.start()
        try:
            # BUG FIX: msg_queue.get(30) passed 30 as the positional
            # ``block`` argument (truthy => block forever, no timeout).
            # A 30-second timeout requires the ``timeout`` keyword.
            msg_queue.get(timeout=30)
        except:
            # Clean up the child before re-raising the startup failure.
            if proc.is_alive():
                proc.terminate()
            raise

        with create_actor_pool(n_process=1, distributor=WorkerDistributor(3),
                               backend='gevent', address=local_pool_addr) as pool:
            pool.create_actor(ClusterInfoActor, schedulers=[local_pool_addr],
                              uid=ClusterInfoActor.default_name())
            pool.create_actor(KVStoreActor, uid=KVStoreActor.default_name())
            pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
            pool.create_actor(QuotaActor, 1024 * 1024 * 20, uid=MemQuotaActor.default_name())
            cache_ref = pool.create_actor(ChunkHolderActor, self.plasma_storage_size,
                                          uid=ChunkHolderActor.default_name())
            pool.create_actor(SpillActor)
            sender_refs = [
                pool.create_actor(SenderActor, uid='w:1:%s' % str(uuid.uuid4())),
                pool.create_actor(SenderActor, uid='w:2:%s' % str(uuid.uuid4())),
            ]
            receiver_refs = [
                pool.create_actor(ReceiverActor, uid='w:1:%s' % str(uuid.uuid4())),
                pool.create_actor(ReceiverActor, uid='w:1:%s' % str(uuid.uuid4())),
                pool.create_actor(ReceiverActor, uid='w:2:%s' % str(uuid.uuid4())),
                pool.create_actor(ReceiverActor, uid='w:2:%s' % str(uuid.uuid4())),
            ]
            test_ref = pool.create_actor(TransferTestActor, local_pool_addr, remote_pool_addr,
                                         remote_plasma_socket, remote_spill_dir)
            try:
                # Transfer the last and the second remote chunk and verify
                # the local copy matches the remote one each time.
                for data_id in (-1, 1):
                    chunk_key = remote_chunk_keys[data_id]
                    test_ref.do_transfer_test(session_id, chunk_key)

                    # Poll until the async transfer reports (60 s budget).
                    check_time = time.time()
                    while test_ref.get_results()[0] != chunk_key:
                        gevent.sleep(0.5)
                        if not proc.is_alive():
                            raise SystemError('Transfer worker dead. exit code %s' % proc.exitcode)
                        if time.time() - check_time > 60:
                            raise SystemError('Wait result timeout')

                    exc = test_ref.get_results()[1]
                    if exc:
                        six.reraise(*exc)

                # Release the remote worker's latch so it can shut down.
                remote_holder_ref = pool.actor_ref('HolderActor', address=remote_pool_addr)
                remote_holder_ref.trigger()
            finally:
                for ref in sender_refs:
                    pool.destroy_actor(ref)
                for ref in receiver_refs:
                    pool.destroy_actor(ref)
                pool.destroy_actor(cache_ref)
                pool.destroy_actor(test_ref)

                os.unlink(remote_plasma_socket)
                if proc.is_alive():
                    proc.terminate()
| [
"mars.worker.distributor.WorkerDistributor",
"mars.utils.calc_data_size",
"multiprocessing.Process",
"mars.compat.six.reraise",
"mars.cluster_info.ClusterInfoActor.default_name",
"mars.promise.Promise",
"os.unlink",
"os.getpid",
"numpy.testing.assert_array_equal",
"mars.utils.get_next_port",
"nu... | [((3063, 3142), 'mars.utils.PlasmaProcessHelper', 'PlasmaProcessHelper', ([], {'size': '(1024 * 1024 * 10)', 'socket': 'options.worker.plasma_socket'}), '(size=1024 * 1024 * 10, socket=options.worker.plasma_socket)\n', (3082, 3142), False, 'from mars.utils import PlasmaProcessHelper\n'), ((5857, 5906), 'pyarrow.plasma.connect', 'plasma.connect', (['self._remote_plasma_socket', '""""""', '(0)'], {}), "(self._remote_plasma_socket, '', 0)\n", (5871, 5906), False, 'from pyarrow import plasma\n'), ((8384, 8407), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (8405, 8407), False, 'import multiprocessing\n'), ((8686, 8852), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'run_transfer_worker', 'args': '(remote_pool_addr, session_id, remote_plasma_socket, remote_chunk_keys,\n remote_spill_dir, msg_queue)'}), '(target=run_transfer_worker, args=(remote_pool_addr,\n session_id, remote_plasma_socket, remote_chunk_keys, remote_spill_dir,\n msg_queue))\n', (8709, 8852), False, 'import multiprocessing\n'), ((7345, 7388), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['local_data', 'remote_data'], {}), '(local_data, remote_data)\n', (7363, 7388), False, 'from numpy.testing import assert_array_equal\n'), ((8164, 8176), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8174, 8176), False, 'import uuid\n'), ((8222, 8237), 'mars.utils.get_next_port', 'get_next_port', ([], {}), '()\n', (8235, 8237), False, 'from mars.utils import get_next_port, calc_data_size\n'), ((8282, 8297), 'mars.utils.get_next_port', 'get_next_port', ([], {}), '()\n', (8295, 8297), False, 'from mars.utils import get_next_port, calc_data_size\n'), ((8546, 8567), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (8565, 8567), False, 'import tempfile\n'), ((2492, 2530), 'numpy.ones', 'np.ones', (['(640 * 1024,)'], {'dtype': 'np.int16'}), '((640 * 1024,), dtype=np.int16)\n', (2499, 2530), True, 'import numpy as np\n'), ((2663, 2685), 
'mars.promise.all_', 'promise.all_', (['promises'], {}), '(promises)\n', (2675, 2685), False, 'from mars import promise\n'), ((4564, 4575), 'time.time', 'time.time', ([], {}), '()\n', (4573, 4575), False, 'import time\n'), ((4894, 4905), 'time.time', 'time.time', ([], {}), '()\n', (4903, 4905), False, 'import time\n'), ((6013, 6040), 'mars.scheduler.kvstore.KVStoreActor.default_name', 'KVStoreActor.default_name', ([], {}), '()\n', (6038, 6040), False, 'from mars.scheduler.kvstore import KVStoreActor\n'), ((8331, 8343), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8341, 8343), False, 'import uuid\n'), ((8468, 8479), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8477, 8479), False, 'import os\n'), ((11762, 11793), 'os.unlink', 'os.unlink', (['remote_plasma_socket'], {}), '(remote_plasma_socket)\n', (11771, 11793), False, 'import os\n'), ((2616, 2652), 'functools.partial', 'partial', (['_put_chunk', 'chunk_key', 'data'], {}), '(_put_chunk, chunk_key, data)\n', (2623, 2652), False, 'from functools import partial\n'), ((3255, 3275), 'mars.worker.distributor.WorkerDistributor', 'WorkerDistributor', (['(2)'], {}), '(2)\n', (3272, 3275), False, 'from mars.worker.distributor import WorkerDistributor\n'), ((4653, 4670), 'gevent.sleep', 'gevent.sleep', (['(0.5)'], {}), '(0.5)\n', (4665, 4670), False, 'import gevent\n'), ((4973, 4988), 'gevent.sleep', 'gevent.sleep', (['(1)'], {}), '(1)\n', (4985, 4988), False, 'import gevent\n'), ((8631, 8642), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8640, 8642), False, 'import os\n'), ((9117, 9137), 'mars.worker.distributor.WorkerDistributor', 'WorkerDistributor', (['(3)'], {}), '(3)\n', (9134, 9137), False, 'from mars.worker.distributor import WorkerDistributor\n'), ((9334, 9365), 'mars.cluster_info.ClusterInfoActor.default_name', 'ClusterInfoActor.default_name', ([], {}), '()\n', (9363, 9365), False, 'from mars.cluster_info import ClusterInfoActor\n'), ((9415, 9442), 'mars.scheduler.kvstore.KVStoreActor.default_name', 
'KVStoreActor.default_name', ([], {}), '()\n', (9440, 9442), False, 'from mars.scheduler.kvstore import KVStoreActor\n'), ((10809, 10820), 'time.time', 'time.time', ([], {}), '()\n', (10818, 10820), False, 'import time\n'), ((2342, 2395), 'functools.partial', 'partial', (['_put_chunk', 'chunk_key', 'data', '(2 * spill_times)'], {}), '(_put_chunk, chunk_key, data, 2 * spill_times)\n', (2349, 2395), False, 'from functools import partial\n'), ((2565, 2591), 'mars.promise.Promise', 'promise.Promise', ([], {'done': '(True)'}), '(done=True)\n', (2580, 2591), False, 'from mars import promise\n'), ((3473, 3504), 'mars.cluster_info.ClusterInfoActor.default_name', 'ClusterInfoActor.default_name', ([], {}), '()\n', (3502, 3504), False, 'from mars.cluster_info import ClusterInfoActor\n'), ((3558, 3585), 'mars.scheduler.kvstore.KVStoreActor.default_name', 'KVStoreActor.default_name', ([], {}), '()\n', (3583, 3585), False, 'from mars.scheduler.kvstore import KVStoreActor\n'), ((7007, 7038), 'mars.serialize.dataserializer.load', 'dataserializer.load', (['spill_file'], {}), '(spill_file)\n', (7026, 7038), False, 'from mars.serialize import dataserializer\n'), ((7301, 7332), 'mars.serialize.dataserializer.load', 'dataserializer.load', (['spill_file'], {}), '(spill_file)\n', (7320, 7332), False, 'from mars.serialize import dataserializer\n'), ((10911, 10928), 'gevent.sleep', 'gevent.sleep', (['(0.5)'], {}), '(0.5)\n', (10923, 10928), False, 'import gevent\n'), ((11308, 11325), 'mars.compat.six.reraise', 'six.reraise', (['*exc'], {}), '(*exc)\n', (11319, 11325), False, 'from mars.compat import six\n'), ((4694, 4705), 'time.time', 'time.time', ([], {}), '()\n', (4703, 4705), False, 'import time\n'), ((5012, 5023), 'time.time', 'time.time', ([], {}), '()\n', (5021, 5023), False, 'import time\n'), ((6919, 6951), 'mars.worker.spill.build_spill_file_name', 'build_spill_file_name', (['chunk_key'], {}), '(chunk_key)\n', (6940, 6951), False, 'from mars.worker.spill import 
build_spill_file_name\n'), ((7188, 7244), 'mars.worker.spill.build_spill_file_name', 'build_spill_file_name', (['chunk_key', 'self._remote_spill_dir'], {}), '(chunk_key, self._remote_spill_dir)\n', (7209, 7244), False, 'from mars.worker.spill import build_spill_file_name\n'), ((4138, 4150), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4148, 4150), False, 'import uuid\n'), ((4215, 4227), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4225, 4227), False, 'import uuid\n'), ((4295, 4307), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4305, 4307), False, 'import uuid\n'), ((4374, 4386), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4384, 4386), False, 'import uuid\n'), ((9919, 9931), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9929, 9931), False, 'import uuid\n'), ((10001, 10013), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10011, 10013), False, 'import uuid\n'), ((10130, 10142), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10140, 10142), False, 'import uuid\n'), ((10214, 10226), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10224, 10226), False, 'import uuid\n'), ((10298, 10310), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10308, 10310), False, 'import uuid\n'), ((10382, 10394), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10392, 10394), False, 'import uuid\n'), ((11104, 11115), 'time.time', 'time.time', ([], {}), '()\n', (11113, 11115), False, 'import time\n'), ((2277, 2297), 'mars.utils.calc_data_size', 'calc_data_size', (['data'], {}), '(data)\n', (2291, 2297), False, 'from mars.utils import get_next_port, calc_data_size\n')] |
# -*- coding: utf-8 -*-
"""4_focus_random_classify_random_train_classify.ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1pHaGvxPWtFJXolLP6BXSnxm-_rZpiWB5
"""
# Colab-only: mounts Google Drive (interactive auth prompt).
from google.colab import drive
drive.mount('/content/drive')
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Normalize CIFAR-10 images to [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Foreground classes become the mosaic labels (0/1/2); every other class
# only ever appears as a background tile.
foreground_classes = {'plane', 'car', 'bird'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
fg1, fg2, fg3 = 0, 1, 2
# Walk the whole 50000-image train set (5000 shuffled batches of 10) and
# split it into a foreground pool and a background pool.
dataiter = iter(trainloader)
background_data = []
background_label = []
foreground_data = []
foreground_label = []
batch_size = 10
for i in range(5000):
    # BUG FIX: `dataiter.next()` is the removed Python-2-style method and is
    # also gone from modern DataLoader iterators; the builtin next() is the
    # portable way to advance any iterator.
    images, labels = next(dataiter)
    for j in range(batch_size):
        img = images[j].tolist()
        if classes[labels[j]] in background_classes:
            background_data.append(img)
            background_label.append(labels[j])
        else:
            foreground_data.append(img)
            foreground_label.append(labels[j])
# Materialise the pools as tensors for fast indexed access.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx, fg_idx, fg):
    """Assemble one 9-tile mosaic.

    bg_idx : eight indices into background_data for the non-target tiles
    fg_idx : index into foreground_data for the target tile
    fg     : grid position (0-8) at which the foreground tile is placed

    Returns (tiles, label): tiles is the 9 images stacked into one tensor,
    label is the foreground class shifted into the 0..2 range.
    """
    tiles = []
    bg_iter = iter(bg_idx)
    for pos in range(9):
        if pos == fg:
            tiles.append(foreground_data[fg_idx])
        else:
            tiles.append(background_data[next(bg_iter)])
    # fg1 is the smallest foreground class id, so labels land in {0, 1, 2}.
    label = foreground_label[fg_idx] - fg1
    return torch.stack(tiles), label
desired_num = 30000
mosaic_list_of_images = []  # each entry: stacked tensor of 9 tile images
fore_idx = []               # grid position (0-8) of the foreground tile
mosaic_label = []           # class label (0-2) of the foreground tile
# Seed per mosaic so the whole train set is reproducible index-by-index.
for seed in range(desired_num):
    np.random.seed(seed)
    bg_choices = np.random.randint(0, 35000, 8)
    fg_choice = np.random.randint(0, 15000)
    fg_pos = np.random.randint(0, 9)
    fore_idx.append(fg_pos)
    tiles, tile_label = create_mosaic_img(bg_choices, fg_choice, fg_pos)
    mosaic_list_of_images.append(tiles)
    mosaic_label.append(tile_label)
class MosaicDataset(Dataset):
    """Dataset over pre-built mosaics.

    Each item is the triple
    (mosaic tiles, foreground class label, foreground tile position).
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
# 250 mosaics per optimisation step; 30000/250 = 120 batches per epoch.
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
# NOTE: earlier draft of the Focus network, kept commented-out for reference;
# superseded by the class defined below.
# class Focus(nn.Module):
# def __init__(self):
# super(Focus, self).__init__()
# self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
# self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
# self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
# self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
# self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
# self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
# self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# self.batch_norm1 = nn.BatchNorm2d(32)
# self.batch_norm2 = nn.BatchNorm2d(128)
# self.dropout1 = nn.Dropout2d(p=0.05)
# self.dropout2 = nn.Dropout2d(p=0.1)
# self.fc1 = nn.Linear(128,64)
# self.fc2 = nn.Linear(64, 32)
# self.fc3 = nn.Linear(32, 10)
# self.fc4 = nn.Linear(10, 2)
# def forward(self, x):
# x = self.conv1(x)
# x = F.relu(self.batch_norm1(x))
# x = (F.relu(self.conv2(x)))
# x = self.pool(x)
# x = self.conv3(x)
# x = F.relu(self.batch_norm2(x))
# x = (F.relu(self.conv4(x)))
# x = self.pool(x)
# x = self.dropout1(x)
# x = self.conv5(x)
# x = F.relu(self.batch_norm2(x))
# x = self.conv6(x)
# x1 = F.tanh(x)
# x = F.relu(x)
# x = self.pool(x)
# x = x.view(x.size(0), -1)
# x = self.dropout2(x)
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.dropout2(x)
# x = F.relu(self.fc3(x))
# x = self.fc4(x)
# return x,x1
class Focus(nn.Module):
    """Attention network over a 9-tile mosaic.

    Scores each tile, softmaxes the scores into per-tile weights (alphas),
    and returns those weights together with the alpha-weighted average of
    the tiles' 128x3x3 feature maps.

    Attribute names must stay unchanged: the script saves/loads state_dicts
    for this class.
    """

    def __init__(self, pretrained=True):
        super(Focus, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_norm1 = nn.BatchNorm2d(32)
        self.batch_norm2 = nn.BatchNorm2d(128)
        self.dropout1 = nn.Dropout2d(p=0.05)
        self.dropout2 = nn.Dropout2d(p=0.1)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 10)
        self.fc4 = nn.Linear(10, 2)
        # When True the tile score is fc4(x)[:,1] - fc4(x)[:,0],
        # otherwise fc4(x)[:,0].
        self.pretrained = pretrained

    def forward(self, z):
        """z: (B, 9, 3, 32, 32) batch of mosaics.

        Returns (alphas, avg_data):
          alphas   -- (B, 9) softmax weight per tile
          avg_data -- (B, 128, 3, 3) alpha-weighted sum of tile features
        """
        # Generalized: batch size, dtype and device follow the input instead
        # of the hard-coded module-level `batch`, float64 and "cuda".
        b = z.size(0)
        scores = torch.zeros([b, 9], dtype=z.dtype, device=z.device)
        # BUG FIX: this buffer previously defaulted to float32, silently
        # down-casting the float64 features written into it.
        ftr = torch.zeros([b, 9, 128, 3, 3], dtype=z.dtype, device=z.device)
        for i in range(9):
            out, ftrs = self.helper(z[:, i])
            scores[:, i] = out
            ftr[:, i] = ftrs
        x = F.softmax(scores, dim=1)
        y = torch.zeros([b, 128, 3, 3], dtype=z.dtype, device=z.device)
        for i in range(9):
            x1 = x[:, i]
            y = y + torch.mul(x1[:, None, None, None], ftr[:, i])
        return x, y  # alpha, avg_data

    def helper(self, x):
        """Score one (B, 3, 32, 32) tile; also return its tanh feature map."""
        x = self.conv1(x)
        x = F.relu(self.batch_norm1(x))
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = self.conv3(x)
        x = F.relu(self.batch_norm2(x))
        x = F.relu(self.conv4(x))
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.conv5(x)
        x = F.relu(self.batch_norm2(x))
        x = self.conv6(x)
        # BUG FIX: F.tanh is deprecated (removed in newer PyTorch);
        # torch.tanh is the supported equivalent.
        x1 = torch.tanh(x)
        x = F.relu(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout2(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = F.relu(self.fc3(x))
        if self.pretrained == True:
            x = self.fc4(x)
            x = x[:, 1] - x[:, 0]
        else:
            x = self.fc4(x)
            x = x[:, 0]
        return x, x1
# Instantiate the focus network (random weights) in float64 on the GPU.
focus_net = Focus().double()
focus_net = focus_net.to("cuda")
# focus_net.load_state_dict( torch.load("/content/drive/My Drive/Cheating_data/Focus_net_weights/focus_net_6layer_cnn.pt"))
# print(focus_net.fc4)
# print(focus_net.fc4.weight)
# print(focus_net.fc4.bias)
# temp = focus_net.fc4.weight.data
# temp2 = focus_net.fc4.bias.data
# focus_net.fc4 = nn.Linear(10,1).double()
# focus_net.fc4.weight.data = temp[1,:]-temp[0,:]
# focus_net.fc4.bias.data = temp[1,:]-temp[0,:]
# focus_net = focus_net.to("cuda")
# print(focus_net.fc4.weight)
# print(focus_net.fc4.bias)
"""Changing the last layer of Focus net"""
# Freeze the focus network: only the classifier is trained in this experiment.
for params in focus_net.parameters():
  params.requires_grad = False
# for params in focus_net.parameters():
#   print(params)
#   break;
class Classification(nn.Module):
    """3-way classifier applied to the Focus network's weighted-average
    feature map.

    conv1 is declared with 3 input channels, but the script later replaces it
    (after loading pretrained weights) with a 128-input conv so the forward
    pass actually consumes (B, 128, 3, 3) Focus features.  Attribute names
    must stay unchanged: a saved state_dict is loaded into this class.
    """

    def __init__(self):
        super(Classification, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.batch_norm1 = nn.BatchNorm2d(32)
        self.batch_norm2 = nn.BatchNorm2d(128)
        self.dropout1 = nn.Dropout2d(p=0.05)
        self.dropout2 = nn.Dropout2d(p=0.1)
        self.global_average_pooling = nn.AvgPool2d(kernel_size=2)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 10)
        self.fc4 = nn.Linear(10, 3)

    def forward(self, x):
        """x: (B, C, H, W) image/feature batch -> (B, 3) class logits."""
        x = self.conv1(x)
        x = F.relu(self.batch_norm1(x))
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = self.conv3(x)
        x = F.relu(self.batch_norm2(x))
        x = F.relu(self.conv4(x))
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.conv5(x)
        x = F.relu(self.batch_norm2(x))
        x = F.relu(self.conv6(x))
        x = self.pool(x)
        x = self.global_average_pooling(x)
        # BUG FIX: a bare x.squeeze() also removed the batch dimension when a
        # batch held a single sample, producing a (3,) output instead of
        # (1, 3).  Squeeze only the two spatial singleton dims.
        x = x.squeeze(-1).squeeze(-1)
        x = self.dropout2(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x
# Build the classifier, load pretrained weights, then swap conv1 for a
# 128-input conv so it can consume Focus feature maps; the replaced conv1
# is freshly (randomly) initialised.
classify = Classification().double()
classify = classify.to("cuda")
classify.load_state_dict( torch.load("/content/classify_weights.pt"))
classify.conv1 = nn.Conv2d(in_channels=128, out_channels=32, kernel_size=3, padding=1)
classify = classify.double()
classify = classify.to("cuda")
# All classifier parameters are trainable (the focus net stays frozen).
for params in classify.parameters():
  params.requires_grad = True
# Held-out mosaics: seeds 30000..39999 keep them disjoint from the train set.
test_images = []     # stacked 9-tile tensors
fore_idx_test = []   # grid position of the foreground tile
test_label = []      # foreground class (0..2)
for seed in range(10000):
    np.random.seed(seed + 30000)
    bg_choices = np.random.randint(0, 35000, 8)
    fg_choice = np.random.randint(0, 15000)
    fg_pos = np.random.randint(0, 9)
    fore_idx_test.append(fg_pos)
    tiles, tile_label = create_mosaic_img(bg_choices, fg_choice, fg_pos)
    test_images.append(tiles)
    test_label.append(tile_label)
test_data = MosaicDataset(test_images, test_label, fore_idx_test)
test_loader = DataLoader(test_data, batch_size=batch, shuffle=False)
import torch.optim as optim
criterion_classify = nn.CrossEntropyLoss()
# optimizer_focus exists but is never stepped: the focus net is frozen.
optimizer_focus = optim.SGD(focus_net.parameters(), lr=0.01, momentum=0.9)
optimizer_classify = optim.SGD(classify.parameters(), lr=0.01, momentum=0.9)
# Statistics columns.  col1: epoch; col2-7: train-set argmax>=0.5, argmax<0.5,
# FTPT, FFPT, FTPF, FFPF; col8-13: the same six counters on the test set.
col1=[]
col2=[]
col3=[]
col4=[]
col5=[]
col6=[]
col7=[]
col8=[]
col9=[]
col10=[]
col11=[]
col12=[]
col13=[]
# ---- Pre-training evaluation on the TRAIN mosaics (epoch-0 baseline) ----
# FTPT/FFPT/FTPF/FFPF: Focus True/False (did the focus net attend to the
# foreground tile?) crossed with Prediction True/False (classifier correct?).
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
  for data in train_loader:
    inputs, labels , fore_idx = data
    inputs = inputs.double()
    inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
    alphas, avg_images = focus_net(inputs)
    outputs = classify(avg_images)
    _, predicted = torch.max(outputs.data, 1)
    for j in range(labels.size(0)):
      count += 1
      focus = torch.argmax(alphas[j])
      # Does the strongest attention weight dominate (>= 0.5)?
      if alphas[j][focus] >= 0.5 :
        argmax_more_than_half += 1
      else:
        argmax_less_than_half += 1
      if(focus == fore_idx[j] and predicted[j] == labels[j]):
        focus_true_pred_true += 1
      elif(focus != fore_idx[j] and predicted[j] == labels[j]):
        focus_false_pred_true += 1
      elif(focus == fore_idx[j] and predicted[j] != labels[j]):
        focus_true_pred_false += 1
      elif(focus != fore_idx[j] and predicted[j] != labels[j]):
        focus_false_pred_false += 1
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
# Epoch-0 row of the training-statistics table (col1 holds the epoch number).
col1.append(0)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
# ---- Pre-training evaluation on the TEST mosaics (epoch-0 baseline) ----
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
  for data in test_loader:
    inputs, labels , fore_idx = data
    inputs = inputs.double()
    inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
    alphas, avg_images = focus_net(inputs)
    outputs = classify(avg_images)
    _, predicted = torch.max(outputs.data, 1)
    for j in range(labels.size(0)):
      focus = torch.argmax(alphas[j])
      if alphas[j][focus] >= 0.5 :
        argmax_more_than_half += 1
      else:
        argmax_less_than_half += 1
      if(focus == fore_idx[j] and predicted[j] == labels[j]):
        focus_true_pred_true += 1
      elif(focus != fore_idx[j] and predicted[j] == labels[j]):
        focus_false_pred_true += 1
      elif(focus == fore_idx[j] and predicted[j] != labels[j]):
        focus_true_pred_false += 1
      elif(focus != fore_idx[j] and predicted[j] != labels[j]):
        focus_false_pred_false += 1
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Epoch-0 row of the test-statistics table.
col8.append(argmax_more_than_half)
col9.append(argmax_less_than_half)
col10.append(focus_true_pred_true)
col11.append(focus_false_pred_true)
col12.append(focus_true_pred_false)
col13.append(focus_false_pred_false)
# ---- Training: only the classifier learns; focus stats sampled every 5 epochs ----
nos_epochs = 150
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
for epoch in range(nos_epochs):  # loop over the dataset multiple times
    focus_true_pred_true =0
    focus_false_pred_true =0
    focus_true_pred_false =0
    focus_false_pred_false =0
    argmax_more_than_half = 0
    argmax_less_than_half =0
    running_loss = 0.0
    epoch_loss = []
    cnt=0
    iteration = desired_num // batch
    #training data set
    for i, data in enumerate(train_loader):
      inputs , labels , fore_idx = data
      inputs = inputs.double()
      inputs, labels = inputs.to("cuda"), labels.to("cuda")
      # zero the parameter gradients
      #optimizer_focus.zero_grad()
      optimizer_classify.zero_grad()
      alphas, avg_images = focus_net(inputs)
      outputs = classify(avg_images)
      # outputs, alphas, avg_images = classify(inputs)
      _, predicted = torch.max(outputs.data, 1)
      # print(outputs)
      # print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1))
      loss = criterion_classify(outputs, labels)
      loss.backward()
      #optimizer_focus.step()
      # Only the classifier is updated; the focus net stays frozen.
      optimizer_classify.step()
      running_loss += loss.item()
      mini = 60
      if cnt % mini == mini-1:    # print every 60 mini-batches
        print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
        epoch_loss.append(running_loss/mini)
        running_loss = 0.0
      cnt=cnt+1
      # Collect focus/prediction statistics only every 5th epoch.
      if epoch % 5 == 0:
        for j in range (batch):
          focus = torch.argmax(alphas[j])
          if(alphas[j][focus] >= 0.5):
            argmax_more_than_half +=1
          else:
            argmax_less_than_half +=1
          if(focus == fore_idx[j] and predicted[j] == labels[j]):
            focus_true_pred_true += 1
          elif(focus != fore_idx[j] and predicted[j] == labels[j]):
            focus_false_pred_true +=1
          elif(focus == fore_idx[j] and predicted[j] != labels[j]):
            focus_true_pred_false +=1
          elif(focus != fore_idx[j] and predicted[j] != labels[j]):
            focus_false_pred_false +=1
    # Early stop once the mean epoch loss is small enough.
    if(np.mean(epoch_loss) <= 0.03):
      break;
    if epoch % 5 == 0:
      col1.append(epoch+1)
      col2.append(argmax_more_than_half)
      col3.append(argmax_less_than_half)
      col4.append(focus_true_pred_true)
      col5.append(focus_false_pred_true)
      col6.append(focus_true_pred_false)
      col7.append(focus_false_pred_false)
      #************************************************************************
      #testing data set
      with torch.no_grad():
        focus_true_pred_true =0
        focus_false_pred_true =0
        focus_true_pred_false =0
        focus_false_pred_false =0
        argmax_more_than_half = 0
        argmax_less_than_half =0
        for data in test_loader:
          inputs, labels , fore_idx = data
          inputs = inputs.double()
          inputs, labels = inputs.to("cuda"), labels.to("cuda")
          alphas, avg_images = focus_net(inputs)
          outputs = classify(avg_images)
          #outputs, alphas, avg_images = classify(inputs)
          _, predicted = torch.max(outputs.data, 1)
          for j in range (batch):
            focus = torch.argmax(alphas[j])
            if(alphas[j][focus] >= 0.5):
              argmax_more_than_half +=1
            else:
              argmax_less_than_half +=1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
              focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
              focus_false_pred_true +=1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
              focus_true_pred_false +=1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
              focus_false_pred_false +=1
      col8.append(argmax_more_than_half)
      col9.append(argmax_less_than_half)
      col10.append(focus_true_pred_true)
      col11.append(focus_false_pred_true)
      col12.append(focus_true_pred_false)
      col13.append(focus_false_pred_false)
print('Finished Training')
# Confirm the focus network stayed frozen (all False expected).
for params in focus_net.parameters():
  print(params.requires_grad)
name = "4_focus_pretrained_classify_random_train_classify"
print(name)
# Persist both networks' weights.
torch.save(focus_net.state_dict(),"/content/weights_focus.pt")
torch.save(classify.state_dict(),"/content/weights_classify.pt")
# Assemble the per-epoch statistics into train/test dataframes.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = col1
df_train[columns[1]] = col2
df_train[columns[2]] = col3
df_train[columns[3]] = col4
df_train[columns[4]] = col5
df_train[columns[5]] = col6
df_train[columns[6]] = col7
df_test[columns[0]] = col1
df_test[columns[1]] = col8
df_test[columns[2]] = col9
df_test[columns[3]] = col10
df_test[columns[4]] = col11
df_test[columns[5]] = col12
df_test[columns[6]] = col13
# Bare expression: displays the dataframe in the notebook.
df_train
# ---- Plots: attention dominance and FTPT/FFPT/FTPF/FFPF over epochs ----
# plt.figure(12,12)
plt.plot(col1,col2, label='argmax > 0.5')
plt.plot(col1,col3, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.show()
plt.plot(col1,col4, label ="focus_true_pred_true ")
plt.plot(col1,col5, label ="focus_false_pred_true ")
plt.plot(col1,col6, label ="focus_true_pred_false ")
plt.plot(col1,col7, label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.savefig("training.png")
plt.show()
# Bare expression: displays the test dataframe in the notebook.
df_test
# plt.figure(12,12)
plt.plot(col1,col8, label='argmax > 0.5')
plt.plot(col1,col9, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.title("On Testing set")
plt.show()
plt.plot(col1,col10, label ="focus_true_pred_true ")
plt.plot(col1,col11, label ="focus_false_pred_true ")
plt.plot(col1,col12, label ="focus_true_pred_false ")
plt.plot(col1,col13, label ="focus_false_pred_false ")
plt.title("On Testing set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.savefig("test.png")
plt.show()
# ---- Post-training evaluation on the TRAIN mosaics (with focus bookkeeping) ----
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
  for data in train_loader:
    inputs, labels , fore_idx = data
    inputs = inputs.double()
    inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
    alphas, avg_images = focus_net(inputs)
    outputs = classify(avg_images)
    _, predicted = torch.max(outputs.data, 1)
    for j in range(labels.size(0)):
      count += 1
      focus = torch.argmax(alphas[j])
      if alphas[j][focus] >= 0.5 :
        argmax_more_than_half += 1
      else:
        argmax_less_than_half += 1
      if(focus == fore_idx[j] and predicted[j] == labels[j]):
        focus_true_pred_true += 1
      elif(focus != fore_idx[j] and predicted[j] == labels[j]):
        focus_false_pred_true += 1
      elif(focus == fore_idx[j] and predicted[j] != labels[j]):
        focus_true_pred_false += 1
      elif(focus != fore_idx[j] and predicted[j] != labels[j]):
        focus_false_pred_false += 1
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
# ---- Post-training evaluation on the TEST mosaics (with focus bookkeeping) ----
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
  for data in test_loader:
    inputs, labels , fore_idx = data
    inputs = inputs.double()
    inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
    alphas, avg_images = focus_net(inputs)
    outputs = classify(avg_images)
    _, predicted = torch.max(outputs.data, 1)
    for j in range(labels.size(0)):
      focus = torch.argmax(alphas[j])
      if alphas[j][focus] >= 0.5 :
        argmax_more_than_half += 1
      else:
        argmax_less_than_half += 1
      if(focus == fore_idx[j] and predicted[j] == labels[j]):
        focus_true_pred_true += 1
      elif(focus != fore_idx[j] and predicted[j] == labels[j]):
        focus_false_pred_true += 1
      elif(focus == fore_idx[j] and predicted[j] != labels[j]):
        focus_true_pred_false += 1
      elif(focus != fore_idx[j] and predicted[j] != labels[j]):
        focus_false_pred_false += 1
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Plain accuracy sweep over the training mosaics (no focus bookkeeping).
correct = 0
total = 0
with torch.no_grad():
    for mosaic_batch, targets, _pos in train_loader:
        mosaic_batch = mosaic_batch.double().to("cuda")
        targets = targets.to("cuda")
        weights, pooled = focus_net(mosaic_batch)
        logits = classify(pooled)
        _, predicted = torch.max(logits.data, 1)
        total += targets.size(0)
        correct += (predicted == targets).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# Plain accuracy sweep over the test mosaics (no focus bookkeeping).
correct = 0
total = 0
with torch.no_grad():
  for data in test_loader:
    inputs, labels , fore_idx = data
    inputs = inputs.double()
    inputs, labels = inputs.to("cuda"), labels.to("cuda")
    alphas, avg_images = focus_net(inputs)
    outputs = classify(avg_images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
# NOTE(review): label says "train set" but this count is over the TEST set.
print("total train set images", total)
| [
"torch.mul",
"torch.nn.CrossEntropyLoss",
"google.colab.drive.mount",
"matplotlib.pyplot.ylabel",
"torch.max",
"torch.nn.AvgPool2d",
"torch.nn.functional.softmax",
"torch.nn.BatchNorm2d",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"pandas.DataFra... | [((264, 293), 'google.colab.drive.mount', 'drive.mount', (['"""/content/drive"""'], {}), "('/content/drive')\n", (275, 293), False, 'from google.colab import drive\n'), ((675, 708), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (698, 708), False, 'import warnings\n'), ((844, 939), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (872, 939), False, 'import torchvision\n'), ((948, 1044), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (976, 1044), False, 'import torchvision\n'), ((1056, 1122), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(10)', 'shuffle': '(True)'}), '(trainset, batch_size=10, shuffle=True)\n', (1083, 1122), False, 'import torch\n'), ((1136, 1202), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(10)', 'shuffle': '(False)'}), '(testset, batch_size=10, shuffle=False)\n', (1163, 1202), False, 'import torch\n'), ((1955, 1984), 'torch.tensor', 'torch.tensor', (['foreground_data'], {}), '(foreground_data)\n', (1967, 1984), False, 'import torch\n'), ((2004, 2034), 'torch.tensor', 'torch.tensor', (['foreground_label'], {}), '(foreground_label)\n', (2016, 2034), False, 'import torch\n'), ((2053, 2082), 'torch.tensor', 'torch.tensor', (['background_data'], {}), '(background_data)\n', (2065, 2082), False, 'import torch\n'), ((2102, 2132), 'torch.tensor', 'torch.tensor', (['background_label'], {}), '(background_label)\n', (2114, 2132), False, 'import torch\n'), ((4305, 4352), 'torch.utils.data.DataLoader', 'DataLoader', (['msd'], 
{'batch_size': 'batch', 'shuffle': '(True)'}), '(msd, batch_size=batch, shuffle=True)\n', (4315, 4352), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((11078, 11147), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(32)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=128, out_channels=32, kernel_size=3, padding=1)\n', (11087, 11147), True, 'import torch.nn as nn\n'), ((11971, 12025), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(test_data, batch_size=batch, shuffle=False)\n', (11981, 12025), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((12077, 12098), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (12096, 12098), True, 'import torch.nn as nn\n'), ((21361, 21375), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21373, 21375), True, 'import pandas as pd\n'), ((21386, 21400), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21398, 21400), True, 'import pandas as pd\n'), ((21823, 21865), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col2'], {'label': '"""argmax > 0.5"""'}), "(col1, col2, label='argmax > 0.5')\n", (21831, 21865), True, 'from matplotlib import pyplot as plt\n'), ((21865, 21907), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col3'], {'label': '"""argmax < 0.5"""'}), "(col1, col3, label='argmax < 0.5')\n", (21873, 21907), True, 'from matplotlib import pyplot as plt\n'), ((21908, 21962), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1, 0.5))\n", (21918, 21962), True, 'from matplotlib import pyplot as plt\n'), ((21963, 21983), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (21973, 21983), True, 'from matplotlib import pyplot as plt\n'), ((21984, 22011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""training data"""'], {}), "('training data')\n", 
(21994, 22011), True, 'from matplotlib import pyplot as plt\n'), ((22012, 22040), 'matplotlib.pyplot.title', 'plt.title', (['"""On Training set"""'], {}), "('On Training set')\n", (22021, 22040), True, 'from matplotlib import pyplot as plt\n'), ((22041, 22051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22049, 22051), True, 'from matplotlib import pyplot as plt\n'), ((22053, 22104), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col4'], {'label': '"""focus_true_pred_true """'}), "(col1, col4, label='focus_true_pred_true ')\n", (22061, 22104), True, 'from matplotlib import pyplot as plt\n'), ((22105, 22157), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col5'], {'label': '"""focus_false_pred_true """'}), "(col1, col5, label='focus_false_pred_true ')\n", (22113, 22157), True, 'from matplotlib import pyplot as plt\n'), ((22158, 22210), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col6'], {'label': '"""focus_true_pred_false """'}), "(col1, col6, label='focus_true_pred_false ')\n", (22166, 22210), True, 'from matplotlib import pyplot as plt\n'), ((22211, 22264), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col7'], {'label': '"""focus_false_pred_false """'}), "(col1, col7, label='focus_false_pred_false ')\n", (22219, 22264), True, 'from matplotlib import pyplot as plt\n'), ((22265, 22293), 'matplotlib.pyplot.title', 'plt.title', (['"""On Training set"""'], {}), "('On Training set')\n", (22274, 22293), True, 'from matplotlib import pyplot as plt\n'), ((22294, 22348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1, 0.5))\n", (22304, 22348), True, 'from matplotlib import pyplot as plt\n'), ((22349, 22369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (22359, 22369), True, 'from matplotlib import pyplot as plt\n'), ((22370, 22397), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""training data"""'], {}), 
"('training data')\n", (22380, 22397), True, 'from matplotlib import pyplot as plt\n'), ((22398, 22425), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""training.png"""'], {}), "('training.png')\n", (22409, 22425), True, 'from matplotlib import pyplot as plt\n'), ((22427, 22437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22435, 22437), True, 'from matplotlib import pyplot as plt\n'), ((22468, 22510), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col8'], {'label': '"""argmax > 0.5"""'}), "(col1, col8, label='argmax > 0.5')\n", (22476, 22510), True, 'from matplotlib import pyplot as plt\n'), ((22510, 22552), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col9'], {'label': '"""argmax < 0.5"""'}), "(col1, col9, label='argmax < 0.5')\n", (22518, 22552), True, 'from matplotlib import pyplot as plt\n'), ((22553, 22607), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1, 0.5))\n", (22563, 22607), True, 'from matplotlib import pyplot as plt\n'), ((22608, 22628), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (22618, 22628), True, 'from matplotlib import pyplot as plt\n'), ((22629, 22655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Testing data"""'], {}), "('Testing data')\n", (22639, 22655), True, 'from matplotlib import pyplot as plt\n'), ((22656, 22683), 'matplotlib.pyplot.title', 'plt.title', (['"""On Testing set"""'], {}), "('On Testing set')\n", (22665, 22683), True, 'from matplotlib import pyplot as plt\n'), ((22684, 22694), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22692, 22694), True, 'from matplotlib import pyplot as plt\n'), ((22696, 22748), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col10'], {'label': '"""focus_true_pred_true """'}), "(col1, col10, label='focus_true_pred_true ')\n", (22704, 22748), True, 'from matplotlib import pyplot as plt\n'), ((22749, 22802), 
'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col11'], {'label': '"""focus_false_pred_true """'}), "(col1, col11, label='focus_false_pred_true ')\n", (22757, 22802), True, 'from matplotlib import pyplot as plt\n'), ((22803, 22856), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col12'], {'label': '"""focus_true_pred_false """'}), "(col1, col12, label='focus_true_pred_false ')\n", (22811, 22856), True, 'from matplotlib import pyplot as plt\n'), ((22857, 22911), 'matplotlib.pyplot.plot', 'plt.plot', (['col1', 'col13'], {'label': '"""focus_false_pred_false """'}), "(col1, col13, label='focus_false_pred_false ')\n", (22865, 22911), True, 'from matplotlib import pyplot as plt\n'), ((22912, 22939), 'matplotlib.pyplot.title', 'plt.title', (['"""On Testing set"""'], {}), "('On Testing set')\n", (22921, 22939), True, 'from matplotlib import pyplot as plt\n'), ((22940, 22994), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1, 0.5))\n", (22950, 22994), True, 'from matplotlib import pyplot as plt\n'), ((22995, 23015), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (23005, 23015), True, 'from matplotlib import pyplot as plt\n'), ((23016, 23042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Testing data"""'], {}), "('Testing data')\n", (23026, 23042), True, 'from matplotlib import pyplot as plt\n'), ((23043, 23066), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (23054, 23066), True, 'from matplotlib import pyplot as plt\n'), ((23067, 23077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23075, 23077), True, 'from matplotlib import pyplot as plt\n'), ((2867, 2890), 'torch.stack', 'torch.stack', (['image_list'], {}), '(image_list)\n', (2878, 2890), False, 'import torch\n'), ((3305, 3322), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (3319, 3322), True, 'import numpy as 
np\n'), ((3334, 3364), 'numpy.random.randint', 'np.random.randint', (['(0)', '(35000)', '(8)'], {}), '(0, 35000, 8)\n', (3351, 3364), True, 'import numpy as np\n'), ((3374, 3401), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15000)'], {}), '(0, 15000)\n', (3391, 3401), True, 'import numpy as np\n'), ((3408, 3431), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3425, 3431), True, 'import numpy as np\n'), ((11015, 11057), 'torch.load', 'torch.load', (['"""/content/classify_weights.pt"""'], {}), "('/content/classify_weights.pt')\n", (11025, 11057), False, 'import torch\n'), ((11616, 11641), 'numpy.random.seed', 'np.random.seed', (['(i + 30000)'], {}), '(i + 30000)\n', (11630, 11641), True, 'import numpy as np\n'), ((11651, 11681), 'numpy.random.randint', 'np.random.randint', (['(0)', '(35000)', '(8)'], {}), '(0, 35000, 8)\n', (11668, 11681), True, 'import numpy as np\n'), ((11691, 11718), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15000)'], {}), '(0, 15000)\n', (11708, 11718), True, 'import numpy as np\n'), ((11725, 11748), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (11742, 11748), True, 'import numpy as np\n'), ((12560, 12575), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12573, 12575), False, 'import torch\n'), ((14860, 14875), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14873, 14875), False, 'import torch\n'), ((23278, 23293), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23291, 23293), False, 'import torch\n'), ((25352, 25367), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25365, 25367), False, 'import torch\n'), ((27211, 27226), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27224, 27226), False, 'import torch\n'), ((27778, 27793), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27791, 27793), False, 'import torch\n'), ((747, 768), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (766, 768), False, 'from 
torchvision import transforms, utils\n'), ((775, 829), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (795, 829), False, 'from torchvision import transforms, utils\n'), ((6128, 6195), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(32)', 'kernel_size': '(3)', 'padding': '(0)'}), '(in_channels=3, out_channels=32, kernel_size=3, padding=0)\n', (6137, 6195), True, 'import torch.nn as nn\n'), ((6213, 6281), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3)', 'padding': '(0)'}), '(in_channels=32, out_channels=64, kernel_size=3, padding=0)\n', (6222, 6281), True, 'import torch.nn as nn\n'), ((6299, 6368), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(0)'}), '(in_channels=64, out_channels=128, kernel_size=3, padding=0)\n', (6308, 6368), True, 'import torch.nn as nn\n'), ((6386, 6456), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(0)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=0)\n', (6395, 6456), True, 'import torch.nn as nn\n'), ((6474, 6544), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(0)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=0)\n', (6483, 6544), True, 'import torch.nn as nn\n'), ((6562, 6632), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=1)\n', (6571, 6632), True, 'import torch.nn as nn\n'), ((6649, 6686), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (6661, 6686), True, 'import torch.nn as nn\n'), ((6710, 
6728), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (6724, 6728), True, 'import torch.nn as nn\n'), ((6752, 6771), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (6766, 6771), True, 'import torch.nn as nn\n'), ((6792, 6812), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.05)'}), '(p=0.05)\n', (6804, 6812), True, 'import torch.nn as nn\n'), ((6833, 6852), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.1)'}), '(p=0.1)\n', (6845, 6852), True, 'import torch.nn as nn\n'), ((6868, 6886), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (6877, 6886), True, 'import torch.nn as nn\n'), ((6901, 6918), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (6910, 6918), True, 'import torch.nn as nn\n'), ((6934, 6951), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(10)'], {}), '(32, 10)\n', (6943, 6951), True, 'import torch.nn as nn\n'), ((6967, 6983), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(2)'], {}), '(10, 2)\n', (6976, 6983), True, 'import torch.nn as nn\n'), ((7095, 7147), 'torch.zeros', 'torch.zeros', (['[batch, 128, 3, 3]'], {'dtype': 'torch.float64'}), '([batch, 128, 3, 3], dtype=torch.float64)\n', (7106, 7147), False, 'import torch\n'), ((7154, 7198), 'torch.zeros', 'torch.zeros', (['[batch, 9]'], {'dtype': 'torch.float64'}), '([batch, 9], dtype=torch.float64)\n', (7165, 7198), False, 'import torch\n'), ((7207, 7241), 'torch.zeros', 'torch.zeros', (['[batch, 9, 128, 3, 3]'], {}), '([batch, 9, 128, 3, 3])\n', (7218, 7241), False, 'import torch\n'), ((7452, 7471), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (7461, 7471), True, 'import torch.nn.functional as F\n'), ((8072, 8081), 'torch.nn.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (8078, 8081), True, 'import torch.nn.functional as F\n'), ((8090, 8099), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (8096, 8099), True, 'import torch.nn.functional as F\n'), ((9302, 
9369), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(32)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=3, out_channels=32, kernel_size=3, padding=1)\n', (9311, 9369), True, 'import torch.nn as nn\n'), ((9387, 9455), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=32, out_channels=64, kernel_size=3, padding=1)\n', (9396, 9455), True, 'import torch.nn as nn\n'), ((9473, 9542), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=64, out_channels=128, kernel_size=3, padding=1)\n', (9482, 9542), True, 'import torch.nn as nn\n'), ((9560, 9630), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=1)\n', (9569, 9630), True, 'import torch.nn as nn\n'), ((9648, 9718), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=1)\n', (9657, 9718), True, 'import torch.nn as nn\n'), ((9736, 9806), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=128, out_channels=128, kernel_size=3, padding=1)\n', (9745, 9806), True, 'import torch.nn as nn\n'), ((9823, 9871), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=2, stride=2, padding=1)\n', (9835, 9871), True, 'import torch.nn as nn\n'), ((9894, 9912), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (9908, 9912), True, 'import torch.nn as nn\n'), ((9936, 9955), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (9950, 9955), True, 'import torch.nn as 
nn\n'), ((9976, 9996), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.05)'}), '(p=0.05)\n', (9988, 9996), True, 'import torch.nn as nn\n'), ((10017, 10036), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.1)'}), '(p=0.1)\n', (10029, 10036), True, 'import torch.nn as nn\n'), ((10071, 10098), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (10083, 10098), True, 'import torch.nn as nn\n'), ((10114, 10132), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (10123, 10132), True, 'import torch.nn as nn\n'), ((10147, 10164), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (10156, 10164), True, 'import torch.nn as nn\n'), ((10180, 10197), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(10)'], {}), '(32, 10)\n', (10189, 10197), True, 'import torch.nn as nn\n'), ((10213, 10229), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(3)'], {}), '(10, 3)\n', (10222, 10229), True, 'import torch.nn as nn\n'), ((12858, 12884), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (12867, 12884), False, 'import torch\n'), ((15157, 15183), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (15166, 15183), False, 'import torch\n'), ((17855, 17881), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (17864, 17881), False, 'import torch\n'), ((19010, 19029), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (19017, 19029), True, 'import numpy as np\n'), ((23576, 23602), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (23585, 23602), False, 'import torch\n'), ((25649, 25675), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (25658, 25675), False, 'import torch\n'), ((27478, 27504), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (27487, 27504), False, 'import torch\n'), ((28044, 28070), 'torch.max', 'torch.max', 
(['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (28053, 28070), False, 'import torch\n'), ((12953, 12976), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (12965, 12976), False, 'import torch\n'), ((15235, 15258), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (15247, 15258), False, 'import torch\n'), ((19448, 19463), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19461, 19463), False, 'import torch\n'), ((23671, 23694), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (23683, 23694), False, 'import torch\n'), ((25727, 25750), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (25739, 25750), False, 'import torch\n'), ((7613, 7658), 'torch.mul', 'torch.mul', (['x1[:, None, None, None]', 'ftr[:, i]'], {}), '(x1[:, None, None, None], ftr[:, i])\n', (7622, 7658), False, 'import torch\n'), ((18445, 18468), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (18457, 18468), False, 'import torch\n'), ((19985, 20011), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (19994, 20011), False, 'import torch\n'), ((20063, 20086), 'torch.argmax', 'torch.argmax', (['alphas[j]'], {}), '(alphas[j])\n', (20075, 20086), False, 'import torch\n')] |
# Python > Numpy > Inner and Outer
# Use NumPy to find the inner and outer product of arrays.
#
# https://www.hackerrank.com/challenges/np-inner-and-outer/problem
#
import numpy

# Parse whitespace-separated tokens from stdin into integer arrays.
# NOTE: `numpy.int` was a deprecated alias for the builtin `int` (deprecated
# in NumPy 1.20, removed in 1.24); the builtin `int` is the correct dtype.
u = numpy.array(input().split(), int)
v = numpy.array(input().split(), int)
print(numpy.inner(u, v))  # inner (dot) product
print(numpy.outer(u, v))  # outer product matrix
| [
"numpy.outer",
"numpy.inner"
] | [((275, 292), 'numpy.inner', 'numpy.inner', (['u', 'v'], {}), '(u, v)\n', (286, 292), False, 'import numpy\n'), ((300, 317), 'numpy.outer', 'numpy.outer', (['u', 'v'], {}), '(u, v)\n', (311, 317), False, 'import numpy\n')] |
import numpy as np
from openmdao.api import ExplicitComponent
from pycycle.constants import R_UNIVERSAL_SI
class PsCalc(ExplicitComponent):
    """Mach number, Area calculation for when Ps is known"""

    def setup(self):
        """Declare thermodynamic inputs, flow outputs, and analytic partials."""
        # `n_moles` is combined with the universal gas constant to form the
        # mixture-specific gas constant used in the speed-of-sound calc.
        self.add_input('gamma', val=1.4)
        self.add_input('n_moles', shape=1)
        self.add_input('Ts', val=518., units="degK", desc="Static temp")
        self.add_input('ht', val=0., units="J/kg", desc="Total enthalpy reference condition")
        self.add_input('hs', val=0., units="J/kg", desc="Static enthalpy")
        self.add_input('W', val=0.0, desc="mass flow rate", units="kg/s")
        self.add_input('rho', val=1.0, desc="density", units="kg/m**3")

        self.add_output('MN', val=1.0, desc="computed mach number")
        self.add_output('V', val=1.0, units="m/s", desc="computed speed", res_ref=1e3)
        self.add_output('Vsonic', val=1.0, units="m/s", desc="computed speed of sound", res_ref=1e3)
        self.add_output('area', val=1.0, units="m**2", desc="computed area")

        # Derivatives are supplied analytically in compute_partials below.
        self.declare_partials('V', ['ht', 'hs'])
        self.declare_partials('Vsonic', ['gamma', 'n_moles', 'Ts'])
        self.declare_partials('MN', ['gamma', 'n_moles', 'Ts', 'hs', 'ht'])
        self.declare_partials('area', ['rho', 'W', 'hs', 'ht'])

    def compute(self, inputs, outputs):
        """Compute speed of sound, velocity, Mach number and flow area."""
        # Speed of sound: a = sqrt(gamma * R * Ts), with R = R_universal * n_moles.
        outputs['Vsonic'] = Vsonic = np.sqrt(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])

        # If ht < hs then V will be imaginary, so use an inverse relationship to allow solution process to continue
        if inputs['ht'] >= inputs['hs']:
            # Energy equation: V = sqrt(2 * (ht - hs)).
            outputs['V'] = V = np.sqrt(2.0 * (inputs['ht'] - inputs['hs']))
        else:
            # print('Warning: in', self.pathname, 'ht < hs, inverting relationship to get a real velocity, ht = ', inputs['ht'], 'hs = ', inputs['hs'])
            outputs['V'] = V = np.sqrt(2.0 * (inputs['hs'] - inputs['ht']))

        outputs['MN'] = V / Vsonic
        # Continuity: area = W / (rho * V).
        outputs['area'] = inputs['W'] / (inputs['rho'] * V)

    def compute_partials(self, inputs, J):
        """Analytic derivatives of the outputs declared in setup."""
        Vsonic = np.sqrt(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])
        # d sqrt(k*x)/dx = sqrt(k*x) / (2*x) for each factor of the product.
        J['Vsonic','gamma'] = Vsonic / (2.0 * inputs['gamma'])
        J['Vsonic','n_moles'] = Vsonic / (2.0 * inputs['n_moles'])
        J['Vsonic','Ts'] = Vsonic / (2.0 * inputs['Ts'])

        # Mirror the branch taken in compute so derivatives stay consistent
        # with the (possibly inverted) velocity relationship.
        if inputs['ht'] >= inputs['hs']:
            V = np.sqrt(2.0 * (inputs['ht'] - inputs['hs']))
            J['V','ht'] = 1.0 / V
            J['V','hs'] = -1.0 / V
        else:
            V = np.sqrt(2.0 * (inputs['hs'] - inputs['ht']))
            J['V','hs'] = 1.0 / V
            J['V','ht'] = -1.0 / V

        # MN = V / Vsonic: chain rule through numerator and denominator.
        J['MN','ht'] = 1.0 / Vsonic * J['V','ht']
        J['MN','hs'] = 1.0 / Vsonic * J['V','hs']
        J['MN','gamma'] = -V / Vsonic**2 * J['Vsonic','gamma']
        J['MN','n_moles'] = -V / Vsonic**2 * J['Vsonic','n_moles']
        J['MN','Ts'] = -V / Vsonic**2 * J['Vsonic','Ts']

        # area = W / (rho * V): quotient rule; V depends on ht and hs.
        J['area','W'] = 1.0 / (inputs['rho'] * V)
        J['area','rho'] = -inputs['W'] / (inputs['rho']**2 * V)
        J['area','ht'] = -inputs['W'] / (inputs['rho'] * V**2) * J['V','ht']
        J['area','hs'] = -inputs['W'] / (inputs['rho'] * V**2) * J['V','hs']
| [
"numpy.sqrt"
] | [((1370, 1446), 'numpy.sqrt', 'np.sqrt', (["(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])"], {}), "(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])\n", (1377, 1446), True, 'import numpy as np\n'), ((2082, 2158), 'numpy.sqrt', 'np.sqrt', (["(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])"], {}), "(inputs['gamma'] * R_UNIVERSAL_SI * inputs['n_moles'] * inputs['Ts'])\n", (2089, 2158), True, 'import numpy as np\n'), ((1636, 1680), 'numpy.sqrt', 'np.sqrt', (["(2.0 * (inputs['ht'] - inputs['hs']))"], {}), "(2.0 * (inputs['ht'] - inputs['hs']))\n", (1643, 1680), True, 'import numpy as np\n'), ((1878, 1922), 'numpy.sqrt', 'np.sqrt', (["(2.0 * (inputs['hs'] - inputs['ht']))"], {}), "(2.0 * (inputs['hs'] - inputs['ht']))\n", (1885, 1922), True, 'import numpy as np\n'), ((2405, 2449), 'numpy.sqrt', 'np.sqrt', (["(2.0 * (inputs['ht'] - inputs['hs']))"], {}), "(2.0 * (inputs['ht'] - inputs['hs']))\n", (2412, 2449), True, 'import numpy as np\n'), ((2549, 2593), 'numpy.sqrt', 'np.sqrt', (["(2.0 * (inputs['hs'] - inputs['ht']))"], {}), "(2.0 * (inputs['hs'] - inputs['ht']))\n", (2556, 2593), True, 'import numpy as np\n')] |
"""Toy cluttered table domain.
This environment is created to test our planner's ability to handle
failures reported by the environment.
"""
from typing import Dict, List, Optional, Sequence, Set
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gym.spaces import Box
from predicators.src import utils
from predicators.src.envs import BaseEnv
from predicators.src.settings import CFG
from predicators.src.structs import Action, Array, GroundAtom, Object, \
ParameterizedOption, Predicate, State, Task, Type
class ClutteredTableEnv(BaseEnv):
    """Toy cluttered table domain."""

    def __init__(self) -> None:
        super().__init__()
        # Types
        self._can_type = Type(
            "can", ["pose_x", "pose_y", "radius", "is_grasped", "is_trashed"])
        # Predicates
        self._HandEmpty = Predicate("HandEmpty", [], self._HandEmpty_holds)
        self._Holding = Predicate("Holding", [self._can_type],
                                  self._Holding_holds)
        self._Untrashed = Predicate("Untrashed", [self._can_type],
                                    self._Untrashed_holds)
        # Options
        self._Grasp = utils.SingletonParameterizedOption(
            "Grasp",
            self._Grasp_policy,
            types=[self._can_type],
            params_space=Box(0, 1, (4, )))
        self._Dump = utils.SingletonParameterizedOption(
            "Dump", self._Dump_policy)

    @classmethod
    def get_name(cls) -> str:
        return "cluttered_table"

    def simulate(self, state: State, action: Action) -> State:
        """Apply the action to the state: an all-zeros action dumps the
        currently grasped can (if any); otherwise the action vector is a
        grasp attempt whose end point must land inside a can."""
        assert self.action_space.contains(action.arr)
        next_state = state.copy()
        # Figure out which can is currently grasped, if any.
        grasped_can = None
        for can in state:
            if state.get(can, "is_grasped") > 0.5:
                assert grasped_can is None, "Multiple cans grasped?"
                assert state.get(can, "is_trashed") < 0.5, \
                    "Grasped a can that has been trashed?"
                grasped_can = can
        if np.all(action.arr == 0.0):
            # Handle dumping action.
            if grasped_can is not None:
                # Trashed cans are moved far off the table so they can never
                # collide with or be grasped again.
                next_state.set(grasped_can, "pose_x", -999)
                next_state.set(grasped_can, "pose_y", -999)
                next_state.set(grasped_can, "is_grasped", 0.0)
                next_state.set(grasped_can, "is_trashed", 1.0)
            return next_state
        # Handle grasping action.
        if grasped_can is not None:
            return next_state  # can't grasp while already grasping
        start_x, start_y, end_x, end_y = action.arr
        desired_can = None
        for can in state:
            this_x = state.get(can, "pose_x")
            this_y = state.get(can, "pose_y")
            this_radius = state.get(can, "radius")
            if np.linalg.norm([end_x - this_x, end_y - this_y]) < this_radius:
                assert desired_can is None
                desired_can = can
        if desired_can is None:
            return next_state  # end point wasn't at any can
        # May raise utils.EnvironmentFailure on collision.
        self._check_collisions(start_x, start_y, end_x, end_y, state,
                               desired_can)
        # No collisions, update state and return.
        next_state.set(desired_can, "is_grasped", 1.0)
        return next_state

    def _generate_train_tasks(self) -> List[Task]:
        return self._get_tasks(num=CFG.num_train_tasks, train_or_test="train")

    def _generate_test_tasks(self) -> List[Task]:
        return self._get_tasks(num=CFG.num_test_tasks, train_or_test="test")

    @property
    def predicates(self) -> Set[Predicate]:
        return {self._HandEmpty, self._Holding, self._Untrashed}

    @property
    def goal_predicates(self) -> Set[Predicate]:
        return {self._Holding}

    @property
    def types(self) -> Set[Type]:
        return {self._can_type}

    @property
    def options(self) -> Set[ParameterizedOption]:
        return {self._Grasp, self._Dump}

    @property
    def action_space(self) -> Box:
        # The action_space is 4-dimensional. The first two dimensions are the
        # start point of the vector corresponding to the grasp approach. The
        # last two dimensions are the end point. Dumping is a special action
        # where all 4 dimensions are 0.
        return Box(0, 1, (4, ))

    def render_state_plt(
            self,
            state: State,
            task: Task,
            action: Optional[Action] = None,
            caption: Optional[str] = None) -> matplotlib.figure.Figure:
        """Render the table top: cans as circles (goal can green, others red,
        a gray halo around a grasped can) and the action as an arrow."""
        fig, ax = plt.subplots(1, 1)
        ax.set_aspect('equal')
        assert len(task.goal) == 1
        goal_atom = next(iter(task.goal))
        assert goal_atom.predicate == self._Holding
        assert len(goal_atom.objects) == 1
        goal_can = goal_atom.objects[0]
        # Draw cans
        lw = 1
        goal_color = "green"
        other_color = "red"
        lcolor = "black"
        for can in state:
            if state.get(can, "is_grasped"):
                # Translucent halo marks the grasped can.
                circ = plt.Circle(
                    (state.get(can, "pose_x"), state.get(can, "pose_y")),
                    1.75 * state.get(can, "radius"),
                    facecolor="gray",
                    alpha=0.5)
                ax.add_patch(circ)
            if can == goal_can:
                c = goal_color
            else:
                c = other_color
            circ = plt.Circle(
                (state.get(can, "pose_x"), state.get(can, "pose_y")),
                state.get(can, "radius"),
                linewidth=lw,
                edgecolor=lcolor,
                facecolor=c)
            ax.add_patch(circ)
        # Draw action
        if action:
            start_x, start_y, end_x, end_y = action.arr
            dx, dy = end_x - start_x, end_y - start_y
            arrow = plt.Arrow(start_x, start_y, dx, dy, width=0.1)
            ax.add_patch(arrow)
        plt.xlim(-0.1, 1.1)
        plt.ylim(-0.1, 1.1)
        plt.xticks([])
        plt.yticks([])
        if caption is not None:
            plt.suptitle(caption, wrap=True)
        plt.tight_layout()
        return fig

    def _get_tasks(self, num: int, train_or_test: str) -> List[Task]:
        """Create `num` tasks; the goal is always to hold can0."""
        tasks = []
        cans = []
        # Allocate enough can objects for the larger of the two settings so
        # the same object list can serve both train and test tasks.
        for i in range(
                max(CFG.cluttered_table_num_cans_train,
                    CFG.cluttered_table_num_cans_test)):
            cans.append(Object(f"can{i}", self._can_type))
        goal = {GroundAtom(self._Holding, [cans[0]])}
        for _ in range(num):
            tasks.append(
                Task(self._create_initial_state(cans, train_or_test), goal))
        return tasks

    def _create_initial_state(self, cans: List[Object],
                              train_or_test: str) -> State:
        """Sample non-overlapping can poses using the split-specific RNG."""
        data: Dict[Object, Array] = {}
        assert train_or_test in ("train", "test")
        if train_or_test == "train":
            num_cans = CFG.cluttered_table_num_cans_train
            rng = self._train_rng
        elif train_or_test == "test":
            num_cans = CFG.cluttered_table_num_cans_test
            rng = self._test_rng
        radius = CFG.cluttered_table_can_radius
        for i in range(num_cans):
            can = cans[i]
            # Rejection-sample until this can doesn't intersect a placed one.
            while True:
                # keep cans near center of table to allow grasps from all angles
                pose = np.array(rng.uniform(0.25, 0.75, size=2),
                                dtype=np.float32)
                if not self._any_intersection(pose, radius, data):
                    break
            # [pose_x, pose_y, radius, is_grasped, is_trashed]
            data[can] = np.array([pose[0], pose[1], radius, 0.0, 0.0])
        return State(data)

    @staticmethod
    def _HandEmpty_holds(state: State, objects: Sequence[Object]) -> bool:
        # True iff no can in the state is currently grasped.
        assert not objects
        for can in state:
            if state.get(can, "is_grasped") > 0.5:
                return False
        return True

    @staticmethod
    def _Holding_holds(state: State, objects: Sequence[Object]) -> bool:
        can, = objects
        return state.get(can, "is_grasped") > 0.5

    @staticmethod
    def _Untrashed_holds(state: State, objects: Sequence[Object]) -> bool:
        can, = objects
        return state.get(can, "is_trashed") < 0.5

    @staticmethod
    def _Grasp_policy(state: State, memory: Dict, objects: Sequence[Object],
                      params: Array) -> Action:
        del state, memory, objects  # unused
        return Action(params)  # action is simply the parameter

    @staticmethod
    def _Dump_policy(state: State, memory: Dict, objects: Sequence[Object],
                     params: Array) -> Action:
        del state, memory, objects, params  # unused
        return Action(np.zeros(4,
                               dtype=np.float32))  # no parameter for dumping

    @staticmethod
    def _any_intersection(pose: Array, radius: float,
                          data: Dict[Object, Array]) -> bool:
        """Return True iff a can at `pose` with `radius` would overlap any
        can already placed in `data`."""
        for other in data:
            other_feats = data[other]
            other_x = other_feats[0]
            other_y = other_feats[1]
            other_radius = other_feats[2]
            distance = np.linalg.norm([other_x - pose[0], other_y - pose[1]])
            if distance <= (radius + other_radius):
                return True
        return False

    @staticmethod
    def _check_collisions(start_x: float,
                          start_y: float,
                          end_x: float,
                          end_y: float,
                          state: State,
                          ignored_can: Optional[Object] = None) -> None:
        """Handle collision checking.
        We'll just threshold the angle between the grasp approach vector
        and the vector between (end_x, end_y) and any other can. Doing
        an actually correct geometric computation would involve the
        radii somehow, but we don't really care about this. The argument
        ignored_can is a can with which we don't care about colliding.
        This is generally the desired can, but when attempting to place
        a can, could also be the grasped can.
        """
        vec1 = np.array([end_x - start_x, end_y - start_y])
        colliding_can = None
        colliding_can_max_dist = float("-inf")
        for can in state:
            if can == ignored_can:
                continue
            this_x = state.get(can, "pose_x")
            this_y = state.get(can, "pose_y")
            vec2 = np.array([end_x - this_x, end_y - this_y])
            # Angle between the approach vector and the end-to-can vector;
            # clip guards arccos against floating-point values outside [-1, 1].
            angle = np.arccos(
                np.clip(
                    vec1.dot(vec2) /
                    (np.linalg.norm(vec1) * np.linalg.norm(vec2)), -1.0, 1.0))
            if abs(angle) < CFG.cluttered_table_collision_angle_thresh:
                # Report only the farthest offending can from the end point.
                dist = np.linalg.norm(vec2)
                if dist > colliding_can_max_dist:
                    colliding_can_max_dist = float(dist)
                    colliding_can = can
        if colliding_can is not None:
            raise utils.EnvironmentFailure(
                "collision", {"offending_objects": {colliding_can}})
class ClutteredTablePlaceEnv(ClutteredTableEnv):
    """Toy cluttered table domain (place version).
    This version places grasped cans instead of dumping them, and has
    several additional constraints. There are two cans, a goal can and
    an obstructing can in front of it. The action space is restricted so
    that actions can only begin from the point (0.2,1) and end in the
    [0,0.4] by [0,1.0] region. The goal behavior is to learn to pick up
    the colliding can and place it out of the way of the goal can.
    """

    def __init__(self) -> None:
        super().__init__()
        # Replace the parent's Dump option with a Place option, and re-create
        # Grasp with this environment's parameter space.
        self._Place = utils.SingletonParameterizedOption(
            "Place",
            self._Place_policy,
            types=[self._can_type],
            params_space=Box(np.array([0, 0, 0, 0]), np.array([1, 1, 1, 1])))
        self._Grasp = utils.SingletonParameterizedOption(
            "Grasp",
            self._Grasp_policy,
            types=[self._can_type],
            params_space=Box(np.array([0, 0, 0, 0]), np.array([1, 1, 1, 1])))

    @classmethod
    def get_name(cls) -> str:
        return "cluttered_table_place"

    @property
    def options(self) -> Set[ParameterizedOption]:
        return {self._Grasp, self._Place}

    @property
    def action_space(self) -> Box:
        # The action's starting x,y coordinates are always (0.2,1), and the
        # ending coordinates are in a more narrow region than in the original
        # task. Constraints make this version of the task more challenging.
        return Box(np.array([0, 0, 0, 0]), np.array([1, 1, 1, 1]))

    @staticmethod
    def _Place_policy(state: State, memory: Dict, objects: Sequence[Object],
                      params: Array) -> Action:
        del state, memory, objects  # unused
        return Action(params)  # action is simply the parameter

    def simulate(self, state: State, action: Action) -> State:
        """Apply the action: if a can is grasped, the action places it at the
        action's end point; otherwise the action is a grasp attempt as in
        the parent environment."""
        assert self.action_space.contains(action.arr)
        next_state = state.copy()
        # Figure out which can is currently grasped, if any.
        grasped_can = None
        for can in state:
            if state.get(can, "is_grasped") > 0.5:
                assert grasped_can is None, "Multiple cans grasped?"
                assert state.get(can, "is_trashed") < 0.5, \
                    "Grasped a can that has been trashed?"
                grasped_can = can
        # If there is a grasped can, use action vector to try to place the can.
        if grasped_can is not None:
            start_x, start_y, end_x, end_y = action.arr
            next_state.set(grasped_can, "pose_x", end_x)
            next_state.set(grasped_can, "pose_y", end_y)
            next_state.set(grasped_can, "is_grasped", 0.0)
            # May raise utils.EnvironmentFailure on collision.
            self._check_collisions(start_x, start_y, end_x, end_y, state,
                                   grasped_can)
            return next_state
        # If no grasped can, use action vector to try to grasp a desired can.
        start_x, start_y, end_x, end_y = action.arr
        desired_can = None
        for can in state:
            this_x = state.get(can, "pose_x")
            this_y = state.get(can, "pose_y")
            this_radius = state.get(can, "radius")
            if np.linalg.norm([end_x - this_x, end_y - this_y]) < this_radius:
                assert desired_can is None
                desired_can = can
        if desired_can is None:
            return next_state  # end point wasn't at any can
        self._check_collisions(start_x, start_y, end_x, end_y, state,
                               desired_can)
        # No collisions, update state and return.
        next_state.set(desired_can, "is_grasped", 1.0)
        return next_state

    def _create_initial_state(self, cans: List[Object],
                              train_or_test: str) -> State:
        """Deterministic two-can layout: an obstructing can in the middle of
        the action region and the goal can behind it, randomly left/right."""
        data: Dict[Object, Array] = {}
        radius = CFG.cluttered_table_can_radius
        # The goal can is placed behind an obstructing can and randomly either
        # on the left or right. The obstructing can is in the middle of the
        # action space.
        goal_x = 0.3 if self._train_rng.uniform() < 0.5 else 0.1
        # Always use exactly two cans.
        data[cans[0]] = np.array([goal_x, 0.8, radius, 0.0, 0.0])
        data[cans[1]] = np.array([0.2, 0.6, radius, 0.0, 0.0])
        return State(data)
| [
"numpy.array",
"numpy.linalg.norm",
"predicators.src.utils.EnvironmentFailure",
"predicators.src.structs.Predicate",
"predicators.src.utils.SingletonParameterizedOption",
"predicators.src.structs.State",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xticks",
"predicators... | [((714, 785), 'predicators.src.structs.Type', 'Type', (['"""can"""', "['pose_x', 'pose_y', 'radius', 'is_grasped', 'is_trashed']"], {}), "('can', ['pose_x', 'pose_y', 'radius', 'is_grasped', 'is_trashed'])\n", (718, 785), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((846, 895), 'predicators.src.structs.Predicate', 'Predicate', (['"""HandEmpty"""', '[]', 'self._HandEmpty_holds'], {}), "('HandEmpty', [], self._HandEmpty_holds)\n", (855, 895), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((920, 979), 'predicators.src.structs.Predicate', 'Predicate', (['"""Holding"""', '[self._can_type]', 'self._Holding_holds'], {}), "('Holding', [self._can_type], self._Holding_holds)\n", (929, 979), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1040, 1103), 'predicators.src.structs.Predicate', 'Predicate', (['"""Untrashed"""', '[self._can_type]', 'self._Untrashed_holds'], {}), "('Untrashed', [self._can_type], self._Untrashed_holds)\n", (1049, 1103), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1369, 1430), 'predicators.src.utils.SingletonParameterizedOption', 'utils.SingletonParameterizedOption', (['"""Dump"""', 'self._Dump_policy'], {}), "('Dump', self._Dump_policy)\n", (1403, 1430), False, 'from predicators.src import utils\n'), ((2076, 2101), 'numpy.all', 'np.all', (['(action.arr == 0.0)'], {}), '(action.arr == 0.0)\n', (2082, 2101), True, 'import numpy as np\n'), ((4339, 4354), 'gym.spaces.Box', 'Box', (['(0)', '(1)', '(4,)'], {}), '(0, 1, (4,))\n', (4342, 4354), False, 'from gym.spaces import Box\n'), ((4586, 4604), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4598, 4604), 
True, 'import matplotlib.pyplot as plt\n'), ((5940, 5959), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (5948, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5968, 5987), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (5976, 5987), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6010), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6006, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6033), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6029, 6033), True, 'import matplotlib.pyplot as plt\n'), ((6119, 6137), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6135, 6137), True, 'import matplotlib.pyplot as plt\n'), ((7701, 7712), 'predicators.src.structs.State', 'State', (['data'], {}), '(data)\n', (7706, 7712), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((8496, 8510), 'predicators.src.structs.Action', 'Action', (['params'], {}), '(params)\n', (8502, 8510), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((10183, 10227), 'numpy.array', 'np.array', (['[end_x - start_x, end_y - start_y]'], {}), '([end_x - start_x, end_y - start_y])\n', (10191, 10227), True, 'import numpy as np\n'), ((12920, 12934), 'predicators.src.structs.Action', 'Action', (['params'], {}), '(params)\n', (12926, 12934), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((15337, 15378), 'numpy.array', 'np.array', (['[goal_x, 0.8, radius, 0.0, 0.0]'], {}), '([goal_x, 0.8, radius, 0.0, 0.0])\n', (15345, 15378), True, 'import numpy as np\n'), ((15403, 15441), 'numpy.array', 'np.array', (['[0.2, 0.6, radius, 0.0, 0.0]'], {}), '([0.2, 0.6, radius, 0.0, 0.0])\n', (15411, 15441), True, 'import numpy 
as np\n'), ((15457, 15468), 'predicators.src.structs.State', 'State', (['data'], {}), '(data)\n', (15462, 15468), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((5853, 5899), 'matplotlib.pyplot.Arrow', 'plt.Arrow', (['start_x', 'start_y', 'dx', 'dy'], {'width': '(0.1)'}), '(start_x, start_y, dx, dy, width=0.1)\n', (5862, 5899), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6110), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['caption'], {'wrap': '(True)'}), '(caption, wrap=True)\n', (6090, 6110), True, 'import matplotlib.pyplot as plt\n'), ((6477, 6513), 'predicators.src.structs.GroundAtom', 'GroundAtom', (['self._Holding', '[cans[0]]'], {}), '(self._Holding, [cans[0]])\n', (6487, 6513), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((7639, 7685), 'numpy.array', 'np.array', (['[pose[0], pose[1], radius, 0.0, 0.0]'], {}), '([pose[0], pose[1], radius, 0.0, 0.0])\n', (7647, 7685), True, 'import numpy as np\n'), ((8762, 8791), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (8770, 8791), True, 'import numpy as np\n'), ((9191, 9245), 'numpy.linalg.norm', 'np.linalg.norm', (['[other_x - pose[0], other_y - pose[1]]'], {}), '([other_x - pose[0], other_y - pose[1]])\n', (9205, 9245), True, 'import numpy as np\n'), ((10501, 10543), 'numpy.array', 'np.array', (['[end_x - this_x, end_y - this_y]'], {}), '([end_x - this_x, end_y - this_y])\n', (10509, 10543), True, 'import numpy as np\n'), ((11035, 11112), 'predicators.src.utils.EnvironmentFailure', 'utils.EnvironmentFailure', (['"""collision"""', "{'offending_objects': {colliding_can}}"], {}), "('collision', {'offending_objects': {colliding_can}})\n", (11059, 11112), False, 'from predicators.src import utils\n'), ((12668, 12690), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', 
(12676, 12690), True, 'import numpy as np\n'), ((12692, 12714), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (12700, 12714), True, 'import numpy as np\n'), ((1330, 1345), 'gym.spaces.Box', 'Box', (['(0)', '(1)', '(4,)'], {}), '(0, 1, (4,))\n', (1333, 1345), False, 'from gym.spaces import Box\n'), ((2857, 2905), 'numpy.linalg.norm', 'np.linalg.norm', (['[end_x - this_x, end_y - this_y]'], {}), '([end_x - this_x, end_y - this_y])\n', (2871, 2905), True, 'import numpy as np\n'), ((6426, 6459), 'predicators.src.structs.Object', 'Object', (['f"""can{i}"""', 'self._can_type'], {}), "(f'can{i}', self._can_type)\n", (6432, 6459), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((10811, 10831), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (10825, 10831), True, 'import numpy as np\n'), ((14347, 14395), 'numpy.linalg.norm', 'np.linalg.norm', (['[end_x - this_x, end_y - this_y]'], {}), '([end_x - this_x, end_y - this_y])\n', (14361, 14395), True, 'import numpy as np\n'), ((11900, 11922), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (11908, 11922), True, 'import numpy as np\n'), ((11924, 11946), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (11932, 11946), True, 'import numpy as np\n'), ((12125, 12147), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (12133, 12147), True, 'import numpy as np\n'), ((12149, 12171), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (12157, 12171), True, 'import numpy as np\n'), ((10658, 10678), 'numpy.linalg.norm', 'np.linalg.norm', (['vec1'], {}), '(vec1)\n', (10672, 10678), True, 'import numpy as np\n'), ((10681, 10701), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (10695, 10701), True, 'import numpy as np\n')] |
"""
CapsuleNet 的模型类
"""
import tensorflow as tf
import numpy as np
def capsules_generator(X):
    """
    Capsule (vector-neuron) generator.

    :param X: input tensor [?, 256, 256, 1]
    :return: collection of capsules [?, 107648, 22, 16]
    """
    # leftover debug print of the static input shape
    print('11111111111', X.get_shape().as_list())
    # X = tf.reshape(X, [-1, 28, 28])
    # X = tf.expand_dims(X, axis=-1)
    '''
    Convolution layer.
    ** expands the dict entries into keyword arguments.
    conv1 has shape [?, 124, 124, 256]'''
    conv1_params = {
        "filters": 256,
        "kernel_size": 9,
        "strides": 2,
        "padding": "valid",
        "activation": tf.nn.relu,
    }
    conv1 = tf.layers.conv2d(X, name="conv1", **conv1_params)
    '''
    Primary Capsules layer -> produces vectors.
    Per the paper, this layer uses 32 filters (depth = 256 = previous
    channel count) applied 8 times, producing 8-dimensional vectors.
    conv2 has shape [?, 58, 58, 256], where 256 = 32 * 8'''
    caps1_n_filter = 32
    caps1_n_dims = 8
    conv2_params = {
        "filters": caps1_n_filter * caps1_n_dims,
        "kernel_size": 9,
        "strides": 2,
        "padding": "valid",
        "activation": tf.nn.relu
    }
    conv2 = tf.layers.conv2d(conv1, name="conv2", **conv2_params)
    '''
    Every capsule here (1x8 vector, PrimaryCaps) is fully connected to
    every capsule of the next layer (1x16 vector, DigitCaps), so it is
    easiest to build one tensor holding 58*58*32 = 107648 8-D capsules.
    Hence reshape conv2 from [?, 58, 58, 256] to [?, 107648, 8],
    i.e. 58*58*256 -> 107648*8 '''
    wh = 58 # 58
    caps1_n_caps = caps1_n_filter * wh * wh # 107648
    caps1_raw = tf.reshape(conv2, [-1, caps1_n_caps, caps1_n_dims], name="caps1_raw") # [?, 107648, 8]
    # Apply the squashing non-linearity; shape stays [?, 107648, 8].
    # (The original author suspected this also reduces later computation.)
    caps1_output = squash(caps1_raw, axis=2, name="caps1_output")
    '''
    Digit Capsules layer -> vector neurons appear.
    Each vector neuron is multiplied by its transformation matrix W.
    '''
    caps2_n_caps = 22 # number of capsules in the next layer = number of classes
    caps2_n_dims = 16
    init_sigma = 0.01
    # randomly initialised pose matrices; W_tiled is [?, 107648, 22, 16, 8]
    W_init = tf.random_normal(
        shape=(1, caps1_n_caps, caps2_n_caps, caps2_n_dims, caps1_n_dims),
        stddev=init_sigma, dtype=tf.float32, name="W_init")
    W = tf.Variable(W_init, name="W")
    batch_size = tf.shape(X)[0]
    W_tiled = tf.tile(W, [batch_size, 1, 1, 1, 1], name="W_tiled")
    '''
    e.g. - tf.expand_dims inserts a dimension at the given position:
    a = [1, 2, 3, 4]
    d = tf.expand_dims(a, axis=-1)
    print(session.run(d))
    [[1]
     [2]
     [3]
     [4]]
    '''
    # caps1_output: [?, 107648, 8] -> [?, 107648, 8, 1]
    caps1_output_expanded = tf.expand_dims(caps1_output, axis=-1, name="caps1_output_expanded")
    # [?, 107648, 8, 1] -> [?, 107648, 1, 8, 1]
    caps1_output_tile = tf.expand_dims(caps1_output_expanded, axis=2, name="caps1_output_tile")
    # [?, 107648, 1, 8, 1] -> [?, 107648, 22, 8, 1] (tiled once per output capsule)
    caps1_output_tiled = tf.tile(caps1_output_tile, [1, 1, caps2_n_caps, 1, 1], name="caps1_output_tiled")
    '''
    Batched matrix multiplication:
    caps2_matrixTFed is the prediction of every lower-level capsule.
    W_tiled holds 22*107648 matrices of size 16x8:
    - W_tiled                of shape [?, 107648, 22, 16, 8]
    - times caps1_output_tiled, shape [?, 107648, 22, 8, 1]
    - equals caps2_matrixTFed,  shape [?, 107648, 22, 16, 1]
    The reshape then drops the trailing size-1 dim -> [?, 107648, 22, 16]'''
    caps2_matrixTFed = tf.matmul(W_tiled, caps1_output_tiled, name="caps2_matrixTFed")
    caps2_matrixTFed = tf.reshape(caps2_matrixTFed, [-1, caps1_n_caps, caps2_n_caps, caps2_n_dims])
    return caps2_matrixTFed
def dynamic_routing(caps2_matrixTFed, batch_size, times=3):
    """
    Route the transformed predictions to the output capsules.

    :param caps2_matrixTFed: caps2 predictions after the W transform,
        shape [?, 107648, 22, 16]
    :param batch_size: dynamic batch size (e.g. tf.shape(X)[0])
    :param times: number of routing iterations
    :return: tuple (v, y_): the squashed output capsules
        [?, num_caps2, 16] (i.e. [?, 22, 16]) and the predicted labels
    """
    the_shape = np.shape(caps2_matrixTFed)
    num_caps1 = the_shape[1]  # 107648
    num_caps2 = the_shape[2]  # 22
    with tf.name_scope("dynamic_routing"):
        # routing logits b, shape [?, 107648, 22, 1] (dims must match below)
        b = tf.zeros([batch_size, num_caps1, num_caps2, 1],
                     dtype=np.float32, name="raw_weights")
        # coupling coefficients c: softmax over the output-capsule axis so
        # the probability mass sent to the higher capsules sums to 1
        c = tf.nn.softmax(b, axis=2, name="routing_weights")
        for i in range(times):
            # weight each prediction by its coupling coefficient
            weighted_predictions = tf.multiply(c, caps2_matrixTFed,
                                              name="weighted_predictions")
            # sum over the input capsules -> [?, 1, 22, 16]
            sum_predictions = tf.reduce_sum(weighted_predictions, axis=1,
                                            keepdims=True, name="sum_predictions")
            v = squash(sum_predictions, axis=-1, name="caps2_output_round_" + str(i))
            if i == times - 1:
                # Last iteration: squeeze out the singleton dims and predict.
                # (The original used ``while i == 2:`` here, which behaved
                # like an ``if`` but only ever fired when times == 3 —
                # for any other ``times`` the function returned None.)
                v = tf.squeeze(v)
                y_ = get_y_(v)
                return v, y_
            # broadcast v back to [?, 107648, 22, 16] so it can be compared
            # with every lower-level prediction
            v_tiled = tf.tile(v, [1, num_caps1, 1, 1],
                              name="caps2_output_round_1_tiled")
            # agreement = per-capsule dot product between prediction and v;
            # it can be positive or negative depending on alignment
            agreement_step1 = tf.multiply(caps2_matrixTFed, v_tiled)
            agreement = tf.reduce_sum(agreement_step1, axis=-1, keepdims=True)
            b = tf.add(b, agreement, name="raw_weights_round_2")
            c = tf.nn.softmax(b, axis=2, name="routing_weights_round_2")
def squash(vector, axis=0, name="squash"):
    """
    Squashing non-linearity from the CapsNet paper.

    Shrinks the vector norm into [0, 1) while keeping its direction:
    v * ||v||^2 / ((1 + ||v||^2) * ||v||).

    :param axis: axis along which the squared norm is summed
    :param vector: input tensor
    :param name: name scope
    :return: squashed tensor, same shape as ``vector``
    """
    with tf.name_scope(name):
        # tf.square instead of the original np.square so the squared norm
        # is built as a graph op on the symbolic tensor
        norm_up = tf.reduce_sum(tf.square(vector), axis=axis, keepdims=True)
        # add 1e-7 under the square root to avoid division by zero
        unit_vector = vector / tf.sqrt(norm_up + 10 ** -7)
        squash_vector = norm_up / (norm_up + 1) * unit_vector
        return squash_vector
def get_y_(model_out):
    """
    Predicted class labels from the capsule outputs.

    The length (L2 norm) of each output capsule encodes the class
    likelihood, so the prediction is the argmax over capsule lengths.
    (Leftover debug ``print`` calls from the original were removed.)

    :param model_out: capsule vectors, shape [?, 22, 16]
    :return: int32 tensor of predicted class indices, shape [?]
    """
    lengths = tf.norm(model_out, axis=-1, keepdims=False)
    y_ = tf.argmax(lengths, axis=1)
    return tf.cast(y_, tf.int32)
# def my_routing(caps2_matrixTFed, batch_size):
# """
# :param caps2_matrixTFed: 经过 W 矩阵变换过的 caps2 的输出 [?, 1152, 10, 16]
# :param batch_size:
# :return: 压缩激活后的 [?, num_caps2, 16]
# """
# num_caps = np.shape(caps2_matrixTFed)[1]
# new = caps2_matrixTFed
#
# for i in range(num_caps):
#
#
#
# routed = [None, 10, 16]
#
# return routed
| [
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.random_normal",
"tensorflow.layers.conv2d",
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.Variable",
"tensorflow.add",
"numpy.square",
"tens... | [((578, 627), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['X'], {'name': '"""conv1"""'}), "(X, name='conv1', **conv1_params)\n", (594, 627), True, 'import tensorflow as tf\n'), ((1024, 1077), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['conv1'], {'name': '"""conv2"""'}), "(conv1, name='conv2', **conv2_params)\n", (1040, 1077), True, 'import tensorflow as tf\n'), ((1400, 1469), 'tensorflow.reshape', 'tf.reshape', (['conv2', '[-1, caps1_n_caps, caps1_n_dims]'], {'name': '"""caps1_raw"""'}), "(conv2, [-1, caps1_n_caps, caps1_n_dims], name='caps1_raw')\n", (1410, 1469), True, 'import tensorflow as tf\n'), ((1865, 2004), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(1, caps1_n_caps, caps2_n_caps, caps2_n_dims, caps1_n_dims)', 'stddev': 'init_sigma', 'dtype': 'tf.float32', 'name': '"""W_init"""'}), "(shape=(1, caps1_n_caps, caps2_n_caps, caps2_n_dims,\n caps1_n_dims), stddev=init_sigma, dtype=tf.float32, name='W_init')\n", (1881, 2004), True, 'import tensorflow as tf\n'), ((2026, 2055), 'tensorflow.Variable', 'tf.Variable', (['W_init'], {'name': '"""W"""'}), "(W_init, name='W')\n", (2037, 2055), True, 'import tensorflow as tf\n'), ((2103, 2155), 'tensorflow.tile', 'tf.tile', (['W', '[batch_size, 1, 1, 1, 1]'], {'name': '"""W_tiled"""'}), "(W, [batch_size, 1, 1, 1, 1], name='W_tiled')\n", (2110, 2155), True, 'import tensorflow as tf\n'), ((2423, 2490), 'tensorflow.expand_dims', 'tf.expand_dims', (['caps1_output'], {'axis': '(-1)', 'name': '"""caps1_output_expanded"""'}), "(caps1_output, axis=-1, name='caps1_output_expanded')\n", (2437, 2490), True, 'import tensorflow as tf\n'), ((2571, 2642), 'tensorflow.expand_dims', 'tf.expand_dims', (['caps1_output_expanded'], {'axis': '(2)', 'name': '"""caps1_output_tile"""'}), "(caps1_output_expanded, axis=2, name='caps1_output_tile')\n", (2585, 2642), True, 'import tensorflow as tf\n'), ((2728, 2814), 'tensorflow.tile', 'tf.tile', (['caps1_output_tile', '[1, 1, caps2_n_caps, 1, 1]'], 
{'name': '"""caps1_output_tiled"""'}), "(caps1_output_tile, [1, 1, caps2_n_caps, 1, 1], name=\n 'caps1_output_tiled')\n", (2735, 2814), True, 'import tensorflow as tf\n'), ((3156, 3219), 'tensorflow.matmul', 'tf.matmul', (['W_tiled', 'caps1_output_tiled'], {'name': '"""caps2_matrixTFed"""'}), "(W_tiled, caps1_output_tiled, name='caps2_matrixTFed')\n", (3165, 3219), True, 'import tensorflow as tf\n'), ((3243, 3319), 'tensorflow.reshape', 'tf.reshape', (['caps2_matrixTFed', '[-1, caps1_n_caps, caps2_n_caps, caps2_n_dims]'], {}), '(caps2_matrixTFed, [-1, caps1_n_caps, caps2_n_caps, caps2_n_dims])\n', (3253, 3319), True, 'import tensorflow as tf\n'), ((3591, 3617), 'numpy.shape', 'np.shape', (['caps2_matrixTFed'], {}), '(caps2_matrixTFed)\n', (3599, 3617), True, 'import numpy as np\n'), ((6170, 6213), 'tensorflow.norm', 'tf.norm', (['model_out'], {'axis': '(-1)', 'keepdims': '(False)'}), '(model_out, axis=-1, keepdims=False)\n', (6177, 6213), True, 'import tensorflow as tf\n'), ((6254, 6280), 'tensorflow.argmax', 'tf.argmax', (['y_step1'], {'axis': '(1)'}), '(y_step1, axis=1)\n', (6263, 6280), True, 'import tensorflow as tf\n'), ((6316, 6337), 'tensorflow.cast', 'tf.cast', (['y_', 'tf.int32'], {}), '(y_, tf.int32)\n', (6323, 6337), True, 'import tensorflow as tf\n'), ((2074, 2085), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (2082, 2085), True, 'import tensorflow as tf\n'), ((3766, 3798), 'tensorflow.name_scope', 'tf.name_scope', (['"""dynamic_routing"""'], {}), "('dynamic_routing')\n", (3779, 3798), True, 'import tensorflow as tf\n'), ((3871, 3961), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, num_caps1, num_caps2, 1]'], {'dtype': 'np.float32', 'name': '"""raw_weights"""'}), "([batch_size, num_caps1, num_caps2, 1], dtype=np.float32, name=\n 'raw_weights')\n", (3879, 3961), True, 'import tensorflow as tf\n'), ((4069, 4117), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['b'], {'axis': '(2)', 'name': '"""routing_weights"""'}), "(b, axis=2, 
name='routing_weights')\n", (4082, 4117), True, 'import tensorflow as tf\n'), ((5771, 5790), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (5784, 5790), True, 'import tensorflow as tf\n'), ((6291, 6303), 'numpy.shape', 'np.shape', (['y_'], {}), '(y_)\n', (6299, 6303), True, 'import numpy as np\n'), ((4288, 4349), 'tensorflow.multiply', 'tf.multiply', (['c', 'caps2_matrixTFed'], {'name': '"""weighted_predictions"""'}), "(c, caps2_matrixTFed, name='weighted_predictions')\n", (4299, 4349), True, 'import tensorflow as tf\n'), ((4456, 4543), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weighted_predictions'], {'axis': '(1)', 'keepdims': '(True)', 'name': '"""sum_predictions"""'}), "(weighted_predictions, axis=1, keepdims=True, name=\n 'sum_predictions')\n", (4469, 4543), True, 'import tensorflow as tf\n'), ((4903, 4970), 'tensorflow.tile', 'tf.tile', (['v', '[1, num_caps1, 1, 1]'], {'name': '"""caps2_output_round_1_tiled"""'}), "(v, [1, num_caps1, 1, 1], name='caps2_output_round_1_tiled')\n", (4910, 4970), True, 'import tensorflow as tf\n'), ((5322, 5360), 'tensorflow.multiply', 'tf.multiply', (['caps2_matrixTFed', 'v_tiled'], {}), '(caps2_matrixTFed, v_tiled)\n', (5333, 5360), True, 'import tensorflow as tf\n'), ((5385, 5439), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['agreement_step1'], {'axis': '(-1)', 'keepdims': '(True)'}), '(agreement_step1, axis=-1, keepdims=True)\n', (5398, 5439), True, 'import tensorflow as tf\n'), ((5457, 5505), 'tensorflow.add', 'tf.add', (['b', 'agreement'], {'name': '"""raw_weights_round_2"""'}), "(b, agreement, name='raw_weights_round_2')\n", (5463, 5505), True, 'import tensorflow as tf\n'), ((5522, 5578), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['b'], {'axis': '(2)', 'name': '"""routing_weights_round_2"""'}), "(b, axis=2, name='routing_weights_round_2')\n", (5535, 5578), True, 'import tensorflow as tf\n'), ((5824, 5841), 'numpy.square', 'np.square', (['vector'], {}), '(vector)\n', (5833, 5841), True, 'import 
numpy as np\n'), ((5933, 5960), 'tensorflow.sqrt', 'tf.sqrt', (['(norm_up + 10 ** -7)'], {}), '(norm_up + 10 ** -7)\n', (5940, 5960), True, 'import tensorflow as tf\n'), ((4742, 4755), 'tensorflow.squeeze', 'tf.squeeze', (['v'], {}), '(v)\n', (4752, 4755), True, 'import tensorflow as tf\n')] |
'''
Testing requires the following steps:
1- predicting a volume and calculating the mean square distance between the
original & predicted frame --> e(t)
2- calculating the anomaly score as follows:
sa = [e(t) - min(e(t))]/max(e(t))
3- regularity score:
sr = 1-sa
4- compare score to a threshold
'''
def get_gt_vid(video_root_path, dataset, vid_idx, pred_vid):
    """Build a per-frame ground-truth vector for one test video.

    Reads ``{root}/{dataset}/gt_files/gt_{name}_vid{NN}.txt`` and marks
    the frames inside each annotated [start, end) event with 1.

    Args:
        video_root_path (str): root directory of the video data.
        dataset (str): 'UCSDped1' or 'UCSDped2'.
        vid_idx (int): zero-based video index (file names are one-based).
        pred_vid (np.ndarray): prediction vector; only its shape is used.

    Returns:
        np.ndarray: same shape as ``pred_vid``; 1 inside annotated
        anomalous events, 0 elsewhere.

    Raises:
        ValueError: if ``dataset`` is not a known UCSD dataset name
            (the original silently hit a NameError instead).
    """
    import numpy as np
    if dataset == 'UCSDped1':
        gt_data = 'UCSD_ped1'
    elif dataset == 'UCSDped2':
        gt_data = 'UCSD_ped2'
    else:
        raise ValueError('unknown dataset: {0}'.format(dataset))
    gt_vid_raw = np.loadtxt('{0}/{1}/gt_files/gt_{2}_vid{3:02d}.txt'.format(
        video_root_path, dataset, gt_data, vid_idx + 1))
    gt_vid = np.zeros_like(pred_vid)
    # A file with a single event loads as a 1-D [start, end] pair, a file
    # with several events as an (n, 2) array; atleast_2d gives both a
    # uniform shape, replacing the original blind try/except.
    for event in np.atleast_2d(gt_vid_raw):
        start = int(event[0])
        end = int(event[1])
        gt_vid[start:end] = 1
    return gt_vid
def regularity_score(x1, x2):
    """Anomaly score of a reconstruction: 1 - SSIM(x1[0], x2[0]).

    A perfect reconstruction (SSIM = 1) scores 0; the worse the
    reconstruction, the higher the score.
    """
    import numpy as np
    from skimage import measure
    ssim = measure.compare_ssim(x1[0], x2[0], multichannel=True)
    return 1.0 - ssim
def video_to_clips(X_test, t):
    """Slice a video into overlapping clips of t consecutive frames.

    Args:
        X_test (np.ndarray): video frames, shape (n_frames, H, W).
        t (int): clip length in frames.

    Returns:
        tuple: ``(clips, sz)`` where ``clips`` has shape
        (sz, t, H, W, 1) and ``sz = n_frames - t + 1`` is the number of
        overlapping clips.
    """
    import numpy as np
    sz = X_test.shape[0] - t + 1
    X_test = np.expand_dims(X_test, axis=-1)  # (n_frames, H, W, 1)
    # The original hard-coded (sz, 10, 227, 227, 1), which broke for any
    # t != 10 (including the sibling's default t=4) or non-227x227
    # frames; derive the clip shape from the input instead.
    sequences = np.zeros((sz, t) + X_test.shape[1:])
    for i in range(sz):
        sequences[i] = X_test[i:i + t]
    return sequences, sz
def t_predict_video(model, X_test, t=4):
    """Reconstruct a whole video clip-by-clip and score its regularity.

    Args:
        model: trained autoencoder exposing ``predict``.
        X_test: video frames, shape (n_frames, H, W).
        t (int): clip length handed to ``video_to_clips``.

    Returns:
        tuple: ``(sr, sr, sz)`` — the regularity score is returned twice
        (callers use it both as score and reconstruction error), plus
        the number of clips ``sz``.
    """
    import numpy as np
    clips, sz = video_to_clips(X_test, t)
    reconstructed = model.predict(clips)
    # per-clip reconstruction error: L2 norm of the frame difference
    errors = []
    for i in range(sz):
        diff = np.squeeze(clips[i]) - np.squeeze(reconstructed[i])
        errors.append(np.linalg.norm(diff))
    sa = np.array(errors)
    # min-max normalise the anomaly score, then invert: 1 = regular
    sa_normalized = (sa - sa.min()) / (sa.max() - sa.min())
    sr = 1.0 - sa_normalized
    return sr, sr, sz
def t_predict_volumes(model, X_test, t=4, predict_frames=False):
    """Score each pre-cut volume of a video independently.

    Args:
        model: trained autoencoder exposing ``predict``.
        X_test: iterable of volumes (each one clip of frames).
        t (int): unused; kept for interface parity with
            ``t_predict_video``.
        predict_frames (bool): unused; kept for backward compatibility.

    Returns:
        list: one ``regularity_score`` per volume.
    """
    import numpy as np
    video_scores = []
    # the original enumerated the volumes but never used the index
    for bunch in X_test:
        n_bunch = np.expand_dims(bunch, axis=0)
        reconstructed_bunch = model.predict(n_bunch)
        video_scores.append(regularity_score(n_bunch, reconstructed_bunch))
    return video_scores
def test(logger, dataset, t, job_uuid, epoch, val_loss, video_root_path, n_videos):
    """Evaluate a trained temporal model on every test video.

    For each video: predict with the loaded model, plot the
    reconstruction-error and regularity-score curves, and collect the
    per-frame predictions; finally plot the overall AUC.

    Args:
        logger: logger used for progress messages.
        dataset (str): dataset name, e.g. 'UCSDped1'.
        t (int): temporal clip length.
        job_uuid (str): id of the training job (its log folder name).
        epoch (int): epoch of the snapshot to load.
        val_loss (float): validation loss encoded in the snapshot name.
        video_root_path (str): root directory of the video data.
        n_videos (int): number of test videos to evaluate.
    """
    import numpy as np
    from keras.models import load_model
    import os
    # the original also imported h5py (unused) and matplotlib twice
    import matplotlib.pyplot as plt
    from evaluate import plot_regularity_score, plot_reconstruction_error, calc_auc_overall
    from PIL import Image
    # paths to the test data, the job folder and the trained model
    test_dir = os.path.join(video_root_path, '{0}/testing_numpy'.format(dataset))
    job_folder = os.path.join(video_root_path, dataset, 'logs/jobs', job_uuid)
    model_filename = 'model_snapshot_e{:03d}_{:.6f}.h5'.format(epoch, val_loss)
    # load the trained model snapshot
    temporal_model = load_model(os.path.join(job_folder, model_filename))
    all_gt = []
    all_pred = []
    # loop over every video in the test data
    for videoid in range(n_videos):
        videoname = 'testing_frames_{0:03d}.h5'.format(videoid + 1)
        filepath = os.path.join(test_dir, videoname)
        logger.info("==> {}".format(filepath))
        X_test = np.load(os.path.join(video_root_path, '{0}/testing_numpy/testing_frames_{1:03d}.npy'.format(dataset, videoid + 1)))
        # regularity score and reconstruction error for this video
        score_vid, recon_error, sz = t_predict_video(temporal_model, X_test, t)
        plot_reconstruction_error(video_root_path, dataset, videoid, logger, recon_error)
        plot_regularity_score(video_root_path, dataset, videoid, logger, score_vid)
        # resample the per-clip error to the per-frame length for the AUC
        pred_vid = Image.fromarray(np.expand_dims(recon_error, 1)).resize((sz + t, 1))
        pred_vid = np.squeeze(pred_vid)
        gt_vid = get_gt_vid(video_root_path, dataset, videoid, pred_vid)
        all_gt.append(gt_vid)
        all_pred.append(pred_vid)
    # overall AUC across all videos
    calc_auc_overall(logger, video_root_path, dataset, all_gt, all_pred)
| [
"os.path.join",
"numpy.min",
"evaluate.calc_auc_overall",
"numpy.squeeze",
"numpy.max",
"numpy.array",
"numpy.zeros",
"evaluate.plot_regularity_score",
"numpy.expand_dims",
"skimage.measure.compare_ssim",
"numpy.zeros_like",
"evaluate.plot_reconstruction_error"
] | [((786, 809), 'numpy.zeros_like', 'np.zeros_like', (['pred_vid'], {}), '(pred_vid)\n', (799, 809), True, 'import numpy as np\n'), ((1235, 1288), 'skimage.measure.compare_ssim', 'measure.compare_ssim', (['x1[0]', 'x2[0]'], {'multichannel': '(True)'}), '(x1[0], x2[0], multichannel=True)\n', (1255, 1288), False, 'from skimage import measure\n'), ((1432, 1463), 'numpy.expand_dims', 'np.expand_dims', (['X_test'], {'axis': '(-1)'}), '(X_test, axis=-1)\n', (1446, 1463), True, 'import numpy as np\n'), ((1480, 1511), 'numpy.zeros', 'np.zeros', (['(sz, 10, 227, 227, 1)'], {}), '((sz, 10, 227, 227, 1))\n', (1488, 1511), True, 'import numpy as np\n'), ((3250, 3311), 'os.path.join', 'os.path.join', (['video_root_path', 'dataset', '"""logs/jobs"""', 'job_uuid'], {}), "(video_root_path, dataset, 'logs/jobs', job_uuid)\n", (3262, 3311), False, 'import os\n'), ((4536, 4604), 'evaluate.calc_auc_overall', 'calc_auc_overall', (['logger', 'video_root_path', 'dataset', 'all_gt', 'all_pred'], {}), '(logger, video_root_path, dataset, all_gt, all_pred)\n', (4552, 4604), False, 'from evaluate import plot_regularity_score, plot_reconstruction_error, calc_auc_overall\n'), ((1554, 1580), 'numpy.zeros', 'np.zeros', (['(t, 227, 227, 1)'], {}), '((t, 227, 227, 1))\n', (1562, 1580), True, 'import numpy as np\n'), ((1695, 1714), 'numpy.array', 'np.array', (['sequences'], {}), '(sequences)\n', (1703, 1714), True, 'import numpy as np\n'), ((2385, 2414), 'numpy.expand_dims', 'np.expand_dims', (['bunch'], {'axis': '(0)'}), '(bunch, axis=0)\n', (2399, 2414), True, 'import numpy as np\n'), ((3438, 3478), 'os.path.join', 'os.path.join', (['job_folder', 'model_filename'], {}), '(job_folder, model_filename)\n', (3450, 3478), False, 'import os\n'), ((3679, 3712), 'os.path.join', 'os.path.join', (['test_dir', 'videoname'], {}), '(test_dir, videoname)\n', (3691, 3712), False, 'import os\n'), ((4048, 4133), 'evaluate.plot_reconstruction_error', 'plot_reconstruction_error', (['video_root_path', 'dataset', 
'videoid', 'logger', 'recon_error'], {}), '(video_root_path, dataset, videoid, logger,\n recon_error)\n', (4073, 4133), False, 'from evaluate import plot_regularity_score, plot_reconstruction_error, calc_auc_overall\n'), ((4138, 4213), 'evaluate.plot_regularity_score', 'plot_regularity_score', (['video_root_path', 'dataset', 'videoid', 'logger', 'score_vid'], {}), '(video_root_path, dataset, videoid, logger, score_vid)\n', (4159, 4213), False, 'from evaluate import plot_regularity_score, plot_reconstruction_error, calc_auc_overall\n'), ((4342, 4362), 'numpy.squeeze', 'np.squeeze', (['pred_vid'], {}), '(pred_vid)\n', (4352, 4362), True, 'import numpy as np\n'), ((2084, 2094), 'numpy.min', 'np.min', (['sa'], {}), '(sa)\n', (2090, 2094), True, 'import numpy as np\n'), ((2099, 2109), 'numpy.max', 'np.max', (['sa'], {}), '(sa)\n', (2105, 2109), True, 'import numpy as np\n'), ((2110, 2120), 'numpy.min', 'np.min', (['sa'], {}), '(sa)\n', (2116, 2120), True, 'import numpy as np\n'), ((1969, 1993), 'numpy.squeeze', 'np.squeeze', (['sequences[i]'], {}), '(sequences[i])\n', (1979, 1993), True, 'import numpy as np\n'), ((1994, 2032), 'numpy.squeeze', 'np.squeeze', (['reconstructed_sequences[i]'], {}), '(reconstructed_sequences[i])\n', (2004, 2032), True, 'import numpy as np\n'), ((4275, 4305), 'numpy.expand_dims', 'np.expand_dims', (['recon_error', '(1)'], {}), '(recon_error, 1)\n', (4289, 4305), True, 'import numpy as np\n')] |
import numpy as np
def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):
x0 = (x_pts[-1]+x_pts[0])/2 + x_offset
y0 = (y_pts[-1]+y_pts[0])/2 + y_offset
xx, yy = np.meshgrid(x_pts, y_pts)
sigma = mfd * 0.707 / 2.355
sigma_x = sigma
sigma_y = sigma
gaus_2d = np.exp(-((xx-x0)**2/(2*sigma_x**2)+
(yy-y0)**2/(2*sigma_y**2)))
gaus_2d /= np.sum(gaus_2d)
return gaus_2d
def _overlap(mode, gaussian):
mode_1 = mode
mode_2 = np.sqrt(gaussian) # square-root for E-field (not power)
eta = np.abs(np.sum(np.conj(mode_1)*mode_2))**2 / \
(np.sum(np.abs(mode_1)**2) * np.sum(np.abs(mode_2)**2))
return eta
def reflection(n1, n2):
    '''
    Power reflectivity of a normal-incidence interface between
    two refractive-index materials: r = |(n1-n2)/(n1+n2)|^2.

    Args:
        n1 (float): Refractive index of material 1.
        n2 (float): Refractive index of material 2.

    Returns:
        float: The percentage of reflected power.
    '''
    amplitude = (n1 - n2) / (n1 + n2)
    return abs(amplitude) ** 2
def transmission(n1, n2):
    '''
    Power transmission of a normal-incidence interface between two
    refractive-index materials: t = 1 - r, with r the power
    reflectivity |(n1-n2)/(n1+n2)|^2 (inlined from ``reflection``).

    Args:
        n1 (float): Refractive index of material 1.
        n2 (float): Refractive index of material 2.

    Returns:
        float: The percentage of transmitted power.
    '''
    return 1 - abs((n1 - n2) / (n1 + n2)) ** 2
def coupling_efficiency(mode_solver, fibre_mfd,
                        fibre_offset_x=0, fibre_offset_y=0,
                        n_eff_fibre=1.441):
    '''
    Power coupling efficiency between each solved mode and a
    Gaussian fibre mode of the given MFD.

    Args:
        mode_solver (_ModeSolver): Mode solver that
            has found a fundamental mode.
        fibre_mfd (float): The mode-field diameter
            (MFD) of the fibre.
        fibre_offset_x (float): Offset the fibre
            from the centre position of the window
            in x. Default is 0 (no offset).
        fibre_offset_y (float): Offset the fibre
            from the centre position of the window
            in y. Default is 0 (no offset).
        n_eff_fibre (float): The effective index
            of the fibre mode. Default is 1.441.

    Returns:
        list: One power coupling efficiency per solved mode.
    '''
    fibre_gaussian = _make_gaussian(mode_solver._structure.xc,
                                    mode_solver._structure.yc,
                                    fibre_mfd, fibre_offset_x, fibre_offset_y)
    # efficiency = (field overlap) x (index-mismatch transmission)
    return [
        abs(_overlap(mode, fibre_gaussian)) * abs(transmission(n_eff, n_eff_fibre))
        for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs)
    ]
| [
"numpy.abs",
"numpy.sqrt",
"numpy.conj",
"numpy.exp",
"numpy.sum",
"numpy.meshgrid"
] | [((182, 207), 'numpy.meshgrid', 'np.meshgrid', (['x_pts', 'y_pts'], {}), '(x_pts, y_pts)\n', (193, 207), True, 'import numpy as np\n'), ((296, 385), 'numpy.exp', 'np.exp', (['(-((xx - x0) ** 2 / (2 * sigma_x ** 2) + (yy - y0) ** 2 / (2 * sigma_y ** 2)))'], {}), '(-((xx - x0) ** 2 / (2 * sigma_x ** 2) + (yy - y0) ** 2 / (2 * \n sigma_y ** 2)))\n', (302, 385), True, 'import numpy as np\n'), ((398, 413), 'numpy.sum', 'np.sum', (['gaus_2d'], {}), '(gaus_2d)\n', (404, 413), True, 'import numpy as np\n'), ((496, 513), 'numpy.sqrt', 'np.sqrt', (['gaussian'], {}), '(gaussian)\n', (503, 513), True, 'import numpy as np\n'), ((624, 638), 'numpy.abs', 'np.abs', (['mode_1'], {}), '(mode_1)\n', (630, 638), True, 'import numpy as np\n'), ((652, 666), 'numpy.abs', 'np.abs', (['mode_2'], {}), '(mode_2)\n', (658, 666), True, 'import numpy as np\n'), ((576, 591), 'numpy.conj', 'np.conj', (['mode_1'], {}), '(mode_1)\n', (583, 591), True, 'import numpy as np\n')] |
import numpy as np
import pylab as plt
from pspy import so_dict,so_spectra,pspy_utils,so_map
import os,sys
import planck_utils

# Script: compare the Monte-Carlo covariances of the TT/EE/TE spectra
# and of the ratio r = TE/sqrt(TT*EE) with their analytic first-order
# expectations, for every Planck frequency pair.

# read the parameter dictionary given on the command line
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
figure_dir='figures'
pspy_utils.create_directory(figure_dir)
# NOTE(review): iStart/iStop (and plot_name below) are read but never
# used in this script — confirm against the full pipeline.
iStart=d['iStart']
iStop=d['iStop']
binning_file=d['binning_file']
include_sys=d['include_systematics']
freqs=d['freqs']
lthmax=1600
# the Monte-Carlo directory depends on whether systematics were simulated
if include_sys==True:
    mc_dir='monteCarlo_syst'
    plot_name='robustness'
else:
    mc_dir='monteCarlo'
    plot_name='bias'
# all unordered frequency pairs (freq1, freq2) with index c1 <= c2
freq_pairs=[]
for c1,freq1 in enumerate(freqs):
    for c2,freq2 in enumerate(freqs):
        if c1>c2: continue
        freq_pairs+=[[freq1,freq2]]
# lensed theory spectra (Cl) up to lthmax
lth,psth= pspy_utils.ps_lensed_theory_to_dict(d['theoryfile'],output_type='Cl',lmax=lthmax,lstart=2)
plt.figure(figsize=(18,10))
color_array=['red','blue','green','gray','purple','orange']
for fpair,color in zip(freq_pairs,color_array):  # NOTE(review): color unused
    f0,f1=fpair
    fname='%sx%s'%(f0,f1)
    # NOTE(review): cl/error/mc_error are initialised but never filled
    cl={}
    error={}
    mc_error={}
    model={}
    lmin,lmax=d['lrange_%sx%s'%(f0,f1)]
    # bin the theory spectra and restrict them to the pair's multipole range
    for spec in ['TT','EE','TE']:
        model[spec,fname]=psth[spec]
        lb,model[spec,fname]= planck_utils.binning(lth, model[spec,fname],lthmax,binning_file=binning_file)
        id=np.where((lb>lmin) &(lb<lmax))  # NOTE(review): shadows builtin id()
        model[spec,fname]=model[spec,fname][id]
    # theory correlation ratio r = TE / sqrt(TT * EE)
    model['r',fname]=model['TE',fname]/np.sqrt(model['TT',fname]*model['EE',fname])
    # Monte-Carlo mean and scatter of r for this frequency pair
    l,r,std_r=np.loadtxt('%s/spectra_r_%s_hm1xhm2.dat'%(mc_dir,fname),unpack=True)
    # diagonal Monte-Carlo covariances between all spectrum combinations
    cov_TTTT=np.loadtxt('%s/diagonal_select_cov_mat_TT_%s_%s_TT_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_EEEE=np.loadtxt('%s/diagonal_select_cov_mat_EE_%s_%s_EE_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_TETE=np.loadtxt('%s/diagonal_select_cov_mat_TE_%s_%s_TE_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_TTEE=np.loadtxt('%s/diagonal_select_cov_mat_TT_%s_%s_EE_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_TTTE=np.loadtxt('%s/diagonal_select_cov_mat_TT_%s_%s_TE_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_EETE=np.loadtxt('%s/diagonal_select_cov_mat_EE_%s_%s_TE_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_TTr=np.loadtxt('%s/diagonal_select_cov_mat_TT_%s_%s_r_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    cov_EEr=np.loadtxt('%s/diagonal_select_cov_mat_EE_%s_%s_r_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    # NOTE(review): cov_rr is loaded but never used below
    cov_rr=np.loadtxt('%s/diagonal_select_cov_mat_r_%s_%s_r_%s_%s.dat'%(mc_dir,fname,'hm1xhm2',fname,'hm1xhm2'))
    # analytic expectation for cov(TT, r) and cov(EE, r), propagated to
    # first order from the spectrum covariances
    cov_TTr_th=model['r',fname]*(cov_TTTE/model['TE',fname]-1/2*(cov_TTTT/model['TT',fname]+cov_TTEE/model['EE',fname]))
    cov_EEr_th=model['r',fname]*(cov_EETE/model['TE',fname]-1/2*(cov_EEEE/model['EE',fname]+cov_TTEE/model['TT',fname]))
    # measured (dots) versus expected (line) covariance: r with TT, then EE
    plt.plot(l,cov_TTr,'o')
    plt.plot(l,cov_TTr_th)
    plt.show()
    plt.plot(l,cov_EEr,'o')
    plt.plot(l,cov_EEr_th)
    plt.show()
| [
"pspy.so_dict.so_dict",
"numpy.sqrt",
"numpy.where",
"pylab.plot",
"pylab.figure",
"pspy.pspy_utils.create_directory",
"pspy.pspy_utils.ps_lensed_theory_to_dict",
"planck_utils.binning",
"numpy.loadtxt",
"pylab.show"
] | [((134, 151), 'pspy.so_dict.so_dict', 'so_dict.so_dict', ([], {}), '()\n', (149, 151), False, 'from pspy import so_dict, so_spectra, pspy_utils, so_map\n'), ((204, 243), 'pspy.pspy_utils.create_directory', 'pspy_utils.create_directory', (['figure_dir'], {}), '(figure_dir)\n', (231, 243), False, 'from pspy import so_dict, so_spectra, pspy_utils, so_map\n'), ((670, 768), 'pspy.pspy_utils.ps_lensed_theory_to_dict', 'pspy_utils.ps_lensed_theory_to_dict', (["d['theoryfile']"], {'output_type': '"""Cl"""', 'lmax': 'lthmax', 'lstart': '(2)'}), "(d['theoryfile'], output_type='Cl', lmax\n =lthmax, lstart=2)\n", (705, 768), False, 'from pspy import so_dict, so_spectra, pspy_utils, so_map\n'), ((763, 791), 'pylab.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (773, 791), True, 'import pylab as plt\n'), ((1409, 1481), 'numpy.loadtxt', 'np.loadtxt', (["('%s/spectra_r_%s_hm1xhm2.dat' % (mc_dir, fname))"], {'unpack': '(True)'}), "('%s/spectra_r_%s_hm1xhm2.dat' % (mc_dir, fname), unpack=True)\n", (1419, 1481), True, 'import numpy as np\n'), ((1493, 1606), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_TT_%s_%s_TT_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_TT_%s_%s_TT_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (1503, 1606), True, 'import numpy as np\n'), ((1610, 1723), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_EE_%s_%s_EE_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_EE_%s_%s_EE_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (1620, 1723), True, 'import numpy as np\n'), ((1727, 1840), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_TE_%s_%s_TE_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_TE_%s_%s_TE_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (1737, 1840), True, 'import numpy as 
np\n'), ((1845, 1958), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_TT_%s_%s_EE_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_TT_%s_%s_EE_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (1855, 1958), True, 'import numpy as np\n'), ((1962, 2075), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_TT_%s_%s_TE_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_TT_%s_%s_TE_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (1972, 2075), True, 'import numpy as np\n'), ((2079, 2192), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_EE_%s_%s_TE_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_EE_%s_%s_TE_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (2089, 2192), True, 'import numpy as np\n'), ((2196, 2308), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_TT_%s_%s_r_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_TT_%s_%s_r_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (2206, 2308), True, 'import numpy as np\n'), ((2311, 2423), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_EE_%s_%s_r_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_EE_%s_%s_r_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (2321, 2423), True, 'import numpy as np\n'), ((2426, 2537), 'numpy.loadtxt', 'np.loadtxt', (["('%s/diagonal_select_cov_mat_r_%s_%s_r_%s_%s.dat' % (mc_dir, fname,\n 'hm1xhm2', fname, 'hm1xhm2'))"], {}), "('%s/diagonal_select_cov_mat_r_%s_%s_r_%s_%s.dat' % (mc_dir,\n fname, 'hm1xhm2', fname, 'hm1xhm2'))\n", (2436, 2537), True, 'import numpy as np\n'), ((2777, 2802), 'pylab.plot', 'plt.plot', (['l', 'cov_TTr', '"""o"""'], {}), "(l, cov_TTr, 'o')\n", (2785, 2802), True, 'import pylab 
as plt\n'), ((2805, 2828), 'pylab.plot', 'plt.plot', (['l', 'cov_TTr_th'], {}), '(l, cov_TTr_th)\n', (2813, 2828), True, 'import pylab as plt\n'), ((2832, 2842), 'pylab.show', 'plt.show', ([], {}), '()\n', (2840, 2842), True, 'import pylab as plt\n'), ((2848, 2873), 'pylab.plot', 'plt.plot', (['l', 'cov_EEr', '"""o"""'], {}), "(l, cov_EEr, 'o')\n", (2856, 2873), True, 'import pylab as plt\n'), ((2876, 2899), 'pylab.plot', 'plt.plot', (['l', 'cov_EEr_th'], {}), '(l, cov_EEr_th)\n', (2884, 2899), True, 'import pylab as plt\n'), ((2903, 2913), 'pylab.show', 'plt.show', ([], {}), '()\n', (2911, 2913), True, 'import pylab as plt\n'), ((1141, 1226), 'planck_utils.binning', 'planck_utils.binning', (['lth', 'model[spec, fname]', 'lthmax'], {'binning_file': 'binning_file'}), '(lth, model[spec, fname], lthmax, binning_file=binning_file\n )\n', (1161, 1226), False, 'import planck_utils\n'), ((1230, 1265), 'numpy.where', 'np.where', (['((lb > lmin) & (lb < lmax))'], {}), '((lb > lmin) & (lb < lmax))\n', (1238, 1265), True, 'import numpy as np\n'), ((1349, 1397), 'numpy.sqrt', 'np.sqrt', (["(model['TT', fname] * model['EE', fname])"], {}), "(model['TT', fname] * model['EE', fname])\n", (1356, 1397), True, 'import numpy as np\n')] |
import argparse
import sys
import random
import csv
import ujson
import re
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import wordvecdata as wvd
from sklearn.metrics import average_precision_score
import pdb
# Input schema: the three node columns of each link row and the label column.
COLUMNS = ["node1", "node2", "node3"]
LABEL_COLUMN = "label"
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyper parameters
#TODO pass these from command line
num_classes = 2
batch_size = 100
learning_rate = 0.0001
# Checkpoint paths: best model seen so far, and the most recent epoch's model.
PATH = "saved_models/best_model.pth"
CUR_PATH = "saved_models/cur_model.pth"
class LinksDataset(Dataset):
    """Map-style torch Dataset over pre-computed link feature vectors.

    Parameters
    ----------
    features_array : sequence of numpy arrays
        One feature vector per link.
    labels_array : sequence
        One label per link, aligned with ``features_array``.
    transform : callable, optional
        Applied to each feature vector on access. Defaults to
        ``torch.from_numpy`` (numpy -> tensor conversion).
    """
    def __init__(self, features_array, labels_array, transform=torch.from_numpy):
        self.link_features = features_array
        self.labels = labels_array
        self.transform = transform

    def __len__(self):
        return len(self.link_features)

    def __getitem__(self, idx):
        sample, target = self.link_features[idx], self.labels[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, target
# model
class MLP(nn.Module):
    """Single-hidden-layer perceptron for link classification.

    Architecture: Linear(input_dim -> 100) -> ReLU ->
    Linear(100 -> num_classes) -> Softplus.

    Parameters
    ----------
    num_classes : int
        Number of output classes.
    input_dim : int, optional
        Dimensionality of the input feature vectors (default 300).
    """
    def __init__(self, num_classes, input_dim=300):
        super(MLP, self).__init__()
        self.layer1 = nn.Linear(input_dim, 100)
        self.layer1_nonlinearity = nn.ReLU()
        # Bug fix: the output width was hard-coded to 2, silently ignoring
        # the num_classes parameter (harmless only while num_classes == 2).
        self.output_layer = nn.Linear(100, num_classes)
        self.softplus = nn.Softplus()

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer1_nonlinearity(out)
        # Flatten trailing dimensions so the output layer sees 2-D input.
        out = out.reshape(out.size(0), -1)
        out = self.output_layer(out)
        # Softplus keeps the class scores positive.
        out = self.softplus(out)
        return out
def get_input(df, embeddings, index_map, combination_method='hadamard', data_purpose='train'):
    """Build model input features and labels from a dataframe of links.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain the node columns listed in ``COLUMNS`` and the
        ``LABEL_COLUMN``.
    embeddings : numpy.ndarray
        Embedding matrix, one row per vocabulary entry.
    index_map : dict
        Maps node names to row indices of ``embeddings``.
    combination_method : str, optional
        How per-node embeddings are combined into one link feature: one of
        'hadamard', 'average', 'weighted_l1', 'weighted_l2', 'concatenate'.
    data_purpose : str, optional
        Unused; kept for interface compatibility.

    Returns
    -------
    (features, labels, keys)
        features : combined link feature array,
        labels : numpy array of 0/1 labels,
        keys : list of 'node1::node2' identifier strings.
    """
    # Binarize the label column: anything non-zero counts as a positive link.
    label_values = [0 if val_ == 0.0 else 1 for val_ in df[LABEL_COLUMN].values]

    column_tensors = []
    col_keys = []
    for i in COLUMNS:
        # Bug fix: the weight was only assigned for node1/node2, so node3
        # silently reused the value left over from the previous iteration.
        # Every column effectively used 10.0 — make that explicit.
        col_weight = 10.0
        words = [value for value in df[i].values]
        col_keys.append(words)
        ids = [index_map[word] for word in words]
        column_tensors.append([np.multiply(np.array(embeddings[id_]), col_weight) for id_ in ids])

    # Human-readable identifier for the (node1, node2) pair of each row.
    keys = ["%s::%s" % (entity1, entity2) for entity1, entity2 in zip(col_keys[0], col_keys[1])]

    assert combination_method in ['hadamard', 'average', 'weighted_l1', 'weighted_l2', 'concatenate'], \
        "Invalid combination Method %s" % combination_method
    print("Combining with {}.".format(combination_method))

    # Fold the per-column embeddings into a single feature per link.
    features = column_tensors[0]
    for i in range(1, len(column_tensors)):
        if combination_method == 'hadamard':
            features = np.multiply(features, column_tensors[i])
        elif combination_method == 'average':
            features = np.mean(np.array([features, column_tensors[i]]), axis=0)
        elif combination_method == 'weighted_l1':
            features = np.absolute(np.subtract(features, column_tensors[i]))
        elif combination_method == 'weighted_l2':
            features = np.square(np.absolute(np.subtract(features, column_tensors[i])))
        elif combination_method == 'concatenate':
            features = np.concatenate([features, column_tensors[i]], 1)

    return features, np.array(label_values), keys
def build_model(input_dim):
    """Construct the MLP classifier together with its loss and optimizer.

    Uses the module-level ``num_classes``, ``device`` and ``learning_rate``
    settings.

    Returns
    -------
    (model, criterion, optimizer)
    """
    network = MLP(num_classes, input_dim).to(device)
    return (network,
            nn.CrossEntropyLoss(),
            torch.optim.Adam(network.parameters(), lr=learning_rate))
def train_and_eval(train_epochs, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name,
            eval_filename, unformed_filename, positive_labels, combination_method, method, c_lst, lbd_type, experiment_name, a, c, gold_b):
    """Train the MLP link classifier and periodically evaluate it.

    Reads tab-separated train/test link files, builds combined-embedding
    features via get_input, trains for `train_epochs` epochs, and every
    `evaluate_every` epochs scores the test set. For closed discovery
    (lbd_type == 'closed_discovery_abc') the gold B term's rank is computed
    and the best-ranking model is checkpointed to PATH, with a summary
    written to '<experiment_name>.txt'.

    Note: several parameters (train_embeddings_file_name, eval_filename,
    unformed_filename, method, c_lst) are not referenced in this body; only
    the test embeddings file is loaded and used for both train and test
    features.
    """
    index_map, weights = wvd.load(test_embeddings_file_name)
    #Get positive labels
    positive_labels = positive_labels.split(',')
    print("reading training data...")
    train_file_name = train_data
    df_train = pd.read_table(train_file_name, dtype={'train_nodes':str})
    # Shuffle the training rows.
    df_train = df_train.sample(frac=1)
    # remove NaN elements
    df_train = df_train.dropna(how='any', axis=0)
    #Get inputs
    train_x, labels, _ = get_input(df_train, weights, index_map, combination_method)
    train_loader = torch.utils.data.DataLoader(dataset=LinksDataset(train_x, labels), batch_size=batch_size, shuffle=True)
    pos_labels = [l_ for l_ in labels if l_ != 0]
    #Start loading evaluation data (same as test data for cancer cases)
    print("reading eval data...")
    test_file_name = test_data
    df_test = pd.read_table(test_file_name, dtype={'train_nodes':str})
    # remove NaN elements
    df_test = df_test.dropna(how='any', axis=0)
    test_x, test_labels, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')
    test_loader = torch.utils.data.DataLoader(dataset=LinksDataset(test_x, test_labels), batch_size=batch_size, shuffle=False)
    #End loading evaluation data
    print("\nBuilding model...")
    feature_dim = train_x[0].shape[0]
    model, criterion, optimizer = build_model(feature_dim)
    # Train the model
    print("\nTraining model...")
    total_step = len(train_loader)
    best_info = {'best_rank':1000000000}  # sentinel: any real rank beats this
    evaluate_every = 5
    for epoch in range(train_epochs):
        # NOTE(review): this loop rebinds train_x/labels to mini-batches,
        # shadowing the full arrays loaded above.
        for i, (train_x, labels) in enumerate(train_loader):
            labels = labels.type(torch.LongTensor)
            links = train_x.to(device)
            labels = labels.to(device)
            # Forward pass
            outputs = model(links)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i+1) % 500 == 0:
                print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                       .format(epoch+1, train_epochs, i+1, total_step, loss.item()))
        if (epoch + 1) % evaluate_every == 0:
            #Save the current model
            torch.save(model, CUR_PATH)
            #Load the last saved best model
            lmodel = torch.load(CUR_PATH)
            lmodel.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
            # Test the model
            with torch.no_grad():
                predictions = []
                for test_links, _ in test_loader:
                    test_links = test_links.to(device)
                    outputs = lmodel(test_links)
                    # The larger of the two class activations is the link score.
                    predicted, _ = torch.max(outputs.data, 1)
                    predictions.extend([tensor.item() for tensor in predicted])
            if lbd_type == 'closed_discovery_abc':
                rank, ties, output = do_case_cd_evaluations_abc(a, c, gold_b, [p for p in predictions], [x for x in test_original_x], experiment_name)
                # Keep the checkpoint with the best gold-B rank, skipping
                # near-degenerate evaluations with many tied scores.
                if rank < best_info['best_rank'] and ties < 11:
                    print("Saving because {} < {}".format(rank, best_info['best_rank']))
                    torch.save(model, PATH)
                    best_info['case_name'] = experiment_name
                    best_info['best_rank'] = rank
                    best_info['loss_at_best'] = loss.item()
                    best_info['epoch'] = epoch + 1
                    best_info['output'] = output
                    fil = open("{}.txt".format(experiment_name), 'w')
                    fil.write(str(best_info))
                    fil.close()
            else:
                print("ERROR: Invalid lbd_type: {}".format(lbd_type))
def do_case_cd_evaluations_abc(a, c, gold_b, predictions, test_x, case_name):
    """Rank the gold B term among all candidate B terms for closed discovery.

    Each element of ``test_x`` is an 'entity1::entity2' link; whichever side
    is not the A or C term is taken as the intermediate B term. Scores for the
    same B are accumulated into a list, and every B's score list is compared
    against the gold B's list to produce its rank.

    Parameters
    ----------
    a, c : str
        The A and C endpoint terms of the discovery case.
    gold_b : str
        The B term deemed correct for this case.
    predictions : list
        Model scores, aligned with ``test_x``.
    test_x : list of str
        'entity1::entity2' link identifiers.
    case_name : str
        Unused; kept for interface compatibility.

    Returns
    -------
    (rank, ties, output)
        Rank of the gold B (1 = best), number of exact score ties with it,
        and a human-readable summary line (also printed).
    """
    scores_dict = {}
    for x_link, score in zip(test_x, predictions):
        entity1 = x_link.split('::')[0]
        entity2 = x_link.split('::')[1]
        # The B term is whichever endpoint is not A or C.
        if entity1 in [a, c]:
            b = entity2
        else:
            b = entity1
        scores_dict.setdefault(b, []).append(score)
    gold_score = scores_dict[gold_b]
    rank = 1
    ties = 0
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for b, score in scores_dict.items():
        if b == gold_b:
            continue
        if score > gold_score:
            rank += 1
        if score == gold_score:
            ties += 1
    output = "Of {} Bs, gold B {} rank: {} [ties: {}] (score: {}).".format(len(scores_dict), gold_b, rank, ties, gold_score)
    print(output)
    return rank, ties, output
# Parsed command-line options; populated by argparse in the __main__ block.
FLAGS = None
def main(_):
    """Dispatch to train_and_eval using the module-level FLAGS namespace.

    The positional argument is ignored (tf.app-style entry point signature).
    """
    train_and_eval(FLAGS.train_epochs, FLAGS.train_data, FLAGS.test_data, FLAGS.train_embeddings_data, FLAGS.test_embeddings_data, FLAGS.eval_filename,
            FLAGS.unformed_filename, FLAGS.positive_labels, FLAGS.combination_method, FLAGS.method, FLAGS.c_list, FLAGS.lbd_type, FLAGS.experiment_name, FLAGS.a_node,
            FLAGS.c_node, FLAGS.goldb_node)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.register("type", "bool", lambda v: v.lower() == "true")
    # One (flag, type, default, help) row per command-line option.
    _arg_specs = [
        ("--train_epochs", int, 10, "Number of training epochs."),
        ("--experiment_name", str, "", "Name of this experiment instance."),
        ("--train_data", str, "", "Path to training examples."),
        ("--test_data", str, "", "Path to the test data."),
        ("--train_embeddings_data", str, "", "Path to the pre-trained embeddings file for training."),
        ("--test_embeddings_data", str, "", "Path to the pre-trained embeddings file for testing."),
        ("--eval_filename", str, "", "Path to file with evalution data."),
        ("--unformed_filename", str, "", "Path to .json file with unformed edges."),
        ("--positive_labels", str, "I-LINK", "Label of positive classes in data, separated by comma."),
        ("--combination_method", str, "concatenate", "How the features should be combined by the model."),
        ("--graph_bipartite", str, False, "Process graph as bipartitie or not for Common Neighbours."),
        ("--method", str, "", "Method used to create embeddings."),
        ("--c_list", str, "", "Path to the list of Cs for open discovery."),
        ("--b_list", str, "", "Path to the list of Bs for closed discovery."),
        ("--lbd_type", str, "", "The type of discovery for LBD."),
        ("--a_node", str, "", "name of 'A' node."),
        ("--c_node", str, "", "name of 'C' node."),
        ("--goldb_node", str, "", "name of 'B' node deemed correct for case."),
        ("--case_name", str, "", "name of closed discovery case."),
    ]
    for flag, arg_type, arg_default, arg_help in _arg_specs:
        parser.add_argument(flag, type=arg_type, default=arg_default, help=arg_help)
    FLAGS, unparsed = parser.parse_known_args()
    main(sys.argv[0])
| [
"torch.nn.Softplus",
"torch.nn.ReLU",
"numpy.multiply",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.load",
"torch.max",
"wordvecdata.load",
"numpy.subtract",
"numpy.array",
"torch.cuda.is_available",
"pandas.read_table",
"torch.nn.Linear",
"torch.save",
"torch.no_grad"... | [((2179, 2201), 'numpy.array', 'np.array', (['label_values'], {}), '(label_values)\n', (2187, 2201), True, 'import numpy as np\n'), ((2926, 2969), 'numpy.array', 'np.array', (['([0.0] * dim_size)'], {'dtype': '"""float32"""'}), "([0.0] * dim_size, dtype='float32')\n", (2934, 2969), True, 'import numpy as np\n'), ((3909, 3930), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3928, 3930), True, 'import torch.nn as nn\n'), ((4352, 4387), 'wordvecdata.load', 'wvd.load', (['test_embeddings_file_name'], {}), '(test_embeddings_file_name)\n', (4360, 4387), True, 'import wordvecdata as wvd\n'), ((4549, 4607), 'pandas.read_table', 'pd.read_table', (['train_file_name'], {'dtype': "{'train_nodes': str}"}), "(train_file_name, dtype={'train_nodes': str})\n", (4562, 4607), True, 'import pandas as pd\n'), ((5151, 5208), 'pandas.read_table', 'pd.read_table', (['test_file_name'], {'dtype': "{'train_nodes': str}"}), "(test_file_name, dtype={'train_nodes': str})\n", (5164, 5208), True, 'import pandas as pd\n'), ((9427, 9452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9450, 9452), False, 'import argparse\n'), ((410, 435), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (433, 435), False, 'import torch\n'), ((1454, 1479), 'torch.nn.Linear', 'nn.Linear', (['input_dim', '(100)'], {}), '(input_dim, 100)\n', (1463, 1479), True, 'import torch.nn as nn\n'), ((1515, 1524), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1522, 1524), True, 'import torch.nn as nn\n'), ((1553, 1570), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(2)'], {}), '(100, 2)\n', (1562, 1570), True, 'import torch.nn as nn\n'), ((1595, 1608), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1606, 1608), True, 'import torch.nn as nn\n'), ((3734, 3756), 'numpy.array', 'np.array', (['label_values'], {}), '(label_values)\n', (3742, 3756), True, 'import numpy as np\n'), ((3172, 3212), 'numpy.multiply', 'np.multiply', 
(['features', 'column_tensors[i]'], {}), '(features, column_tensors[i])\n', (3183, 3212), True, 'import numpy as np\n'), ((6596, 6623), 'torch.save', 'torch.save', (['model', 'CUR_PATH'], {}), '(model, CUR_PATH)\n', (6606, 6623), False, 'import torch\n'), ((6689, 6709), 'torch.load', 'torch.load', (['CUR_PATH'], {}), '(CUR_PATH)\n', (6699, 6709), False, 'import torch\n'), ((6870, 6885), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6883, 6885), False, 'import torch\n'), ((2576, 2601), 'numpy.array', 'np.array', (['embeddings[id_]'], {}), '(embeddings[id_])\n', (2584, 2601), True, 'import numpy as np\n'), ((3286, 3325), 'numpy.array', 'np.array', (['[features, column_tensors[i]]'], {}), '([features, column_tensors[i]])\n', (3294, 3325), True, 'import numpy as np\n'), ((7109, 7135), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (7118, 7135), False, 'import torch\n'), ((7593, 7616), 'torch.save', 'torch.save', (['model', 'PATH'], {}), '(model, PATH)\n', (7603, 7616), False, 'import torch\n'), ((3418, 3458), 'numpy.subtract', 'np.subtract', (['features', 'column_tensors[i]'], {}), '(features, column_tensors[i])\n', (3429, 3458), True, 'import numpy as np\n'), ((3663, 3711), 'numpy.concatenate', 'np.concatenate', (['[features, column_tensors[i]]', '(1)'], {}), '([features, column_tensors[i]], 1)\n', (3677, 3711), True, 'import numpy as np\n'), ((3551, 3591), 'numpy.subtract', 'np.subtract', (['features', 'column_tensors[i]'], {}), '(features, column_tensors[i])\n', (3562, 3591), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import sys, os
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, './PythonAPI/')
# sys.path.insert(0, os.path.abspath('data'))
for _ in sys.path:
print (_)
from PythonAPI.pycocotools.coco import COCO
import cv2
import numpy as np
import os
from libs.label_name_dict import coco_dict
# Hard-coded, machine-specific path to the COCO 2017 training annotations.
annotation_path = '/home/yjr/DataSet/COCO/2017/annotations/instances_train2017.json'
print ("load coco .... it will cost about 17s..")
coco = COCO(annotation_path)
# Image ids as an ndarray so next_img() can shuffle them in place.
imgId_list = coco.getImgIds()
imgId_list = np.array(imgId_list)
total_imgs = len(imgId_list)
# print (NAME_LABEL_DICT)
def next_img(step):
    """Return (image id, RGB image, gt boxes+labels) for the given step.

    Walks forward through ``imgId_list`` starting at ``step`` until an image
    with at least one annotation is found (images without annotations are
    skipped). The id list is reshuffled whenever the step lands on an epoch
    boundary.

    Returns
    -------
    imgid : the COCO image id
    img : ndarray, image converted BGR -> RGB
    gtbox_and_label_list : int32 ndarray of [xmin, ymin, xmax, ymax, label]
    """
    # Iterate instead of recursing: the original recursive retry could hit
    # Python's recursion limit on a long run of annotation-less images.
    while True:
        if step % total_imgs == 0:
            np.random.shuffle(imgId_list)
        imgid = imgId_list[step % total_imgs]
        imgname = coco.loadImgs(ids=[imgid])[0]['file_name']
        # cv2 loads BGR; channels are reversed to RGB on return below.
        img = cv2.imread(os.path.join("/home/yjr/DataSet/COCO/2017/train2017", imgname))
        annotation = coco.imgToAnns[imgid]
        gtbox_and_label_list = []
        for ann in annotation:
            box = ann['bbox']
            # COCO bbox is [x, y, w, h]; convert to [xmin, ymin, xmax, ymax].
            box = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
            cat_id = ann['category_id']
            cat_name = coco_dict.originID_classes[cat_id]
            label = coco_dict.NAME_LABEL_MAP[cat_name]
            gtbox_and_label_list.append(box + [label])
        gtbox_and_label_list = np.array(gtbox_and_label_list, dtype=np.int32)
        if gtbox_and_label_list.shape[0] != 0:
            return imgid, img[:, :, ::-1], gtbox_and_label_list
        # No annotations on this image: try the next one.
        step += 1
if __name__ == '__main__':
    # Smoke test: fetch one sample and display it with its boxes drawn.
    imgid, img, gtbox = next_img(3234)
    print("::")
    from libs.box_utils.draw_box_in_img import draw_boxes_with_label_and_scores
    # gtbox columns are [xmin, ymin, xmax, ymax, label]; use dummy scores of 1.
    img = draw_boxes_with_label_and_scores(img_array=img, boxes=gtbox[:, :-1], labels=gtbox[:, -1],
                                     scores=np.ones(shape=(len(gtbox), )))
    print ("_----")
    cv2.imshow("test", img)
    cv2.waitKey(0)
| [
"PythonAPI.pycocotools.coco.COCO",
"sys.path.insert",
"os.path.join",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"numpy.random.shuffle"
] | [((155, 189), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./PythonAPI/"""'], {}), "(0, './PythonAPI/')\n", (170, 189), False, 'import sys, os\n'), ((553, 574), 'PythonAPI.pycocotools.coco.COCO', 'COCO', (['annotation_path'], {}), '(annotation_path)\n', (557, 574), False, 'from PythonAPI.pycocotools.coco import COCO\n'), ((622, 642), 'numpy.array', 'np.array', (['imgId_list'], {}), '(imgId_list)\n', (630, 642), True, 'import numpy as np\n'), ((1495, 1541), 'numpy.array', 'np.array', (['gtbox_and_label_list'], {'dtype': 'np.int32'}), '(gtbox_and_label_list, dtype=np.int32)\n', (1503, 1541), True, 'import numpy as np\n'), ((2134, 2157), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'img'], {}), "('test', img)\n", (2144, 2157), False, 'import cv2\n'), ((2163, 2177), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2174, 2177), False, 'import cv2\n'), ((772, 801), 'numpy.random.shuffle', 'np.random.shuffle', (['imgId_list'], {}), '(imgId_list)\n', (789, 801), True, 'import numpy as np\n'), ((965, 1027), 'os.path.join', 'os.path.join', (['"""/home/yjr/DataSet/COCO/2017/train2017"""', 'imgname'], {}), "('/home/yjr/DataSet/COCO/2017/train2017', imgname)\n", (977, 1027), False, 'import os\n')] |
'''PyTorch module.'''
import os
import numpy as np
from PIL import Image
import torch
class ImageDataset(torch.utils.data.Dataset):
    '''PyTorch dataset for images on disk.'''

    def __init__(self, images, labels=None, label_dirname=False, resize=None, shape='chw', transform=None, preload=False, preload_limit=np.inf):
        '''
        Parameters
        ----------
        images : list
            List of image file paths.
        labels : list, optional
            List of image labels (default: image file names).
        label_dirname : bool, optional
            Use directory names as labels if True (default: False).
        resize : None or tuple, optional
            If not None, images will be resized to the specified size.
        shape : str ({'chw', 'hwc', ...}), optional
            Specify array axis order (channel, height, and width).
        transform : optional
            Transformers (applied after resizing, reshaping, and scaling to [0, 1])
        preload : bool, optional
            Pre-load images (default: False).
        preload_limit : int
            Memory size limit of preloading in GiB (default: unlimited).

        Note
        ----
        - Images are converted to RGB. Alpha channels in RGBA images are ignored.
        '''
        self.transform = transform
        # Custom transforms
        self.__shape = shape
        self.__resize = resize
        self.__data = {}          # idx -> preloaded array
        preload_size = 0          # bytes preloaded so far
        image_labels = []
        for i, imf in enumerate(images):
            # TODO: validate the image file
            if label_dirname:
                image_labels.append(os.path.basename(os.path.dirname(imf)))
            else:
                image_labels.append(os.path.basename(imf))
            if preload:
                data = self.__load_image(imf)
                data_size = data.size * data.itemsize
                if preload_size + data_size > preload_limit * (1024 ** 3):
                    # Budget exceeded: stop preloading any further images.
                    preload = False
                    continue
                self.__data.update({i: data})
                preload_size += data_size
        self.data_path = images
        if labels is not None:
            self.labels = labels
        else:
            self.labels = image_labels
        self.n_sample = len(images)

    def __len__(self):
        return self.n_sample

    def __getitem__(self, idx):
        # Use the preloaded array when available, otherwise load from disk.
        if idx in self.__data:
            data = self.__data[idx]
        else:
            data = self.__load_image(self.data_path[idx])
        if self.transform is not None:
            # Bug fix: the transformed result was previously assigned to a
            # misspelled variable ('date') and silently discarded, so the
            # transform never took effect.
            data = self.transform(data)
        else:
            data = torch.Tensor(data)
        label = self.labels[idx]
        return data, label

    def __load_image(self, fpath):
        """Load one image as a float array scaled to [0, 1], shaped per self.__shape."""
        img = Image.open(fpath)
        # CMYK, RGBA --> RGB
        if img.mode == 'CMYK':
            img = img.convert('RGB')
        if img.mode == 'RGBA':
            # Composite onto a white background; the alpha channel is dropped.
            bg = Image.new('RGB', img.size, (255, 255, 255))
            bg.paste(img, mask=img.split()[3])
            img = bg
        data = np.asarray(img)
        # Monotone to RGB
        if data.ndim == 2:
            data = np.stack([data, data, data], axis=2)
        # Resize the image
        if self.__resize is not None:
            data = np.array(Image.fromarray(data).resize(self.__resize, resample=2))  # bicubic
        # Reshape from HWC to the requested axis order.
        s2d = {'h': 0, 'w': 1, 'c': 2}
        data = data.transpose((s2d[self.__shape[0]],
                               s2d[self.__shape[1]],
                               s2d[self.__shape[2]]))
        # Scaling to [0, 1]
        data = data / 255.
        return data
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"PIL.Image.new",
"numpy.asarray",
"torch.Tensor",
"numpy.stack",
"os.path.dirname",
"os.path.basename"
] | [((2747, 2764), 'PIL.Image.open', 'Image.open', (['fpath'], {}), '(fpath)\n', (2757, 2764), False, 'from PIL import Image\n'), ((3039, 3054), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (3049, 3054), True, 'import numpy as np\n'), ((2616, 2634), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (2628, 2634), False, 'import torch\n'), ((2911, 2954), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'img.size', '(255, 255, 255)'], {}), "('RGB', img.size, (255, 255, 255))\n", (2920, 2954), False, 'from PIL import Image\n'), ((3128, 3164), 'numpy.stack', 'np.stack', (['[data, data, data]'], {'axis': '(2)'}), '([data, data, data], axis=2)\n', (3136, 3164), True, 'import numpy as np\n'), ((1717, 1738), 'os.path.basename', 'os.path.basename', (['imf'], {}), '(imf)\n', (1733, 1738), False, 'import os\n'), ((1640, 1660), 'os.path.dirname', 'os.path.dirname', (['imf'], {}), '(imf)\n', (1655, 1660), False, 'import os\n'), ((3259, 3280), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (3274, 3280), False, 'from PIL import Image\n')] |
from dimredu.eRPCAviaADMMFast import eRPCA as eRPCASparse
import numpy as np
def denseToSparse(M, E):
    """Convert dense matrices to the coordinate-format arrays eRPCA expects.

    Parameters
    ----------
    M : ndarray, shape (m, n)
        The data matrix.
    E : ndarray, shape (m, n)
        Entry-wise error bounds; must have the same shape as M.

    Returns
    -------
    (m, n, u, v, vecM, vecE)
        Matrix dimensions plus row indices, column indices, and the values
        of M and E, all flattened in row-major order as float arrays.
    """
    assert M.shape == E.shape, 'shape mismatch'
    m, n = M.shape
    # Vectorized replacement for the original element-by-element index loops.
    rows, cols = np.indices((m, n))
    u = rows.ravel().astype(float)
    v = cols.ravel().astype(float)
    vecM = M.ravel().astype(float)
    vecE = E.ravel().astype(float)
    return m, n, u, v, vecM, vecE
def eRPCA(M, E, **kw):
    """Dense-matrix convenience wrapper around the sparse eRPCA solver."""
    coords = denseToSparse(M, E)
    # Cap the recovered rank at the smaller matrix dimension.
    return eRPCASparse(*coords, np.min(M.shape), **kw)
def test_small():
    """Smoke test: run eRPCA on a small random matrix with tiny error bounds."""
    data = np.random.random(size=[5, 15])
    bounds = np.ones(data.shape) * 1e-6
    eRPCA(data, bounds)
if __name__ == '__main__':
    # Run the smoke test when executed as a script.
    test_small()
| [
"numpy.ones",
"numpy.random.random",
"dimredu.eRPCAviaADMMFast.eRPCA",
"numpy.empty",
"numpy.min"
] | [((208, 225), 'numpy.empty', 'np.empty', (['[m * n]'], {}), '([m * n])\n', (216, 225), True, 'import numpy as np\n'), ((235, 252), 'numpy.empty', 'np.empty', (['[m * n]'], {}), '([m * n])\n', (243, 252), True, 'import numpy as np\n'), ((265, 282), 'numpy.empty', 'np.empty', (['[m * n]'], {}), '([m * n])\n', (273, 282), True, 'import numpy as np\n'), ((295, 312), 'numpy.empty', 'np.empty', (['[m * n]'], {}), '([m * n])\n', (303, 312), True, 'import numpy as np\n'), ((634, 649), 'numpy.min', 'np.min', (['M.shape'], {}), '(M.shape)\n', (640, 649), True, 'import numpy as np\n'), ((662, 712), 'dimredu.eRPCAviaADMMFast.eRPCA', 'eRPCASparse', (['m', 'n', 'u', 'v', 'vecM', 'vecE', 'maxRank'], {}), '(m, n, u, v, vecM, vecE, maxRank, **kw)\n', (673, 712), True, 'from dimredu.eRPCAviaADMMFast import eRPCA as eRPCASparse\n'), ((745, 775), 'numpy.random.random', 'np.random.random', ([], {'size': '[5, 15]'}), '(size=[5, 15])\n', (761, 775), True, 'import numpy as np\n'), ((785, 801), 'numpy.ones', 'np.ones', (['X.shape'], {}), '(X.shape)\n', (792, 801), True, 'import numpy as np\n')] |
import json as j
import pandas as pd
import re
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, chi2
# The Yelp reviews file is newline-delimited JSON; wrap the lines in brackets
# and join them with commas so the whole file parses as one JSON array.
json_data = None
with open('../data/yelp_academic_dataset_review.json') as data_file:
    lines = data_file.readlines()
    joined_lines = "[" + ",".join(lines) + "]"
    json_data = j.loads(joined_lines)
data = pd.DataFrame(json_data)
stemmer = SnowballStemmer('english')
words = stopwords.words("english")
# Clean each review: strip non-letters, drop stopwords, stem, then lowercase.
data['cleaned'] = data['text'].apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split() if i not in words]).lower())
X_train, X_test, y_train, y_test = train_test_split(data['cleaned'], data.stars, test_size=0.2)
# Pipeline: tf-idf over uni/bigrams -> chi2 selection of top 10k features
# -> L1-regularized linear SVM predicting the star rating (1-5).
pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, 2), stop_words="english", sublinear_tf=True)),
                     ('chi', SelectKBest(chi2, k=10000)),
                     ('clf', LinearSVC(C=1.0, penalty='l1', max_iter=3000, dual=False))])
model = pipeline.fit(X_train, y_train)
vectorizer = model.named_steps['vect']
chi = model.named_steps['chi']
clf = model.named_steps['clf']
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2; newer
# versions require get_feature_names_out() — confirm the pinned version.
feature_names = vectorizer.get_feature_names()
# Keep only the names of the features retained by chi2 selection.
feature_names = [feature_names[i] for i in chi.get_support(indices=True)]
feature_names = np.asarray(feature_names)
target_names = ['1', '2', '3', '4', '5']
print("top 10 keywords per class:")
for i, label in enumerate(target_names):
    # Highest-weight features for this star-rating class.
    top10 = np.argsort(clf.coef_[i])[-10:]
    print("%s: %s" % (label, " ".join(feature_names[top10])))
print("accuracy score: " + str(model.score(X_test, y_test)))
print(model.predict(['that was an awesome place. Great food!']))
| [
"json.loads",
"nltk.stem.SnowballStemmer",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"sklearn.svm.LinearSVC",
"numpy.asarray",
"numpy.argsort",
"sklearn.feature_selection.SelectKBest",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.DataFrame",
"re.sub... | [((595, 618), 'pandas.DataFrame', 'pd.DataFrame', (['json_data'], {}), '(json_data)\n', (607, 618), True, 'import pandas as pd\n'), ((630, 656), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (645, 656), False, 'from nltk.stem import SnowballStemmer\n'), ((665, 691), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (680, 691), False, 'from nltk.corpus import stopwords\n'), ((876, 936), 'sklearn.model_selection.train_test_split', 'train_test_split', (["data['cleaned']", 'data.stars'], {'test_size': '(0.2)'}), "(data['cleaned'], data.stars, test_size=0.2)\n", (892, 936), False, 'from sklearn.model_selection import train_test_split\n'), ((1477, 1502), 'numpy.asarray', 'np.asarray', (['feature_names'], {}), '(feature_names)\n', (1487, 1502), True, 'import numpy as np\n'), ((565, 586), 'json.loads', 'j.loads', (['joined_lines'], {}), '(joined_lines)\n', (572, 586), True, 'import json as j\n'), ((1634, 1658), 'numpy.argsort', 'np.argsort', (['clf.coef_[i]'], {}), '(clf.coef_[i])\n', (1644, 1658), True, 'import numpy as np\n'), ((968, 1044), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'stop_words': '"""english"""', 'sublinear_tf': '(True)'}), "(ngram_range=(1, 2), stop_words='english', sublinear_tf=True)\n", (983, 1044), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1077, 1103), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['chi2'], {'k': '(10000)'}), '(chi2, k=10000)\n', (1088, 1103), False, 'from sklearn.feature_selection import SelectKBest, chi2\n'), ((1135, 1192), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)', 'penalty': '"""l1"""', 'max_iter': '(3000)', 'dual': '(False)'}), "(C=1.0, penalty='l1', max_iter=3000, dual=False)\n", (1144, 1192), False, 'from sklearn.svm import LinearSVC\n'), ((775, 802), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'x'], {}), 
"('[^a-zA-Z]', ' ', x)\n", (781, 802), False, 'import re\n')] |
#!/usr/bin/python3
import colorsys
import copy
from matplotlib import pyplot as plt
import numpy as np
import pickle
import pygame
from pygame import surfarray
import random
import scipy.misc
import PIL
from PIL import Image
from scipy.stats import logistic
pygame.init()
plt.ion()  # interactive matplotlib so plots update without blocking
# gui = 'console'
# gui = 'headless'
gui = 'pygame'
max_energy = 0     # high-water mark, updated by Agent.extract_energy
efficiency = 0.6   # fraction of extracted energy actually stored
energy_drain = 1.0 # 0.99
plot_data = True
save_data = True
save_video_frames = True
save_history_images = True
testing = True
# testing = False
fast = False
if fast:
    # "fast" mode: disable all plotting/saving work.
    plot_data = False
    save_video_frames = False
    save_history_images = False
    save_data = False
# Like loki1Dv
plot_data = False
save_data = False
save_video_frames = False
save_history_images = False
testing = True
if gui == 'headless':
    plot_data = False
if testing:
    # Smaller world for quicker test runs.
    land_size = 320 * 2
    history = 240 * 2
else:
    #land_size = 1680
    #history = 1050
    land_size = 840
    history = 525
display_w = land_size # * 2
display_h = history # * 2
# Environment: a vector of resource levels the agents adapt to.
num_resources = 2
resources = np.zeros(num_resources)
resources[0] = 0.
resources[1] = 0.
resource_mutability = np.ones(resources.shape) * 0.00 # 2
sqrt_2_pi = np.sqrt(2 * np.pi)  # normalisation constant for the Gaussian energy response
# pygame.display.toggle_fullscreen()
if gui == 'pygame':
    if testing:
        display = pygame.display.set_mode((display_w, display_h))
    else:
        display = pygame.display.set_mode((display_w, display_h),pygame.FULLSCREEN)
class Agent(object):
    """One lattice site's organism.

    Genome: per-resource Gaussians (means/sigmas) that determine how much
    energy the agent extracts from the ambient resources, per-gene
    mutability levels, a reproduction threshold and a 3-vector colour.
    """
    def __init__(self, resource_size):
        # Mutable data (the inheritable genome)
        self._means = np.random.uniform(size=resource_size)
        self._sigmas = np.ones(resource_size) * 4
        self._mutability_means = np.random.uniform(size=resource_size)
        self._mutability_sigmas = np.random.uniform(size=resource_size)
        self._reproduction_threshold = np.random.uniform() * 5
        self._colour = np.random.uniform(size=(3,))
        # self._mutation_level_means = 0.1
        # self._mutation_level_sigmas = 0.1
        self._mutation_level_repro = np.random.uniform()
        # Params (runtime state, not traits)
        self._energy = 0
        self._mutation_level_colour = 0.01
        self._age = 0
        self._gen = 0
        #print(self._means)
        #print(self._sigmas)
    def extract_energy(self, resources):
        """Harvest energy from `resources`; updates the global max_energy."""
        global max_energy
        # env is list of resources
        dist_squared = np.square(self._means - resources)
        # Gaussian response curve: the best-matching resource feeds the agent.
        energy = (
            (np.exp(-dist_squared / (2*self._sigmas*self._sigmas)))
            / (self._sigmas * sqrt_2_pi))
        # print('energy', dist_squared, self._sigmas, energy)
        self._energy += energy.max() * efficiency
        self._energy *= energy_drain
        # self._energy = min(max(self._energy, 0.), max_energy)
        if self._energy > max_energy:
            max_energy = self._energy
            # print('max_energy = ', max_energy)
        self._age += 1
    def reproduce_stochastic(self, agents, neighbour_indices):
        """Maybe place a mutated offspring onto a random neighbour site."""
        if self._energy >= self._reproduction_threshold:
            choose = random.sample(range(len(neighbour_indices)), 1)[0]
            neighbour_idx = neighbour_indices[choose]
            # Reproduction probability rises sigmoidally with energy.
            prob = logistic.cdf(self._energy - 6)
            if np.random.uniform() < prob:
                agents[neighbour_idx] = self._make_offspring()
        # else:
        #     self._energy /= 2
    def reproduce_energy(self, agents, neighbour_indices):
        """Alternative rule: offspring displaces an empty or weaker neighbour."""
        if self._energy >= self._reproduction_threshold:
        # if self._age >= self._reproduction_threshold:
            choose = random.sample(range(len(neighbour_indices)), 1)[0]
            neighbour_idx = neighbour_indices[choose]
            if (agents[neighbour_idx] is None
                    or agents[neighbour_idx]._energy < self._energy):
                agents[neighbour_idx] = self._make_offspring()
    def _make_offspring(self):
        """Split energy with a mutated deep copy of self; return the copy."""
        clone = copy.deepcopy(self)
        clone.mutate()
        clone._energy /= 2
        clone._gen += 1
        clone._age = 0
        self._energy /= 2
        self._age = 0
        return clone
    def mutate(self):
        """Perturb the genome in place, using its own mutability genes."""
        mutate_array(self._means, self._mutability_means)
        mutate_array(self._sigmas, self._mutability_sigmas, lower=1.0)
        self._reproduction_threshold = mutate_value(
            self._reproduction_threshold , self._mutation_level_repro,
            lower=0.0)
        mutate_array(self._colour, self._mutation_level_colour,
                     lower=0.0, higher=1.0, reflect=True)
        mutate_array(self._mutability_means, 0.01, lower=0.0, higher=1.0,
                     reflect=True)
        mutate_array(self._mutability_sigmas, 0.01, lower=0.0, higher=1.0,
                     reflect=True)
        # self._mutation_level_means = mutate_value(
        #     self._mutation_level_means, 0.01, lower=0.0)
        # self._mutation_level_sigmas = mutate_value(
        #     self._mutation_level_sigmas, 0.01, lower=0.0)
        self._mutation_level_repro = mutate_value(
            self._mutation_level_repro, 0.01, lower=0.0)
    def gen_to_char(self):
        """ASCII glyph cycling with the agent's generation (console view)."""
        chars = list('.:-=+*#%@')
        # '.,-+*oO$0#$@
        return chars[self._gen % len(chars)]
    def energy_to_char(self):
        return str(int(self._energy))
    def __rgb(self):
        return self._colour * 255
    @property
    def rgb(self):
        # RGB modulated by energy
        return self._colour[:] * (self._energy/max_energy)
    @property
    def irgb(self):
        # NOTE(review): this property mutates self._colour as a side effect.
        # er = 0.9
        # self._colour[2] = ((self._energy * er)/max_energy) + (1-er)
        er = 1.0
        self._colour[2] = self._colour[2] * er + (1-er)
        # self._colour[2] = 1-(((self._energy * er)/max_energy) + (1-er))
        sr = 1.0
        self._colour[1] = self._colour[1] * sr + (1-sr)
        return np.array(colorsys.hsv_to_rgb(*self._colour.tolist()))
    def _rgb(self):
        v = self._energy / max_energy
        s = self._colour[1]
        return np.array(colorsys.hsv_to_rgb(
            self._colour[0], self._colour[1], v)) * 255
def mutate_array(arr, level, lower=None, higher=None, reflect=False):
    """Add Gaussian noise scaled by `level` to `arr` in place, then bound it.

    Out-of-range entries are reflected about the violated bound when
    `reflect` is True, otherwise clipped to it.  Returns the mutated array.
    """
    noise = np.random.normal(size=arr.shape)
    arr += noise * level
    if lower is not None:
        below = arr < lower
        arr[below] = (2 * lower - arr[below]) if reflect else lower
    if higher is not None:
        above = arr > higher
        arr[above] = (2 * higher - arr[above]) if reflect else higher
    return arr
def mutate_value(val, level, lower=None, higher=None):
    """Return `val` plus Gaussian noise scaled by `level`, clipped to bounds."""
    val = val + np.random.normal() * level
    if lower is not None:
        val = max(val, lower)
    if higher is not None:
        val = min(val, higher)
    return val
# agents = [Agent(len(resources)) for _ in range(10)]
# One row of resource values per lattice site.
world = np.zeros((land_size, num_resources))
# agents = [None for _ in range(land_size)]
# Populate the whole 1-D lattice with freshly randomised agents.
agents = [Agent(num_resources) for _ in range(land_size)]
agents[int(land_size/2)] = Agent(num_resources)
# import pdb; pdb.set_trace()
# History image: axis 0 = lattice site, axis 1 = time step, axis 2 = RGB.
bitmap = np.zeros((land_size, history ,3)).astype(np.uint8)
def step_world(world, incoming_resource, agents):
    """Advance the world one tick.

    Broadcasts the incoming resource to every site, then visits the sites in
    a random order twice: once to let each agent extract energy, and once to
    let each agent attempt reproduction onto a lattice neighbour.
    """
    world[:] = incoming_resource
    # print('incoming', incoming_resource)
    world_size = world.shape[0]
    indices = list(range(world_size))
    random.shuffle(indices)
    for pos in indices:
        agent = agents[pos]
        resource = world[pos]
        if agent is not None:
            agent.extract_energy(resource)
    deaths = 0
    for pos in indices:
        agent = agents[pos]
        if agent is not None:
            # Boundary sites have a single neighbour; interior sites have two.
            if pos == 0:
                neighbour_indices = [1]
            elif pos == world_size - 1:
                neighbour_indices = [world_size - 2]
            else:
                neighbour_indices = [pos - 1, pos + 1]
            agent.reproduce_stochastic(agents, neighbour_indices)
            # if agent._energy < max_energy / 10:
            #     agents[pos] = None
            #     deaths += 1
            # print(agent._sigmas)
            # print(agent._means)
    if deaths > 0:
        print('deaths', deaths)
def show(world, agents):
    """Print one console frame: generation glyphs, then energy digits."""
    line = ''
    for idx in range(world.shape[0]):
        agent = agents[idx]
        line += agent.gen_to_char() if agent is not None else ' '
    line += ' | '
    for idx in range(world.shape[0]):
        agent = agents[idx]
        line += agent.energy_to_char() if agent is not None else ' '
    print(line)
def draw_agents(bitmap, row, world, agents):
    """Clear time-column `row` of the bitmap and paint each live agent into it."""
    bitmap[:, row] = np.zeros(3, dtype=np.uint8)
    for idx in range(world.shape[0]):
        agent = agents[idx]
        if agent is not None:
            bitmap[idx, row] = agent.rgb * 255
def draw_agents_roll(bitmap, world, agents):
    """Scroll the history bitmap one time step and draw agents into column 0.

    Returns a new array; the input bitmap is not modified.
    """
    rolled = np.roll(bitmap, 1, axis=1)
    rolled[:, 0] = np.zeros(3, dtype=np.uint8)
    for idx in range(world.shape[0]):
        agent = agents[idx]
        if agent is not None:
            rolled[idx, 0] = agent.rgb * 255
    return rolled
def get_data(agents):
    """Collect the genome/energy values of all live agents into parallel lists."""
    live = [agent for agent in agents if agent is not None]
    return dict(means=[a._means for a in live],
                sigmas=[a._sigmas for a in live],
                reproduction_threshold=[a._reproduction_threshold for a in live],
                energy=[a._energy for a in live],
                mut_means=[a._mutability_means for a in live],
                mut_sigmas=[a._mutability_sigmas for a in live])
def stats(vals):
    """Return the (min, mean, max) triple of `vals`."""
    arr = np.asarray(vals)
    return arr.min(), arr.mean(), arr.max()
# ---- main simulation loop --------------------------------------------------
stop = False
#for t in range(1000):
# Per-snapshot statistics accumulated for plotting / pickling.
energy_hist = []
max_energy_hist = []
repo_hist = []
mut_means_hist = []
mut_sigmas_hist = []
t = 0
current_res = np.zeros_like(resources)
# Save the initial population state before the simulation starts.
with open('output/loki_data_t{}.pkl'.format(t), 'wb') as handle:
    data = get_data(agents)
    pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
two_pi = 2 * np.pi
while True:
    # Occasionally jump each resource to a new random value
    # (probability per step given by resource_mutability).
    changed = False
    if np.random.uniform() < resource_mutability[0]:
        resources[0] = np.random.uniform(-1,1) * 5
        # resources[0] += np.random.uniform(-1,1) * 5
        # resources[0] += np.random.normal() * 5
        # resources[0] += np.random.standard_cauchy() * 5
        changed = True
    if np.random.uniform() < resource_mutability[1]:
        resources[1] = np.random.uniform(-1,1) * 5
        # resources[1] += np.random.uniform(-1,1) * 5
        # resources[1] += np.random.normal() * 5
        # resources[1] += np.random.standard_cauchy() * 5
        changed = True
    current_res[0] = resources[0]
    current_res[1] = resources[1]
    if changed:
        print('Resources at {} = {} (mutabilitt {})'.format(
            t, current_res, resource_mutability))
    #current_res[0] = (np.sin((t*two_pi)/300.) + 1.) / 2 + resources[0]
    #current_res[1] = (np.cos((t*two_pi)/500.) + 1.) / 2 + resources[1]
    # resources[2] = (np.cos((t*two_pi)/800.) + 1.)
    # resources[3] = (np.cos((t*two_pi)/1300.) + 1.)
    # resources[4] = (np.cos((t*two_pi)/2100.) + 1.)
    # resources[5] = (np.cos((t*two_pi)/3400.) + 1.)
    if gui == 'pygame':
        # Allow quitting via window close or the ESC key.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                stop = True
                break
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    stop = True
                    break
    if stop:
        break
    # Decay the running maximum so the colour display renormalises over time.
    max_energy *= 0.99
    step_world(world, current_res, agents)
    if not fast:
        bitmap = draw_agents_roll(bitmap, world, agents)
    else:
        draw_agents(bitmap, t % history, world, agents)
    if gui == 'console':
        show(world, agents)
    elif gui == 'pygame':
        # pygame.transform.scale(final_surf, (width*scale, height*scale), DISPLAYSURF)
        if not fast:
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3;
            # this path requires an old SciPy (or porting to PIL).
            bbitmap = scipy.misc.imresize(bitmap, (display_w, display_h),
                                          interp='nearest')
        else:
            bbitmap = bitmap
        surfarray.blit_array(display, bbitmap)
        pygame.display.flip()
    if save_video_frames and t % int(history/8) == 0:
        img = Image.fromarray(bitmap.swapaxes(0,1))
        img = img.resize((img.width * 2, img.height * 2))
        img.save('output/loki_frame_t{:09d}.png'.format(t))
    if t % history == history - 1:
        # Once per full screen of history: snapshot image, stats and plots.
        if save_history_images:
            img = Image.fromarray(bitmap.swapaxes(0,1))
            img = img.resize((img.width * 2, img.height * 2))
            img.save('output/loki_image_t{:09d}.png'.format(t))
        if plot_data or save_data:
            data = get_data(agents)
            agg = True
            if agg:
                energy_hist.append(stats(data['energy']))
                repo_hist.append(stats(data['reproduction_threshold']))
                mut_means_hist.append([stats(np.array(data['mut_means'])[:,0])[1],
                    stats(np.array(data['mut_means'])[:,1])[1]])
                mut_sigmas_hist.append(stats(data['mut_sigmas']))
            else:
                energy_hist.append(data['energy'])
                repo_hist.append(data['reproduction_threshold'])
                mut_means_hist.append(
                    np.concatenate(
                        (np.array(data['mut_means'])[:,0],
                         np.array(data['mut_means'])[:,1])))
                mut_sigmas_hist.append(np.concatenate(
                    (np.array(data['mut_sigmas'])[:,0],
                     np.array(data['mut_sigmas'])[:,1])))
            max_energy_hist.append(max_energy)
            data['energy_hist'] = energy_hist
            data['max_energy_hist'] = max_energy_hist
            data['repo_hist'] = repo_hist
            data['mut_means_hist'] = mut_means_hist
            data['mut_sigmas_hist'] = mut_sigmas_hist
        if save_data:
            with open(
                    'output/loki_data_t{:09d}.pkl'.format(t), 'wb') as handle:
                pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
        if plot_data:
            ax = plt.subplot(3,2,1)
            ax.plot(energy_hist)
            ax.plot(max_energy_hist)
            ax.set_title('energy')
            ax = plt.subplot(3,2,2)
            ax.plot(repo_hist)
            ax.set_title('repro threshold')
            ax = plt.subplot(3,2,3)
            means = np.array(data['means'])
            ax.scatter(means[:,0], means[:,1])
            ax.scatter(current_res[0], current_res[1])
            ax.set_title('means')
            ax = plt.subplot(3,2,4)
            sigmas = np.array(data['sigmas'])
            ax.scatter(sigmas[:,0], sigmas[:,1])
            ax.set_title('sigmas')
            ax = plt.subplot(3,2,5)
            means = np.array(data['mut_means_hist'])
            ax.plot(means)
            ax.set_title('mut means')
            ax.set_ylim([0, 1])
            ax = plt.subplot(3,2,6)
            means = np.array(data['mut_sigmas_hist'])
            ax.plot(means)
            ax.set_title('mut sigmas')
            ax.set_ylim([0, 1])
            plt.tight_layout()
            plt.draw()
            plt.pause(0.0001)
            plt.savefig('output/loki_plot_t{:09d}.png'.format(t))
            plt.clf()
    t += 1
    if t == 500:
        break
| [
"numpy.sqrt",
"pygame.init",
"colorsys.hsv_to_rgb",
"numpy.array",
"pygame.surfarray.blit_array",
"copy.deepcopy",
"pygame.display.set_mode",
"pygame.display.flip",
"scipy.stats.logistic.cdf",
"numpy.exp",
"numpy.random.normal",
"numpy.ones",
"random.shuffle",
"numpy.square",
"matplotlib... | [((260, 273), 'pygame.init', 'pygame.init', ([], {}), '()\n', (271, 273), False, 'import pygame\n'), ((274, 283), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (281, 283), True, 'from matplotlib import pyplot as plt\n'), ((1045, 1068), 'numpy.zeros', 'np.zeros', (['num_resources'], {}), '(num_resources)\n', (1053, 1068), True, 'import numpy as np\n'), ((1177, 1195), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1184, 1195), True, 'import numpy as np\n'), ((6754, 6790), 'numpy.zeros', 'np.zeros', (['(land_size, num_resources)'], {}), '((land_size, num_resources))\n', (6762, 6790), True, 'import numpy as np\n'), ((9843, 9867), 'numpy.zeros_like', 'np.zeros_like', (['resources'], {}), '(resources)\n', (9856, 9867), True, 'import numpy as np\n'), ((1128, 1152), 'numpy.ones', 'np.ones', (['resources.shape'], {}), '(resources.shape)\n', (1135, 1152), True, 'import numpy as np\n'), ((7233, 7256), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (7247, 7256), False, 'import random\n'), ((8482, 8509), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.uint8'}), '(3, dtype=np.uint8)\n', (8490, 8509), True, 'import numpy as np\n'), ((8689, 8715), 'numpy.roll', 'np.roll', (['bitmap', '(1)'], {'axis': '(1)'}), '(bitmap, 1, axis=1)\n', (8696, 8715), True, 'import numpy as np\n'), ((8734, 8761), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.uint8'}), '(3, dtype=np.uint8)\n', (8742, 8761), True, 'import numpy as np\n'), ((9630, 9644), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (9638, 9644), True, 'import numpy as np\n'), ((9966, 10025), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (9977, 10025), False, 'import pickle\n'), ((1288, 1335), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_w, display_h)'], {}), '((display_w, display_h))\n', (1311, 1335), False, 'import pygame\n'), 
((1364, 1430), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_w, display_h)', 'pygame.FULLSCREEN'], {}), '((display_w, display_h), pygame.FULLSCREEN)\n', (1387, 1430), False, 'import pygame\n'), ((1537, 1574), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'resource_size'}), '(size=resource_size)\n', (1554, 1574), True, 'import numpy as np\n'), ((1658, 1695), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'resource_size'}), '(size=resource_size)\n', (1675, 1695), True, 'import numpy as np\n'), ((1730, 1767), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'resource_size'}), '(size=resource_size)\n', (1747, 1767), True, 'import numpy as np\n'), ((1854, 1882), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3,)'}), '(size=(3,))\n', (1871, 1882), True, 'import numpy as np\n'), ((2007, 2026), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2024, 2026), True, 'import numpy as np\n'), ((2340, 2374), 'numpy.square', 'np.square', (['(self._means - resources)'], {}), '(self._means - resources)\n', (2349, 2374), True, 'import numpy as np\n'), ((3854, 3873), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3867, 3873), False, 'import copy\n'), ((6102, 6134), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'arr.shape'}), '(size=arr.shape)\n', (6118, 6134), True, 'import numpy as np\n'), ((6526, 6544), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (6542, 6544), True, 'import numpy as np\n'), ((6981, 7014), 'numpy.zeros', 'np.zeros', (['(land_size, history, 3)'], {}), '((land_size, history, 3))\n', (6989, 7014), True, 'import numpy as np\n'), ((10085, 10104), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (10102, 10104), True, 'import numpy as np\n'), ((10373, 10392), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (10390, 10392), True, 'import numpy as np\n'), ((11252, 11270), 'pygame.event.get', 'pygame.event.get', ([], {}), 
'()\n', (11268, 11270), False, 'import pygame\n'), ((1598, 1620), 'numpy.ones', 'np.ones', (['resource_size'], {}), '(resource_size)\n', (1605, 1620), True, 'import numpy as np\n'), ((1807, 1826), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1824, 1826), True, 'import numpy as np\n'), ((2411, 2468), 'numpy.exp', 'np.exp', (['(-dist_squared / (2 * self._sigmas * self._sigmas))'], {}), '(-dist_squared / (2 * self._sigmas * self._sigmas))\n', (2417, 2468), True, 'import numpy as np\n'), ((3139, 3169), 'scipy.stats.logistic.cdf', 'logistic.cdf', (['(self._energy - 6)'], {}), '(self._energy - 6)\n', (3151, 3169), False, 'from scipy.stats import logistic\n'), ((10154, 10178), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (10171, 10178), True, 'import numpy as np\n'), ((10442, 10466), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (10459, 10466), True, 'import numpy as np\n'), ((12100, 12138), 'pygame.surfarray.blit_array', 'surfarray.blit_array', (['display', 'bbitmap'], {}), '(display, bbitmap)\n', (12120, 12138), False, 'from pygame import surfarray\n'), ((12147, 12168), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (12166, 12168), False, 'import pygame\n'), ((14149, 14169), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (14160, 14169), True, 'from matplotlib import pyplot as plt\n'), ((14290, 14310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (14301, 14310), True, 'from matplotlib import pyplot as plt\n'), ((14401, 14421), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (14412, 14421), True, 'from matplotlib import pyplot as plt\n'), ((14440, 14463), 'numpy.array', 'np.array', (["data['means']"], {}), "(data['means'])\n", (14448, 14463), True, 'import numpy as np\n'), ((14617, 14637), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 
'(2)', '(4)'], {}), '(3, 2, 4)\n', (14628, 14637), True, 'from matplotlib import pyplot as plt\n'), ((14657, 14681), 'numpy.array', 'np.array', (["data['sigmas']"], {}), "(data['sigmas'])\n", (14665, 14681), True, 'import numpy as np\n'), ((14783, 14803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (14794, 14803), True, 'from matplotlib import pyplot as plt\n'), ((14822, 14854), 'numpy.array', 'np.array', (["data['mut_means_hist']"], {}), "(data['mut_means_hist'])\n", (14830, 14854), True, 'import numpy as np\n'), ((14969, 14989), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (14980, 14989), True, 'from matplotlib import pyplot as plt\n'), ((15008, 15041), 'numpy.array', 'np.array', (["data['mut_sigmas_hist']"], {}), "(data['mut_sigmas_hist'])\n", (15016, 15041), True, 'import numpy as np\n'), ((15152, 15170), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15168, 15170), True, 'from matplotlib import pyplot as plt\n'), ((15183, 15193), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (15191, 15193), True, 'from matplotlib import pyplot as plt\n'), ((15206, 15223), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (15215, 15223), True, 'from matplotlib import pyplot as plt\n'), ((15303, 15312), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15310, 15312), True, 'from matplotlib import pyplot as plt\n'), ((3185, 3204), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3202, 3204), True, 'import numpy as np\n'), ((5941, 5997), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['self._colour[0]', 'self._colour[1]', 'v'], {}), '(self._colour[0], self._colour[1], v)\n', (5960, 5997), False, 'import colorsys\n'), ((14049, 14108), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (14060, 14108), False, 'import 
pickle\n'), ((13343, 13370), 'numpy.array', 'np.array', (["data['mut_means']"], {}), "(data['mut_means'])\n", (13351, 13370), True, 'import numpy as np\n'), ((13405, 13432), 'numpy.array', 'np.array', (["data['mut_means']"], {}), "(data['mut_means'])\n", (13413, 13432), True, 'import numpy as np\n'), ((13517, 13545), 'numpy.array', 'np.array', (["data['mut_sigmas']"], {}), "(data['mut_sigmas'])\n", (13525, 13545), True, 'import numpy as np\n'), ((13576, 13604), 'numpy.array', 'np.array', (["data['mut_sigmas']"], {}), "(data['mut_sigmas'])\n", (13584, 13604), True, 'import numpy as np\n'), ((12936, 12963), 'numpy.array', 'np.array', (["data['mut_means']"], {}), "(data['mut_means'])\n", (12944, 12963), True, 'import numpy as np\n'), ((13004, 13031), 'numpy.array', 'np.array', (["data['mut_means']"], {}), "(data['mut_means'])\n", (13012, 13031), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from bregman.suite import *  # provides Chromagram
# k-means configuration for audio segmentation.
k = 4
segment_size = 50 # out of 24,526
max_iterations = 100
# Graph nodes: for each time frame, the index of the strongest chroma bin.
# NOTE(review): tf.placeholder / tf.sub below are pre-TF-1.0 graph-mode API.
chromo = tf.placeholder(tf.float32)
max_freqs = tf.argmax(chromo, 0)
def get_chromogram(audio_file):
    """Compute the chromagram matrix of an audio file via Bregman."""
    return Chromagram(audio_file, nfft=16384, wfft=8192, nhop=2205).X
def get_dataset(sess, audio_file):
    """Build the clustering dataset: one feature vector per chromagram segment.

    The chromagram is cut into consecutive `segment_size`-frame windows and
    each window is reduced to a normalised dominant-pitch histogram.

    :param sess: an open TF session used to evaluate the argmax graph node
    :param audio_file: path of the audio file to analyse
    :return: stacked feature vectors (one row per segment)
    """
    chromo_data = get_chromogram(audio_file)
    print('chromo_data', np.shape(chromo_data))
    chromo_length = np.shape(chromo_data)[1]
    xs = []
    # BUG FIX: use floor division — under Python 3, `/` yields a float,
    # which range() rejects with a TypeError.
    for i in range(chromo_length // segment_size):
        chromo_segment = chromo_data[:, i*segment_size:(i+1)*segment_size]
        x = extract_feature_vector(sess, chromo_segment)
        if len(xs) == 0:
            xs = x
        else:
            xs = np.vstack((xs, x))
    return xs
def initial_cluster_centroids(X, k):
    """Seed k-means with the first k rows of X."""
    seeds = X[:k, :]
    return seeds
# op
def assign_cluster(X, centroids):
    """Graph op: index of the nearest centroid for each sample in X.

    Squared Euclidean distances between every (centroid, sample) pair are
    computed by broadcasting the two expanded tensors against each other.
    """
    expanded_vectors = tf.expand_dims(X, 0)
    expanded_centroids = tf.expand_dims(centroids, 1)
    # NOTE(review): tf.sub is the pre-1.0 name; TF >= 1.0 renamed it tf.subtract.
    distances = tf.reduce_sum(tf.square(tf.sub(expanded_vectors, expanded_centroids)), 2)
    mins = tf.argmin(distances, 0)
    return mins
# op
def recompute_centroids(X, Y):
    """Graph op: new centroids as the per-cluster mean of samples assigned by Y."""
    sums = tf.unsorted_segment_sum(X, Y, k)
    # NOTE(review): a cluster with no members gives counts == 0 and NaN centroids.
    counts = tf.unsorted_segment_sum(tf.ones_like(X), Y, k)
    return sums / counts
def extract_feature_vector(sess, chromo_data):
    """Normalised histogram of the dominant chroma bin over all frames."""
    num_features, num_samples = np.shape(chromo_data)
    # Evaluate the module-level argmax node on this chromagram slice.
    freq_vals = sess.run(max_freqs, feed_dict={chromo: chromo_data})
    hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))
    return hist.astype(float) / num_samples
with tf.Session() as sess:
    X = get_dataset(sess, 'sysk.wav')
    print(np.shape(X))
    centroids = initial_cluster_centroids(X, k)
    i, converged = 0, False
    # prev_Y = None
    # Lloyd's iterations: assign samples to centroids, then recompute them.
    while not converged and i < max_iterations:
        i += 1
        Y = assign_cluster(X, centroids)
        # if prev_Y == Y:
        #     converged = True
        #     break
        # prev_Y = Y
        centroids = sess.run(recompute_centroids(X, Y))
        if i % 50 == 0:
            print('iteration', i)
    segments = sess.run(Y)
    # Print the start time of each segment with its cluster label.
    for i in range(len(segments)):
        seconds = (i * segment_size) / float(10)
        # NOTE(review): `min` shadows the builtin within this loop.
        min, sec = divmod(seconds, 60)
        time_str = str(min) + 'm ' + str(sec) + 's'
        print(time_str, segments[i])
| [
"tensorflow.argmin",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.sub",
"tensorflow.argmax",
"tensorflow.unsorted_segment_sum",
"numpy.vstack",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"numpy.shape"
] | [((145, 171), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (159, 171), True, 'import tensorflow as tf\n'), ((184, 204), 'tensorflow.argmax', 'tf.argmax', (['chromo', '(0)'], {}), '(chromo, 0)\n', (193, 204), True, 'import tensorflow as tf\n'), ((916, 936), 'tensorflow.expand_dims', 'tf.expand_dims', (['X', '(0)'], {}), '(X, 0)\n', (930, 936), True, 'import tensorflow as tf\n'), ((962, 990), 'tensorflow.expand_dims', 'tf.expand_dims', (['centroids', '(1)'], {}), '(centroids, 1)\n', (976, 990), True, 'import tensorflow as tf\n'), ((1092, 1115), 'tensorflow.argmin', 'tf.argmin', (['distances', '(0)'], {}), '(distances, 0)\n', (1101, 1115), True, 'import tensorflow as tf\n'), ((1181, 1213), 'tensorflow.unsorted_segment_sum', 'tf.unsorted_segment_sum', (['X', 'Y', 'k'], {}), '(X, Y, k)\n', (1204, 1213), True, 'import tensorflow as tf\n'), ((1380, 1401), 'numpy.shape', 'np.shape', (['chromo_data'], {}), '(chromo_data)\n', (1388, 1401), True, 'import numpy as np\n'), ((1593, 1605), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1603, 1605), True, 'import tensorflow as tf\n'), ((424, 445), 'numpy.shape', 'np.shape', (['chromo_data'], {}), '(chromo_data)\n', (432, 445), True, 'import numpy as np\n'), ((467, 488), 'numpy.shape', 'np.shape', (['chromo_data'], {}), '(chromo_data)\n', (475, 488), True, 'import numpy as np\n'), ((1251, 1266), 'tensorflow.ones_like', 'tf.ones_like', (['X'], {}), '(X)\n', (1263, 1266), True, 'import tensorflow as tf\n'), ((1663, 1674), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1671, 1674), True, 'import numpy as np\n'), ((759, 777), 'numpy.vstack', 'np.vstack', (['(xs, x)'], {}), '((xs, x))\n', (768, 777), True, 'import numpy as np\n'), ((1031, 1075), 'tensorflow.sub', 'tf.sub', (['expanded_vectors', 'expanded_centroids'], {}), '(expanded_vectors, expanded_centroids)\n', (1037, 1075), True, 'import tensorflow as tf\n')] |
# 6.1 <NAME>. Lay Inner Product
# An inner product is a generalization of the dot product. In a vector space, it is a
# way to multiply vectors together, with the result of this multiplication being a scalar.
'''
Example: Compute u.v and v.u for u = col(2, -5, -1), v = col(4, 2, -3)
u.v = transpose(u).v = (2 * 4) + (-5 * 2) + (-1 * -3) = 1
v.u = 1 because u.v = v.u
'''
u, v = [2, -5, -1], [4, 2, -3]
# Pure-Python inner product: sum of elementwise products.
inner_prod = 0
for i in range(len(u)):
    inner_prod += u[i] * v[i]
print(inner_prod)
# numpy library
import numpy as np
u = np.array([2, -5, -1]);
v = np.array([4, 2, -3]);
inner_prod = np.inner(v, u)
print(inner_prod)
# for 1-d array np.inner(u, v) is same as sum(u[:] * v[:])
inner_prod = sum(u[:] * v[:])
print(inner_prod)
# or simply
inner_prod = u * v
inner_prod = sum(inner_prod)
print(inner_prod)
# transpose(x).x gives the (1x1) squared norm of the column vector x.
x = np.array([[1], [2]])
x_trans = np.transpose(x)
out = np.dot(x_trans, x)
print(sum(out)[0])
# for multi-dimensional arrays
a = np.array([[1,2], [3,4]])
b = np.array([[11, 12], [13, 14]])
print(np.inner(a, b))
'''
In the above case, the inner product is calculated as −
1*11+2*12, 1*13+2*14
3*11+4*12, 3*13+4*14
''' | [
"numpy.array",
"numpy.dot",
"numpy.transpose",
"numpy.inner"
] | [((542, 563), 'numpy.array', 'np.array', (['[2, -5, -1]'], {}), '([2, -5, -1])\n', (550, 563), True, 'import numpy as np\n'), ((569, 589), 'numpy.array', 'np.array', (['[4, 2, -3]'], {}), '([4, 2, -3])\n', (577, 589), True, 'import numpy as np\n'), ((605, 619), 'numpy.inner', 'np.inner', (['v', 'u'], {}), '(v, u)\n', (613, 619), True, 'import numpy as np\n'), ((834, 854), 'numpy.array', 'np.array', (['[[1], [2]]'], {}), '([[1], [2]])\n', (842, 854), True, 'import numpy as np\n'), ((865, 880), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (877, 880), True, 'import numpy as np\n'), ((887, 905), 'numpy.dot', 'np.dot', (['x_trans', 'x'], {}), '(x_trans, x)\n', (893, 905), True, 'import numpy as np\n'), ((961, 987), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (969, 987), True, 'import numpy as np\n'), ((991, 1021), 'numpy.array', 'np.array', (['[[11, 12], [13, 14]]'], {}), '([[11, 12], [13, 14]])\n', (999, 1021), True, 'import numpy as np\n'), ((1030, 1044), 'numpy.inner', 'np.inner', (['a', 'b'], {}), '(a, b)\n', (1038, 1044), True, 'import numpy as np\n')] |
#python3
#steven 04/04/2020
import matplotlib.pyplot as plt
import numpy as np
import math
# tanh(pi/3): slope factor used for the triangles' slanted edges.
gSlop1 = math.tanh(math.pi/3)
# Per-edge slopes for upward- and downward-pointing triangles.
# (The `slope` arguments are currently ignored by DrawTriangleLineByPt's
# active plotting path.)
slopeUp= [math.tanh(math.pi/3),-1*math.tanh(math.pi/3),0]
slopeDown= [0, math.tanh(math.pi/3), -1*math.tanh(math.pi/3)]
def plotXY(x, y):
    """Plot y against x on the current matplotlib figure."""
    plt.plot(x, y)
def DrawTriangleLineByPt(startPt, stopPt, slope):
    """Draw the straight segment from startPt to stopPt, normalised left-to-right.

    `slope` is kept for interface compatibility with the callers; matplotlib
    draws the segment between the two endpoints directly, so it is unused.
    """
    if startPt[0] > stopPt[0]:
        # Swap endpoints so the segment always runs left to right.
        # (The original swapped via add/subtract arithmetic, which is fragile
        # and only rebinds locals anyway; a tuple swap is exact and clearer.)
        startPt, stopPt = stopPt, startPt
    plt.plot([startPt[0], stopPt[0]], [startPt[1], stopPt[1]])
def triangle(pt0, pt1, pt2, slopes):
    """Draw the three edges of the triangle (pt0, pt1, pt2)."""
    edges = ((pt0, pt1), (pt1, pt2), (pt2, pt0))
    for (a, b), s in zip(edges, slopes):
        DrawTriangleLineByPt(a, b, s)
def triangleStart(startPt,lineLen,N):
    """Recursively draw a Sierpinski-style triangle fractal.

    Draws one upward triangle with base `lineLen` at `startPt`, the inverted
    inner triangle, and recurses (depth N) into the three corner triangles
    with half the base length.
    """
    if N>0:
        pt0,pt1,pt2 = np.array([0,0],dtype=np.float64),\
            np.array([0,0],dtype=np.float64),\
            np.array([0,0],dtype=np.float64)
        # Outer upward triangle: left corner, apex, right corner.
        pt0 = startPt
        pt1[0] = pt0[0]+lineLen/2
        pt1[1] = pt0[1]+lineLen/2*gSlop1
        pt2[0] = pt0[0] + lineLen
        pt2[1] = pt0[1]
        #print(pt0,pt1,pt2)
        triangle(pt0,pt1,pt2,slopeUp)
        # Inverted inner triangle connecting the three edge midpoints.
        N_pt0 = pt0.copy()
        N_pt0[0] += lineLen/4
        N_pt0[1] += (lineLen/4*gSlop1)
        N_pt1 = N_pt0.copy()
        N_pt1[0] += lineLen/2
        N_pt2 = pt0.copy()
        N_pt2[0] += lineLen/2
        #print(N_pt0, N_pt1, N_pt2)
        triangle(N_pt0, N_pt1, N_pt2,slopeDown)
        # Recurse into the bottom-left, top and bottom-right corner triangles.
        triangleStart(pt0,lineLen/2,N-1)
        triangleStart(N_pt0,lineLen/2,N-1)
        triangleStart(N_pt2,lineLen/2,N-1)
    else:
        return
def main():
    """Entry point: draw the recursive triangle fractal and display it."""
    origin = np.array([0, 0], dtype=np.float64)
    base_length = 5
    depth = 4  # 8
    triangleStart(origin, base_length, depth)
    plt.show()
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"math.tanh",
"matplotlib.pyplot.show"
] | [((101, 123), 'math.tanh', 'math.tanh', (['(math.pi / 3)'], {}), '(math.pi / 3)\n', (110, 123), False, 'import math\n'), ((132, 154), 'math.tanh', 'math.tanh', (['(math.pi / 3)'], {}), '(math.pi / 3)\n', (141, 154), False, 'import math\n'), ((195, 217), 'math.tanh', 'math.tanh', (['(math.pi / 3)'], {}), '(math.pi / 3)\n', (204, 217), False, 'import math\n'), ((293, 307), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (301, 307), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1842), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (1816, 1842), True, 'import numpy as np\n'), ((1923, 1933), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1931, 1933), True, 'import matplotlib.pyplot as plt\n'), ((156, 178), 'math.tanh', 'math.tanh', (['(math.pi / 3)'], {}), '(math.pi / 3)\n', (165, 178), False, 'import math\n'), ((220, 242), 'math.tanh', 'math.tanh', (['(math.pi / 3)'], {}), '(math.pi / 3)\n', (229, 242), False, 'import math\n'), ((514, 572), 'matplotlib.pyplot.plot', 'plt.plot', (['[startPt[0], stopPt[0]]', '[startPt[1], stopPt[1]]'], {}), '([startPt[0], stopPt[0]], [startPt[1], stopPt[1]])\n', (522, 572), True, 'import matplotlib.pyplot as plt\n'), ((592, 630), 'numpy.linspace', 'np.linspace', (['startPt[0]', 'stopPt[0]', '(10)'], {}), '(startPt[0], stopPt[0], 10)\n', (603, 630), True, 'import numpy as np\n'), ((954, 988), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (962, 988), True, 'import numpy as np\n'), ((1013, 1047), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (1021, 1047), True, 'import numpy as np\n'), ((1072, 1106), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (1080, 1106), True, 'import numpy as np\n')] |
import json
import os
from glob import glob
import h5py
import numpy as np
import pandas as pd
from batchlib.util import read_table, image_name_to_well_name
from tqdm import tqdm
# Plate identifiers from which new images are drawn for the second
# annotation round (iterated by make_second_annotation_table below).
manuscript_plates = [
    "20200417_132123_311",
    "20200417_152052_943",
    "20200420_164920_764",
    "20200420_152417_316",
    "plate1_IgM_20200527_125952_707",
    "plate2_IgM_20200527_155923_897",
    "plate5_IgM_20200528_094947_410",
    "plate6_IgM_20200528_111507_585",
    "plate9_4_IgM_20200604_212451_328",
    "plate9_4rep1_20200604_175423_514",
    "plate9_5_IgM_20200605_084742_832",
    "plate9_5rep1_20200604_225512_896"
]
def score_images(plate_folder, old_names):
    """Rank a plate's usable images by |1 - n_infected / n_control|.

    Image and well outliers (IgG_is_outlier flags from the plate table) and
    images already listed in `old_names` are skipped.  The result — image
    names sorted from most balanced infected/control ratio to least — is
    cached as JSON under ./plate_stats/<plate>.json and returned.
    """
    plate_name = os.path.split(plate_folder)[1]
    cache_path = f'./plate_stats/{plate_name}.json'
    # Return the cached ranking if this plate was scored before.
    if os.path.exists(cache_path):
        with open(cache_path) as f:
            return json.load(f)
    images = glob(os.path.join(plate_folder, '*.h5'))
    table_path = os.path.join(plate_folder, f'{plate_name}_table.hdf5')
    with h5py.File(table_path, 'r') as f:
        im_cols, im_tab = read_table(f, 'images/default')
        well_cols, well_tab = read_table(f, 'wells/default')
    # Per-image and per-well outlier flags keyed by name.
    im_names = im_tab[:, im_cols.index('image_name')]
    im_outliers = {name: iso for name, iso in zip(im_names,
                                                  im_tab[:, im_cols.index('IgG_is_outlier')])}
    well_names = well_tab[:, well_cols.index('well_name')]
    well_outliers = {name: iso for name, iso in zip(well_names,
                                                    well_tab[:, well_cols.index('IgG_is_outlier')])}
    im_names = []
    ratios = []
    tab_name = 'cell_classification/cell_segmentation/marker_tophat'
    for im in images:
        im_name = os.path.splitext(os.path.split(im)[1])[0]
        # check for outliers
        if im_outliers[im_name] == 1:
            continue
        well_name = image_name_to_well_name(im_name)
        if well_outliers[well_name] == 1:
            continue
        # Skip images that were already proofread in the previous round.
        res_name = f'{plate_name}_{im_name}.h5'
        if res_name in old_names:
            continue
        with h5py.File(im, 'r') as f:
            cols, tab = read_table(f, tab_name)
        # compute the infected to non infected ratio
        n_infected = tab[:, cols.index('is_infected')].sum()
        n_control = tab[:, cols.index('is_control')].sum()
        if n_infected == 0 or n_control == 0:
            continue
        ratio = abs(1. - n_infected / float(n_control))
        ratios.append(ratio)
        im_names.append(im_name)
    # sort by the ratio
    im_names = np.array(im_names)
    ratios = np.array(ratios)
    ratio_sorted = np.argsort(ratios)
    im_names = im_names[ratio_sorted].tolist()
    with open(cache_path, 'w') as f:
        json.dump(im_names, f)
    return im_names
def select_images(images, n, out_table):
    """Pick ``n`` images round-robin across plates and append them to
    ``out_table``.

    Plates are visited in dict order, one image per plate per round; the
    per-plate image index advances once every plate has contributed.  A
    duplicated '.h5.h5' suffix is normalised to a single '.h5'.

    :param images: mapping plate name -> ordered list of image names
    :param n: total number of images to select
    :param out_table: list of [plate, image] rows, extended in place
    :return: out_table (the same list object)
    """
    plates = list(images.keys())
    n_plates = len(plates)
    for count in range(n):
        plate = plates[count % n_plates]
        name = images[plate][count // n_plates]
        if name.endswith('.h5.h5'):
            name = name[:-3]
        out_table.append([plate, name])
    return out_table
def make_second_annotation_table(n_new, n_old):
    """Build the second-round proofreading table and write it to
    './Stacks2proofread_round2.xlsx'.

    Selects ``n_new`` fresh images (ranked per plate via score_images) plus
    ``n_old`` images re-drawn from the first annotation round, round-robin
    across plates.
    """
    root = '/g/kreshuk/data/covid/data-processed'

    # images that were annotated in the first round
    prev_table = pd.read_excel('./Stacks2proofread.xlsx')
    prev_plates = prev_table['plate'].values
    prev_files = prev_table['file'].values
    prev_names = [f'{plate}_{name}' for plate, name in zip(prev_plates, prev_files)]

    # rank the candidate images of every manuscript plate
    ranked = {}
    for plate in tqdm(manuscript_plates):
        ranked[plate] = score_images(os.path.join(root, plate), prev_names)

    rows = select_images(ranked, n_new, [])

    # group the first-round images by plate and re-select n_old of them
    grouped = {}
    for plate, im_name in zip(prev_plates, prev_files):
        grouped.setdefault(plate, []).append(im_name)
    rows = select_images(grouped, n_old, rows)

    df = pd.DataFrame(rows, columns=['plate', 'file'])
    df.to_excel('./Stacks2proofread_round2.xlsx', index=False)
# Entry point: select 50 newly scored images and 10 first-round images.
if __name__ == '__main__':
    make_second_annotation_table(50, 10)
| [
"os.path.exists",
"batchlib.util.image_name_to_well_name",
"tqdm.tqdm",
"os.path.join",
"os.path.split",
"numpy.argsort",
"numpy.array",
"h5py.File",
"json.load",
"batchlib.util.read_table",
"pandas.read_excel",
"pandas.DataFrame",
"json.dump"
] | [((778, 804), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (792, 804), False, 'import os\n'), ((946, 1000), 'os.path.join', 'os.path.join', (['plate_folder', 'f"""{plate_name}_table.hdf5"""'], {}), "(plate_folder, f'{plate_name}_table.hdf5')\n", (958, 1000), False, 'import os\n'), ((2604, 2622), 'numpy.array', 'np.array', (['im_names'], {}), '(im_names)\n', (2612, 2622), True, 'import numpy as np\n'), ((2636, 2652), 'numpy.array', 'np.array', (['ratios'], {}), '(ratios)\n', (2644, 2652), True, 'import numpy as np\n'), ((2673, 2691), 'numpy.argsort', 'np.argsort', (['ratios'], {}), '(ratios)\n', (2683, 2691), True, 'import numpy as np\n'), ((3450, 3490), 'pandas.read_excel', 'pd.read_excel', (['"""./Stacks2proofread.xlsx"""'], {}), "('./Stacks2proofread.xlsx')\n", (3463, 3490), True, 'import pandas as pd\n'), ((3701, 3724), 'tqdm.tqdm', 'tqdm', (['manuscript_plates'], {}), '(manuscript_plates)\n', (3705, 3724), False, 'from tqdm import tqdm\n'), ((4216, 4266), 'pandas.DataFrame', 'pd.DataFrame', (['out_table'], {'columns': "['plate', 'file']"}), "(out_table, columns=['plate', 'file'])\n", (4228, 4266), True, 'import pandas as pd\n'), ((687, 714), 'os.path.split', 'os.path.split', (['plate_folder'], {}), '(plate_folder)\n', (700, 714), False, 'import os\n'), ((893, 927), 'os.path.join', 'os.path.join', (['plate_folder', '"""*.h5"""'], {}), "(plate_folder, '*.h5')\n", (905, 927), False, 'import os\n'), ((1011, 1037), 'h5py.File', 'h5py.File', (['table_path', '"""r"""'], {}), "(table_path, 'r')\n", (1020, 1037), False, 'import h5py\n'), ((1070, 1101), 'batchlib.util.read_table', 'read_table', (['f', '"""images/default"""'], {}), "(f, 'images/default')\n", (1080, 1101), False, 'from batchlib.util import read_table, image_name_to_well_name\n'), ((1132, 1162), 'batchlib.util.read_table', 'read_table', (['f', '"""wells/default"""'], {}), "(f, 'wells/default')\n", (1142, 1162), False, 'from batchlib.util import read_table, 
image_name_to_well_name\n'), ((1917, 1949), 'batchlib.util.image_name_to_well_name', 'image_name_to_well_name', (['im_name'], {}), '(im_name)\n', (1940, 1949), False, 'from batchlib.util import read_table, image_name_to_well_name\n'), ((2785, 2807), 'json.dump', 'json.dump', (['im_names', 'f'], {}), '(im_names, f)\n', (2794, 2807), False, 'import json\n'), ((3749, 3774), 'os.path.join', 'os.path.join', (['root', 'plate'], {}), '(root, plate)\n', (3761, 3774), False, 'import os\n'), ((861, 873), 'json.load', 'json.load', (['f'], {}), '(f)\n', (870, 873), False, 'import json\n'), ((2130, 2148), 'h5py.File', 'h5py.File', (['im', '"""r"""'], {}), "(im, 'r')\n", (2139, 2148), False, 'import h5py\n'), ((2179, 2202), 'batchlib.util.read_table', 'read_table', (['f', 'tab_name'], {}), '(f, tab_name)\n', (2189, 2202), False, 'from batchlib.util import read_table, image_name_to_well_name\n'), ((1784, 1801), 'os.path.split', 'os.path.split', (['im'], {}), '(im)\n', (1797, 1801), False, 'import os\n')] |
#from spiops import data as data
from spiops.utils.time import cal2et
from spiops.utils.time import et_to_datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
import spiceypy
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from bokeh.plotting import figure, output_file, output_notebook, show
from bokeh.models import HoverTool
from bokeh.models import DatetimeTickFormatter
from bokeh.models import Range1d
from tempfile import mkstemp
from shutil import move
import os
import glob
from os import fdopen, remove, chmod, path
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
from spiops.data import images
def valid_url(html_file_name):
    """
    Return ``html_file_name`` with URL non valid characters replaced.

    '/', '\\', '!', '*' and '$' become '-'; '.', '+', '(' and ')' become
    '_' (and '_' is kept as is).  This was introduced because strings such
    as '67P/CG' had to be usable as part of a 2D-plot URL ('67P-CG').

    :param html_file_name: Input filename
    :type html_file_name: str
    :return: Corrected Input filename without URL non valid characters
    :rtype: str
    """
    # Fixes: the original docstring was closed with a stray extra '""', and
    # the per-character .replace() loop rescanned the whole string ten times.
    # A single translate() pass does the same replacements in one scan.
    replacements = {}
    for char in '/\\!*$':
        replacements[char] = '-'
    for char in '_.+()':
        replacements[char] = '_'
    return html_file_name.translate(str.maketrans(replacements))
def convert_ESOCorbit2data(orbit_file, support_ker=''):
    """Parse an ESOC orbit file into time and distance lists.

    Data lines are only parsed after a 'META_STOP' marker and until the next
    'META_START' marker; the marker lines themselves are never parsed because
    the read flag is toggled *after* the parsing branch.  Each data line is
    expected to start with a calendar epoch followed by at least three
    position components.

    :param orbit_file: path of the ESOC orbit file to read
    :param support_ker: kernel(s) forwarded to cal2et for the time conversion
    :return: [time_list, distance_list] — epochs in ET and the corresponding
             position-vector norms
    """
    #orbit = data.Data()

    # NOTE(review): orbit_data is never used below; dead variable.
    orbit_data = []
    time_list = []
    distance_list = []

    with open(orbit_file, 'r') as f:
        read_data = False
        for line in f:

            if read_data:
                line = line.split()
                # convert the calendar epoch to ephemeris time
                time = cal2et(line[0], 'CAL', support_ker=support_ker)

                #TODO: Not including velocity at this point; only distance
                distance = np.sqrt(float(line[1])*float(line[1]) +
                                    float(line[2])*float(line[2]) +
                                    float(line[3])*float(line[3]))

                time_list.append(time)
                distance_list.append(distance)

            # toggle parsing after the marker lines so markers are skipped
            if 'META_STOP' in line:
                read_data = True
            if 'META_START' in line:
                read_data = False

    return [time_list, distance_list]
def convert_OEM2data():
    """Placeholder for OEM orbit-file conversion; not implemented yet."""
    return None
def plot(xaxis, yaxis, xaxis_name='Date', yaxis_name='', title='', format='line',
         external_data=[], notebook=False, mission='', target='', yaxis_units='',
         date_format='TDB', plot_width=975, plot_height=300,
         fill_color=[], fill_alpha=0, background_image=False,
         line_width=2):
    """Render a 2D Bokeh plot of one or several series and write it to an
    HTML file (or show it inline when ``notebook`` is True).

    ``yaxis`` may be a single series or a list of series; when ``xaxis_name``
    is 'Date', the x values are assumed to be ET epochs and are converted to
    datetimes.  ``external_data`` is an optional [times, values] pair drawn
    in red on top of the main series.

    NOTE(review): ``fill_color`` and ``fill_alpha`` are accepted but never
    used.  The mutable default arguments (external_data, fill_color) are
    shared across calls — they are only read here, but confirm before
    mutating them.
    """
    if not isinstance(yaxis_name, list):
        yaxis_name = [yaxis_name]
        yaxis = [yaxis]

    # Derive the plot title and a URL-safe HTML file name
    if not title:
        title = '{} {}'.format(mission, yaxis_name).title().upper()
        html_file_name = 'plot_{}_{}_{}-{}.html'.format('Time', yaxis_name,
                                                        mission,
                                                        target)
        html_file_name = valid_url(html_file_name)
    else:
        title = title.upper()
        if ' ' in title:
            html_file_name = title.replace(' ', '_').upper()
        else:
            html_file_name = title
        html_file_name = valid_url(html_file_name)

    # TODO: Move this to the time object (convert to datatime)
    # Function needs to be vectorised
    # x = self.time.window
    if xaxis_name == 'Date':
        window_dt = []
        window = xaxis
        for element in window:
            window_dt.append(et_to_datetime(element, date_format))
        x = window_dt
    else:
        x = xaxis
    y = yaxis

    # Output target: inline notebook cell or a standalone HTML file
    if notebook:
        output_notebook()
    else:
        output_file(html_file_name + '.html')

    if xaxis_name == 'Date':
        x_axis_type = "datetime"
    else:
        x_axis_type = "auto"

    p = figure(title=title,
               plot_width=plot_width,
               plot_height=plot_height,
               x_axis_label=xaxis_name.upper(),
               y_axis_label=yaxis_units,
               x_axis_type=x_axis_type)

    # Force a full timestamp at every zoom level of the date axis
    if xaxis_name == 'Date':
        p.xaxis.formatter = DatetimeTickFormatter(
            seconds=["%Y-%m-%d %H:%M:%S"],
            minsec=["%Y-%m-%d %H:%M:%S"],
            minutes=["%Y-%m-%d %H:%M:%S"],
            hourmin=["%Y-%m-%d %H:%M:%S"],
            hours=["%Y-%m-%d %H:%M:%S"],
            days=["%Y-%m-%d %H:%M:%S"],
            months=["%Y-%m-%d %H:%M:%S"],
            years=["%Y-%m-%d %H:%M:%S"],
        )

    hover = HoverTool(
        tooltips=[(xaxis_name, '@x{0.000}'),
                  (title, '@y{0.000}')],
        formatters={xaxis_name: 'numeral',
                    title: 'numeral'})

    p.add_tools(hover)

    # Optional reference series ([times, values]) drawn in red
    if external_data:
        window_dt = []
        window = external_data[0]
        for element in window:
            window_dt.append(et_to_datetime(element, 'TDB'))
        x_ext = window_dt
        y_ext = external_data[1]
        if format == 'circle':
            p.circle(x_ext, y_ext, size=5, color='red')
        elif format == 'line':
            p.line(x_ext, y_ext, line_width=2,
                   color='red')

    # add a line renderer with legend and line thickness
    # NOTE(review): missing comma — 'cornflowerblue' 'aquamarine' is implicit
    # string concatenation yielding one (invalid) color name; confirm intent.
    color_list = ['red', 'green', 'blue', 'orange', "black", 'darkgoldenrod', 'chocolate', 'aqua', 'coral',
                  'darkcyan', 'cornflowerblue' 'aquamarine', 'darkturquoise', 'cornsilk']
    index = 0
    color_idx = 0

    # Optional planetary map as plot background (ground-track style plots)
    if background_image:
        if 'TGO' in mission.upper() or 'MEX' in mission.upper():
            image = 'Mars_Viking_MDIM21_ClrMosaic_global_1024.jpg'
        else:
            image = 'Earth_Contemporary_Basic.png'
        p.image_url(url=[os.path.join(os.path.dirname(images.__file__), image)], x=-180, y=-90,
                    w=360, h=180, anchor="bottom_left", global_alpha=0.6)
        left, right, bottom, top = -180, 180, -90, 90
        p.x_range = Range1d(left, right)
        p.y_range = Range1d(bottom, top)

    if format == 'scatter':
        # One legend entry per point only when names and points line up
        is_multi_legend = len(y) <= len(yaxis_name)
        for idx in range(len(y)):
            p.circle([x[idx]], [y[idx]], size=3,
                     color=color_list[color_idx] if is_multi_legend else color_list[0],
                     legend=str(yaxis_name[idx]).upper() if is_multi_legend else str(yaxis_name[0]).upper())
            color_idx = idx % len(color_list)
    else:
        for element in y:
            if format == 'circle':
                p.line(x, element, line_width=line_width, color=color_list[color_idx], legend=str(yaxis_name[index]).upper())
                p.circle(x, element, fill_color="white", size=8)
            if format == 'circle_only':
                p.circle(x, element, size=3, color=color_list[color_idx], legend=str(yaxis_name[index]).upper())
            elif format == 'line':
                p.line(x, element, line_width=line_width, color=color_list[color_idx], legend=str(yaxis_name[index]).upper())
            index += 1
            color_idx = index % len(color_list)

    # Clicking a legend entry hides/shows the corresponding series
    p.legend.click_policy = "hide"

    # show the results
    show(p)

    return
def plot3d(data, observer, target):
    """Plot a 3D trajectory of ``observer`` w.r.t. ``target`` together with
    the target body modelled as a triaxial ellipsoid.

    :param data: iterable of [x, y, z] position samples (km)
    :param observer: object providing ``name`` and
                     ``trajectory_reference_frame``
    :param target: object providing ``name`` and ``radii`` (three semi-axes)
    """
    x, y, z, = [], [], []
    for element in data:
        x.append(element[0])
        y.append(element[1])
        z.append(element[2])

    mpl.rcParams['legend.fontsize'] = 10

    fig = plt.figure()
    # Fix: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get a 3D axes.
    ax = fig.add_subplot(projection='3d')
    ax.plot(x, y, z, label= observer.name + ' w.r.t. ' + target.name +
                           ' on ' + observer.trajectory_reference_frame + ' [km]')
    ax.legend()

    # Make data: parameterise the target ellipsoid from its three radii
    # (the unused ``theta`` linspace of the original was removed)
    u = np.linspace(0, 2 * np.pi, 360)
    v = np.linspace(0, np.pi, 360)
    x = target.radii[0] * np.outer(np.cos(u), np.sin(v))
    y = target.radii[1] * np.outer(np.sin(u), np.sin(v))
    z = target.radii[2] * np.outer(np.ones(np.size(u)), np.cos(v))

    # Plot the surface
    ax.plot_surface(x, y, z, color='r')

    plt.show()

    return
def plot_attitude_error(error, max_ang_error, title, plot_style, notebook):
    """Print mean quaternion-component errors and plot all four components.

    :param error: array whose columns are [time, QX, QY, QZ, QW] errors
    :param max_ang_error: maximum angular error in millidegrees (printed only)
    :param title: plot title
    :param plot_style: plot format forwarded to plot() ('line', 'circle', ...)
    :param notebook: show inline in a notebook instead of writing HTML
    """
    for column, component in enumerate(['QX', 'QY', 'QZ', 'QW'], start=1):
        print('Avg ' + component + ' error: ', np.mean(error[:, column]))
    print('Max angular error [mdeg]: ' + str(max_ang_error))

    plot(error[:, 0],
         [error[:, 1], error[:, 2], error[:, 3], error[:, 4]],
         yaxis_name=['QX', 'QY', 'QZ', 'QW'],
         title=title,
         format=plot_style,
         yaxis_units='Q [-]',
         notebook=notebook)
def replace(file_path, pattern, subst):
    """Replace every occurrence of ``pattern`` by ``subst`` in ``file_path``.

    The rewritten content is written to a sibling file named
    ``<stem>_LOCAL.TM`` (when the original path is upper case) or
    ``<stem>_local.tm``; the input file itself is left untouched.

    :param file_path: path of the file to process
    :param pattern: literal substring to search for
    :param subst: replacement text
    :return: True if at least one line was modified, False otherwise
    """
    replaced = False
    #Create temp file
    fh, abs_path = mkstemp()
    with fdopen(fh,'w') as new_file:
        with open(file_path) as old_file:
            for line in old_file:
                updated_line = line.replace(pattern, subst)
                new_file.write(updated_line)
                #flag for replacing having happened
                if updated_line != line:
                    replaced = True

    if replaced:
        # Update the permissions
        chmod(abs_path, 0o644)
        #Move new file
        if file_path.isupper():
            move(abs_path, file_path.split('.')[0]+'_LOCAL.TM')
        else:
            move(abs_path, file_path.split('.')[0] + '_local.tm')
        return True

    # Bug fix: the temporary file used to be left behind whenever no
    # replacement happened — delete it so repeated calls do not leak files.
    remove(abs_path)
    return False
def get_latest_kernel(kernel_type, path, pattern, dates=False,
                      excluded_kernels=False):
    """Return the most recent kernel of a given type matching ``pattern``.

    Kernels are looked up in ``path/kernel_type`` (plus its
    ``former_versions`` subdirectory when present) and sorted by file name,
    which for SPICE kernel names corresponds to version order.

    :param kernel_type: kernel subdirectory name (e.g. 'ck', 'spk')
    :param path: root directory of the kernel tree
    :param pattern: glob pattern the kernel file names must match
    :param dates: when True, return the latest version of *each* kernel
                  family ('_V' stem) instead of a single latest kernel
    :param excluded_kernels: optional list of kernel names to ignore
    :return: latest kernel name, or a list of names when ``dates`` is True
    """
    kernel_dir = os.path.join(path, kernel_type)

    # Collect matching kernels, including any archived former versions
    # (former_versions is searched for everything but meta-kernel generation)
    candidates = glob.glob(kernel_dir + '/' + pattern)
    if os.path.isdir(kernel_dir + '/former_versions'):
        candidates += glob.glob(kernel_dir + '/former_versions/' + pattern)

    # Keep only the base names, in sorted (version) order
    kernels = sorted(candidate.split('/')[-1] for candidate in candidates)

    # Drop explicitly excluded kernels
    if excluded_kernels:
        for excluded in excluded_kernels:
            if excluded in kernels:
                kernels.remove(excluded)

    if not dates:
        # Single latest kernel
        return kernels.pop()

    # One kernel per family: keep only the highest version of each '_V' stem
    kernels_date = []
    previous = ''
    for kernel in kernels:
        if previous and previous.upper().split('_V')[0] == kernel.upper().split('_V')[0]:
            kernels_date.remove(previous)
        previous = kernel
        kernels_date.append(kernel)
    return kernels_date
def get_sc(kernel):
    """Derive the spacecraft/mission short name from a kernel file name.

    Matching is done on the upper-cased name; missions with several
    spacecraft are disambiguated by lower-case sub-strings of the original
    name.  Returns None when no known mission matches.
    """
    upper_name = kernel.upper()
    if 'ROSETTA' in upper_name:
        return 'ROS'
    if 'VENUS-EXPRESS' in upper_name:
        return 'VEX'
    if 'MARS-EXPRESS' in upper_name:
        return 'MEX'
    if 'EXOMARS2016' in upper_name:
        return 'em16_edm' if 'edm' in kernel else 'em16_tgo'
    if 'BEPICOLOMBO' in upper_name:
        return 'bc_mmo' if 'mmo' in kernel else 'bc_mpo'
    if 'JUICE' in upper_name:
        return 'juice'
    if 'SOLAR-ORBITER' in upper_name:
        return 'solo'
    if 'EXOMARSRSP' in upper_name:
        return 'emrsp_sp' if '_sp_' in kernel else 'emrsp_rm'
    return None
def target2frame(target):
    """Map a target body name to its body-fixed reference frame name.

    Special cases (67P, Lutetia, the Didymos system) are hard-coded; for any
    other body the 'IAU_<TARGET>' frame is tried first, then the frame is
    looked up via the body's SPICE ID code.

    :param target: target body name as known to SPICE
    :return: frame name associated with the target
    """
    if target == '67P/C-G':
        target_frame = '67P/C-G_CK'
    elif target == '21 LUTETIA':
        target_frame = 'LUTETIA_FIXED'
    elif target == 'DIDYMOS' or target == 'DIDYMOON':
        target_frame = '{}_FIXED'.format(target)
    else:
        try:
            # First choice: the standard IAU body-fixed frame
            target_frame = 'IAU_' + target.upper()
            target_frame_id = spiceypy.namfrm(target_frame)
            if target_frame_id == 0:
                raise Exception
        except:
            try:
                # Fall back to the frame registered for the body's ID code
                target_id = str(spiceypy.bodn2c(target))
                target_frame = spiceypy.frmnam(int(target_id))
            except:
                # NOTE(review): if bodn2c itself raised, target_id is unbound
                # here and this line raises NameError instead of the intended
                # '<id>000' fallback — confirm the expected failure mode.
                target_id += '000'
                target_frame = spiceypy.frmnam(int(target_id))

    return target_frame
def findIntersection(x1, y1, x2, y2, x3, y3, x4, y4):
    """Return the intersection point [px, py] of the infinite line through
    (x1, y1)-(x2, y2) and the infinite line through (x3, y3)-(x4, y4).

    Uses the determinant (cross-product) form; raises ZeroDivisionError
    when the two lines are parallel.
    """
    det12 = x1 * y2 - y1 * x2
    det34 = x3 * y4 - y3 * x4
    dx12, dy12 = x1 - x2, y1 - y2
    dx34, dy34 = x3 - x4, y3 - y4
    denominator = dx12 * dy34 - dy12 * dx34
    px = (det12 * dx34 - dx12 * det34) / denominator
    py = (det12 * dy34 - dy12 * det34) / denominator
    return [px, py]
def findNearest(array, value):
    """
    Determine for a given value, the element of the array which is closest
    to this value.

    @param array: Input N-Dimensional Array with values
    @type array: Numpy array
    @param value: Value that we want to match with one value of the array
    @type value: float

    @return: Index and value in array closer to value
    @rtype: tuple
    """
    values = np.asarray(array)
    distances = np.abs(values - value)
    closest_idx = np.unravel_index(distances.argmin(), values.shape)
    return closest_idx, values[closest_idx]
| [
"bokeh.models.DatetimeTickFormatter",
"bokeh.models.Range1d",
"numpy.sin",
"numpy.mean",
"spiceypy.namfrm",
"numpy.asarray",
"os.chmod",
"numpy.linspace",
"os.path.isdir",
"spiops.utils.time.cal2et",
"glob.glob",
"spiops.utils.time.et_to_datetime",
"numpy.abs",
"bokeh.plotting.show",
"bo... | [((5170, 5297), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[(xaxis_name, '@x{0.000}'), (title, '@y{0.000}')]", 'formatters': "{xaxis_name: 'numeral', title: 'numeral'}"}), "(tooltips=[(xaxis_name, '@x{0.000}'), (title, '@y{0.000}')],\n formatters={xaxis_name: 'numeral', title: 'numeral'})\n", (5179, 5297), False, 'from bokeh.models import HoverTool\n'), ((7776, 7783), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (7780, 7783), False, 'from bokeh.plotting import figure, output_file, output_notebook, show\n'), ((8025, 8037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8035, 8037), True, 'import matplotlib.pyplot as plt\n'), ((8084, 8123), 'numpy.linspace', 'np.linspace', (['(-4 * np.pi)', '(4 * np.pi)', '(100)'], {}), '(-4 * np.pi, 4 * np.pi, 100)\n', (8095, 8123), True, 'import numpy as np\n'), ((8306, 8336), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(360)'], {}), '(0, 2 * np.pi, 360)\n', (8317, 8336), True, 'import numpy as np\n'), ((8345, 8371), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(360)'], {}), '(0, np.pi, 360)\n', (8356, 8371), True, 'import numpy as np\n'), ((8622, 8632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8630, 8632), True, 'import matplotlib.pyplot as plt\n'), ((9331, 9340), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (9338, 9340), False, 'from tempfile import mkstemp\n'), ((10157, 10188), 'os.path.join', 'os.path.join', (['path', 'kernel_type'], {}), '(path, kernel_type)\n', (10169, 10188), False, 'import os\n'), ((10303, 10341), 'glob.glob', 'glob.glob', (["(kernel_path + '/' + pattern)"], {}), "(kernel_path + '/' + pattern)\n", (10312, 10341), False, 'import glob\n'), ((10467, 10514), 'os.path.isdir', 'os.path.isdir', (["(kernel_path + '/former_versions')"], {}), "(kernel_path + '/former_versions')\n", (10480, 10514), False, 'import os\n'), ((13690, 13707), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (13700, 13707), True, 'import numpy 
as np\n'), ((3958, 3975), 'bokeh.plotting.output_notebook', 'output_notebook', ([], {}), '()\n', (3973, 3975), False, 'from bokeh.plotting import figure, output_file, output_notebook, show\n'), ((3994, 4031), 'bokeh.plotting.output_file', 'output_file', (["(html_file_name + '.html')"], {}), "(html_file_name + '.html')\n", (4005, 4031), False, 'from bokeh.plotting import figure, output_file, output_notebook, show\n'), ((4429, 4709), 'bokeh.models.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'seconds': "['%Y-%m-%d %H:%M:%S']", 'minsec': "['%Y-%m-%d %H:%M:%S']", 'minutes': "['%Y-%m-%d %H:%M:%S']", 'hourmin': "['%Y-%m-%d %H:%M:%S']", 'hours': "['%Y-%m-%d %H:%M:%S']", 'days': "['%Y-%m-%d %H:%M:%S']", 'months': "['%Y-%m-%d %H:%M:%S']", 'years': "['%Y-%m-%d %H:%M:%S']"}), "(seconds=['%Y-%m-%d %H:%M:%S'], minsec=[\n '%Y-%m-%d %H:%M:%S'], minutes=['%Y-%m-%d %H:%M:%S'], hourmin=[\n '%Y-%m-%d %H:%M:%S'], hours=['%Y-%m-%d %H:%M:%S'], days=[\n '%Y-%m-%d %H:%M:%S'], months=['%Y-%m-%d %H:%M:%S'], years=[\n '%Y-%m-%d %H:%M:%S'])\n", (4450, 4709), False, 'from bokeh.models import DatetimeTickFormatter\n'), ((6594, 6614), 'bokeh.models.Range1d', 'Range1d', (['left', 'right'], {}), '(left, right)\n', (6601, 6614), False, 'from bokeh.models import Range1d\n'), ((6635, 6655), 'bokeh.models.Range1d', 'Range1d', (['bottom', 'top'], {}), '(bottom, top)\n', (6642, 6655), False, 'from bokeh.models import Range1d\n'), ((8752, 8772), 'numpy.mean', 'np.mean', (['error[:, 1]'], {}), '(error[:, 1])\n', (8759, 8772), True, 'import numpy as np\n'), ((8802, 8822), 'numpy.mean', 'np.mean', (['error[:, 2]'], {}), '(error[:, 2])\n', (8809, 8822), True, 'import numpy as np\n'), ((8852, 8872), 'numpy.mean', 'np.mean', (['error[:, 3]'], {}), '(error[:, 3])\n', (8859, 8872), True, 'import numpy as np\n'), ((8902, 8922), 'numpy.mean', 'np.mean', (['error[:, 4]'], {}), '(error[:, 4])\n', (8909, 8922), True, 'import numpy as np\n'), ((9350, 9365), 'os.fdopen', 'fdopen', (['fh', '"""w"""'], {}), 
"(fh, 'w')\n", (9356, 9365), False, 'from os import fdopen, remove, chmod, path\n'), ((9748, 9768), 'os.chmod', 'chmod', (['abs_path', '(420)'], {}), '(abs_path, 420)\n', (9753, 9768), False, 'from os import fdopen, remove, chmod, path\n'), ((10545, 10599), 'glob.glob', 'glob.glob', (["(kernel_path + '/former_versions/' + pattern)"], {}), "(kernel_path + '/former_versions/' + pattern)\n", (10554, 10599), False, 'import glob\n'), ((8407, 8416), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (8413, 8416), True, 'import numpy as np\n'), ((8418, 8427), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (8424, 8427), True, 'import numpy as np\n'), ((8464, 8473), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (8470, 8473), True, 'import numpy as np\n'), ((8475, 8484), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (8481, 8484), True, 'import numpy as np\n'), ((8542, 8551), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (8548, 8551), True, 'import numpy as np\n'), ((13745, 13766), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (13751, 13766), True, 'import numpy as np\n'), ((1908, 1955), 'spiops.utils.time.cal2et', 'cal2et', (['line[0]', '"""CAL"""'], {'support_ker': 'support_ker'}), "(line[0], 'CAL', support_ker=support_ker)\n", (1914, 1955), False, 'from spiops.utils.time import cal2et\n'), ((3828, 3864), 'spiops.utils.time.et_to_datetime', 'et_to_datetime', (['element', 'date_format'], {}), '(element, date_format)\n', (3842, 3864), False, 'from spiops.utils.time import et_to_datetime\n'), ((5549, 5579), 'spiops.utils.time.et_to_datetime', 'et_to_datetime', (['element', '"""TDB"""'], {}), "(element, 'TDB')\n", (5563, 5579), False, 'from spiops.utils.time import et_to_datetime\n'), ((8529, 8539), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (8536, 8539), True, 'import numpy as np\n'), ((12629, 12658), 'spiceypy.namfrm', 'spiceypy.namfrm', (['target_frame'], {}), '(target_frame)\n', (12644, 12658), False, 'import spiceypy\n'), ((6388, 6420), 'os.path.dirname', 
'os.path.dirname', (['images.__file__'], {}), '(images.__file__)\n', (6403, 6420), False, 'import os\n'), ((12793, 12816), 'spiceypy.bodn2c', 'spiceypy.bodn2c', (['target'], {}), '(target)\n', (12808, 12816), False, 'import spiceypy\n')] |
import numpy as np
import rospy
import time
import sys
import pymap3d as pm
import numba as nb
from lib_ta_py.controller_2D_TA import Controller
from pkg_ta.msg import Control
from pkg_ta.msg import State_EKF_2D
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
# Yaw offset subtracted from the IMU heading before use (see callback_compass).
yawc_compass = - np.pi/2

# Reference point
# Geodetic origin (lat [deg], lon [deg], height [m]) of the local ENU frame.
lat0, lon0, h0 = -6.8712, 107.5738, 768
@nb.njit()
def wrap_angle(angle):
    """Wrap an angle in radians into the interval [-pi, pi)."""
    shifted = angle + np.pi
    return shifted % (2 * np.pi) - np.pi

# Warm up (JIT-compile) wrap_angle for int and float arguments at import time
_ = wrap_angle(1)
_ = wrap_angle(0.1)
@nb.njit()
def to_euler(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to roll/pitch/yaw Euler angles.

    From Coursera: returned as an xyz (roll pitch yaw) array in radians.
    """
    sinr_cosp = 2 * (w * x + y * z)
    cosr_cosp = 1 - 2 * (x**2 + y**2)
    roll = np.arctan2(sinr_cosp, cosr_cosp)

    pitch = np.arcsin(2 * (w * y - z * x))

    siny_cosp = 2 * (w * z + x * y)
    cosy_cosp = 1 - 2 * (y**2 + z**2)
    yaw = np.arctan2(siny_cosp, cosy_cosp)

    return np.array([roll, pitch, yaw])

# Warm up (JIT-compile) to_euler once at import time
_ = to_euler(1.5352300785980803e-15, -1.3393747145983517e-15, -0.7692164172827881, 0.638988343698562)
# Control-loop rate
freq = 10 # Hz

# Recorded waypoint set driven by the controller (alternatives kept for reference)
# waypoints_np = np.load('waypoints/waypoints/31_agus_wp_lurus.npy')
waypoints_np = np.load('waypoints/waypoints/08_09_wp_lurus.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_S.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_belok.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_belok_besar.npy')

# In the Arduino, CW is positive and CCW is negative
# On the other hand, in the controller algorithm, CCW is positive and CW is negative
max_steer = 35.; min_steer = -28. # For the path following control algorithm ~ degree
max_steer_arduino = 28.; min_steer_arduino = -35. # For the Arduino ~ degree
max_brake = 2.9; max_throttle = 0.25; min_throttle = 0.0; min_throttle_move = 0.08
min_vel_move = 0.5 # m/s

# Longitudinal PID gains and (disabled) feed-forward term
kp = 0.11; ki = 0.3; kd = 0.015
ff_long = np.array([0.0, 0.0]) # no feed-forward

# Lateral controller gains (passed to Controller; see lib_ta_py.controller_2D_TA)
ks = 1.0; kv = 1.0; kff_lat = 1.7; dead_band_limit = 0.025
kv_lat = 1.0; kv_yaw = 4.0; kv_throttle = 2.5 # Speed / Throttle Additional Control
# kv_lat = 3.0; kv_yaw = 4.0; kv_throttle = 2.5 # Speed / Throttle Additional Control
kp_lat = 15. * np.pi / 180.
ki_lat = 0.5 * np.pi / 180.
kd_lat = 30. * np.pi / 180.
lat_max_int = 3. * np.pi / 180.

# Saturation limits: longitudinal [brake, throttle], lateral steering in radians
sat_long = np.array([-np.abs(max_brake), np.abs(max_throttle)])
sat_lat = np.array([-np.abs(min_steer), np.abs(max_steer)])
sat_lat = sat_lat * np.pi / 180.

# Latest navigation estimate, updated by the ROS callbacks in main()
state = {'x': 0., 'y': 0., 'yaw': 0., 'v': 0.}

# Readiness flags: the loop starts once every sensor has reported at least once
RUN = False
RUN_compass = False
RUN_gnss = False
RUN_speed = False
def main():
    """ROS node: 2D path-following control loop.

    Subscribes to the GNSS fix, IMU orientation and EKF speed estimate,
    waits until every sensor has reported once, then runs the
    path-following controller at ``freq`` Hz and publishes the
    steer/throttle/brake commands plus error/reference telemetry on
    '/control_signal'.
    """
    global RUN, RUN_compass, RUN_gnss, RUN_speed

    # Create the controller object
    controller = Controller(kp, ki, kd, ff_long, sat_long,
                            ks, kv, kff_lat, dead_band_limit, sat_lat,
                            waypoints_np, min_vel_move, max_throttle, min_throttle_move,
                            kv_yaw, kv_lat, kv_throttle,
                            kp_lat, ki_lat, kd_lat, lat_max_int)

    def callback_gnss(msg_gnss):
        # Convert the geodetic fix to local ENU coordinates about (lat0, lon0, h0)
        global state
        global RUN_gnss
        gnss_pos = np.array(pm.geodetic2enu(msg_gnss.latitude,
                                         msg_gnss.longitude,
                                         msg_gnss.altitude,
                                         lat0, lon0, h0))
        state['x'] = gnss_pos[0]
        state['y'] = gnss_pos[1]
        RUN_gnss = True

    def callback_compass(msg_compass):
        # Extract yaw from the IMU quaternion and remove the compass offset
        global state
        global RUN_compass
        q = msg_compass.orientation
        euler = to_euler(q.x, q.y, q.z, q.w)
        # imu_yaw = euler[-1]
        state['yaw'] = wrap_angle(euler[-1] - yawc_compass)
        RUN_compass = True

    def callback_speed(msg_nav):
        # Ground speed magnitude from the EKF velocity estimate
        global state
        global RUN_speed
        state['v'] = np.sqrt(msg_nav.vx**2 + msg_nav.vy**2)
        RUN_speed = True

    rospy.init_node('control')
    rospy.Subscriber('/fix', NavSatFix, callback_gnss)
    rospy.Subscriber('/imu', Imu, callback_compass)
    rospy.Subscriber('/state_2d_new', State_EKF_2D, callback_speed)
    pub = rospy.Publisher('/control_signal', Control, queue_size=1)
    rate = rospy.Rate(freq) # Hz

    # "Waiting for the first navigation data ..."
    print("Menunggu data navigasi masuk pertama kali ...")
    RUN = False
    while not RUN:
        RUN = RUN_compass and RUN_gnss and RUN_speed
        time.sleep(0.02) # 20 ms
        pass
    # "Navigation data received!" / "Program is running!"
    print("Data Navigasi sudah masuk !")
    print("Program sudah berjalan !")

    msg = Control()
    msg.header.frame_id = 'path_following_control'
    msg.header.seq = 0
    msg.header.stamp = rospy.Time.now()
    last_time = msg.header.stamp.to_sec() - 1./freq

    while not rospy.is_shutdown():
        # Calculate the actual sampling time
        msg.header.stamp = rospy.Time.now()
        delta_t = msg.header.stamp.to_sec() - last_time
        last_time = msg.header.stamp.to_sec()

        # Calculate the control signal
        long, lat = controller.calculate_control_signal(delta_t, state['x'],
                                                        state['y'], state['v'],
                                                        state['yaw'])
        # Get the error profile
        err = controller.get_error()
        # Get the reference
        ref = controller.get_instantaneous_setpoint()

        # Send the message; note the sign flip (controller CCW-positive,
        # Arduino CW-positive) and the radian-to-degree conversion for steer
        msg.header.seq += 1
        msg.action_steer = max(min(-lat*180/np.pi, max_steer_arduino), min_steer_arduino) # lat ~ radian
        msg.action_throttle = max(min(long, max_throttle), min_throttle)
        #msg.action_brake = max(min(-long, max_brake), 0.)
        msg.action_brake = 0.
        msg.error_speed = err[0]
        msg.error_lateral = err[1]
        msg.error_yaw = err[2]
        msg.actual_x = state['x']
        msg.actual_y = state['y']
        msg.actual_yaw = state['yaw']
        msg.actual_speed = state['v']
        msg.wp_idx = controller.get_closest_index()
        msg.ref_x = ref[0]
        msg.ref_y = ref[1]
        msg.ref_yaw = ref[2]
        msg.ref_speed = ref[3]
        msg.ref_curvature = ref[4]
        msg.deg_ref_yaw = msg.ref_yaw * 180. / np.pi
        msg.deg_actual_yaw = msg.actual_yaw * 180. / np.pi
        msg.deg_error_yaw = msg.error_yaw * 180. / np.pi

        pub.publish(msg)
        rate.sleep()

# Entry point: run the node until ROS shuts it down
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| [
"numpy.abs",
"rospy.Publisher",
"numpy.sqrt",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"numpy.arcsin",
"numba.njit",
"lib_ta_py.controller_2D_TA.Controller",
"time.sleep",
"rospy.Time.now",
"numpy.array",
"pymap3d.geodetic2enu",
"numpy.arctan2",
"rospy.Rate",
"numpy.... | [((368, 377), 'numba.njit', 'nb.njit', ([], {}), '()\n', (375, 377), True, 'import numba as nb\n'), ((490, 499), 'numba.njit', 'nb.njit', ([], {}), '()\n', (497, 499), True, 'import numba as nb\n'), ((1035, 1084), 'numpy.load', 'np.load', (['"""waypoints/waypoints/08_09_wp_lurus.npy"""'], {}), "('waypoints/waypoints/08_09_wp_lurus.npy')\n", (1042, 1084), True, 'import numpy as np\n'), ((1739, 1759), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1747, 1759), True, 'import numpy as np\n'), ((607, 665), 'numpy.arctan2', 'np.arctan2', (['(2 * (w * x + y * z))', '(1 - 2 * (x ** 2 + y ** 2))'], {}), '(2 * (w * x + y * z), 1 - 2 * (x ** 2 + y ** 2))\n', (617, 665), True, 'import numpy as np\n'), ((674, 704), 'numpy.arcsin', 'np.arcsin', (['(2 * (w * y - z * x))'], {}), '(2 * (w * y - z * x))\n', (683, 704), True, 'import numpy as np\n'), ((715, 773), 'numpy.arctan2', 'np.arctan2', (['(2 * (w * z + x * y))', '(1 - 2 * (y ** 2 + z ** 2))'], {}), '(2 * (w * z + x * y), 1 - 2 * (y ** 2 + z ** 2))\n', (725, 773), True, 'import numpy as np\n'), ((781, 809), 'numpy.array', 'np.array', (['[roll, pitch, yaw]'], {}), '([roll, pitch, yaw])\n', (789, 809), True, 'import numpy as np\n'), ((2510, 2729), 'lib_ta_py.controller_2D_TA.Controller', 'Controller', (['kp', 'ki', 'kd', 'ff_long', 'sat_long', 'ks', 'kv', 'kff_lat', 'dead_band_limit', 'sat_lat', 'waypoints_np', 'min_vel_move', 'max_throttle', 'min_throttle_move', 'kv_yaw', 'kv_lat', 'kv_throttle', 'kp_lat', 'ki_lat', 'kd_lat', 'lat_max_int'], {}), '(kp, ki, kd, ff_long, sat_long, ks, kv, kff_lat, dead_band_limit,\n sat_lat, waypoints_np, min_vel_move, max_throttle, min_throttle_move,\n kv_yaw, kv_lat, kv_throttle, kp_lat, ki_lat, kd_lat, lat_max_int)\n', (2520, 2729), False, 'from lib_ta_py.controller_2D_TA import Controller\n'), ((3716, 3742), 'rospy.init_node', 'rospy.init_node', (['"""control"""'], {}), "('control')\n", (3731, 3742), False, 'import rospy\n'), ((3747, 3797), 
'rospy.Subscriber', 'rospy.Subscriber', (['"""/fix"""', 'NavSatFix', 'callback_gnss'], {}), "('/fix', NavSatFix, callback_gnss)\n", (3763, 3797), False, 'import rospy\n'), ((3802, 3849), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/imu"""', 'Imu', 'callback_compass'], {}), "('/imu', Imu, callback_compass)\n", (3818, 3849), False, 'import rospy\n'), ((3854, 3917), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/state_2d_new"""', 'State_EKF_2D', 'callback_speed'], {}), "('/state_2d_new', State_EKF_2D, callback_speed)\n", (3870, 3917), False, 'import rospy\n'), ((3928, 3985), 'rospy.Publisher', 'rospy.Publisher', (['"""/control_signal"""', 'Control'], {'queue_size': '(1)'}), "('/control_signal', Control, queue_size=1)\n", (3943, 3985), False, 'import rospy\n'), ((3997, 4013), 'rospy.Rate', 'rospy.Rate', (['freq'], {}), '(freq)\n', (4007, 4013), False, 'import rospy\n'), ((4304, 4313), 'pkg_ta.msg.Control', 'Control', ([], {}), '()\n', (4311, 4313), False, 'from pkg_ta.msg import Control\n'), ((4411, 4427), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4425, 4427), False, 'import rospy\n'), ((2164, 2184), 'numpy.abs', 'np.abs', (['max_throttle'], {}), '(max_throttle)\n', (2170, 2184), True, 'import numpy as np\n'), ((2227, 2244), 'numpy.abs', 'np.abs', (['max_steer'], {}), '(max_steer)\n', (2233, 2244), True, 'import numpy as np\n'), ((3646, 3688), 'numpy.sqrt', 'np.sqrt', (['(msg_nav.vx ** 2 + msg_nav.vy ** 2)'], {}), '(msg_nav.vx ** 2 + msg_nav.vy ** 2)\n', (3653, 3688), True, 'import numpy as np\n'), ((4175, 4191), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (4185, 4191), False, 'import time\n'), ((4495, 4514), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (4512, 4514), False, 'import rospy\n'), ((4588, 4604), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4602, 4604), False, 'import rospy\n'), ((2145, 2162), 'numpy.abs', 'np.abs', (['max_brake'], {}), '(max_brake)\n', (2151, 2162), True, 'import numpy as np\n'), ((2208, 
2225), 'numpy.abs', 'np.abs', (['min_steer'], {}), '(min_steer)\n', (2214, 2225), True, 'import numpy as np\n'), ((2942, 3035), 'pymap3d.geodetic2enu', 'pm.geodetic2enu', (['msg_gnss.latitude', 'msg_gnss.longitude', 'msg_gnss.altitude', 'lat0', 'lon0', 'h0'], {}), '(msg_gnss.latitude, msg_gnss.longitude, msg_gnss.altitude,\n lat0, lon0, h0)\n', (2957, 3035), True, 'import pymap3d as pm\n')] |
# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2018 Hisense, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
import logging
import time
from .layer import *
import os
import json
from sklearn import metrics
from .rcnn import RCNN
from .rnn import RNN
from .cnn import CNN
class Model(object):
def __init__(self, args, word_vocab, character_vocab=None):
self.logger = logging.getLogger("alibaba")
self.vocab = word_vocab
self.character_vocab = character_vocab
self.args = args
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self._build_graph()
self.saver = tf.train.Saver()
# initialize the model
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
def _build_graph(self):
start_time = time.time()
self._build_setup_placeholders()
self._embed()
self._encode()
self._match()
self._compute_loss()
self._create_train_op()
self.logger.info("Time to build graph: {} s".format(time.time() - start_time))
param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])
self.logger.info("There are {} parameters in the model".format(param_num))
def _build_setup_placeholders(self):
self.document1 = tf.placeholder(tf.int32, [None, self.args.max_document_len])
self.document2 = tf.placeholder(tf.int32, [None, self.args.max_document_len])
self.document1_character = tf.placeholder(
tf.int32, [None, self.args.max_document_len, self.args.max_word_len]
)
self.document2_character = tf.placeholder(
tf.int32, [None, self.args.max_document_len, self.args.max_word_len]
)
self.label = tf.placeholder(tf.int64, [None, 2])
    def _embed(self):
        """Look up word (and optionally character) embeddings for both documents.

        Produces ``self.doc1``/``self.doc2``: either the plain word embeddings,
        or -- when ``self.args.char`` is set -- word embeddings concatenated
        with a per-word char-CNN feature and fused through a highway layer.
        """
        with tf.variable_scope("embedding"):
            with tf.device("cpu:0"):
                # Randomly initialized, trainable word embedding table.
                self.word_embeddings = tf.Variable(
                    tf.random_uniform(
                        [self.vocab.size(), self.args.embedding_size], -1.0, 1.0
                    ),
                    name="word_embeddings",
                    trainable=True,
                )
                self.character_embeddings = tf.Variable(
                    tf.random_uniform(
                        [
                            self.character_vocab.size(),
                            self.args.character_embedding_size,
                        ]
                    ),
                    name="character_embeddings",
                )
        """
        self.word_embeddings = tf.get_variable(
            'word_embeddings',
            shape=(self.vocab.size(), self.args.embedding_size),
            initializer=tf.constant_initializer(self.vocab.embedding),
            trainable=True
        )
        if self.args.char:
            self.character_embeddigns = tf.get_variable(
                'character_embeddings',
                shape=(self.character_vocab.size(), self.args.character_embedding_size),
                initializer=tf.constant_initializer(self.character_vocab.embedding),
                trainable=True
            )
        """
        self.document1_emb = tf.nn.embedding_lookup(
            self.word_embeddings, self.document1
        )
        self.document2_emb = tf.nn.embedding_lookup(
            self.word_embeddings, self.document2
        )
        if self.args.char:
            self.document1_character_emb = tf.nn.embedding_lookup(
                self.character_embeddings, self.document1_character
            )
            self.document2_character_emb = tf.nn.embedding_lookup(
                self.character_embeddings, self.document2_character
            )
            # Flatten to (batch * doc_len, word_len, char_emb) so every word
            # becomes an independent sequence for the character convolution.
            self.document1_character_emb = tf.reshape(
                self.document1_character_emb,
                [-1, self.args.max_word_len, self.args.character_embedding_size],
            )
            self.document2_character_emb = tf.reshape(
                self.document2_character_emb,
                [-1, self.args.max_word_len, self.args.character_embedding_size],
            )
            document1_character_conv = conv(
                self.document1_character_emb,
                self.args.hidden_size,
                bias=True,
                activation=tf.nn.relu,
                kernel_size=5,
                name="char_conv",
                reuse=None,
            )
            # Max-pool over character positions -> one feature vector per word.
            document1_character_conv = tf.reduce_max(
                document1_character_conv, axis=1
            )
            document1_character_conv = tf.reshape(
                document1_character_conv,
                [-1, self.args.max_document_len, self.args.hidden_size],
            )
            # Same "char_conv" layer (reuse=True shares its weights) for doc 2.
            document2_character_conv = conv(
                self.document2_character_emb,
                self.args.hidden_size,
                bias=True,
                activation=tf.nn.relu,
                kernel_size=5,
                name="char_conv",
                reuse=True,
            )
            document2_character_conv = tf.reduce_max(
                document2_character_conv, axis=1
            )
            document2_character_conv = tf.reshape(
                document2_character_conv,
                [-1, self.args.max_document_len, self.args.hidden_size],
            )
            # Fuse word + char features through a shared highway network.
            self.doc1 = highway(
                tf.concat([self.document1_emb, document1_character_conv], axis=2),
                size=self.args.hidden_size,
                scope="highway",
                dropout=self.args.dropout,
            )
            self.doc2 = highway(
                tf.concat([self.document2_emb, document2_character_conv], axis=2),
                size=self.args.hidden_size,
                scope="highway",
                dropout=self.args.dropout,
                reuse=True,
            )
        else:
            self.doc1 = self.document1_emb
            self.doc2 = self.document2_emb
def _encode(self):
if self.args.class_model == "rcnn":
self.model = RCNN(doc1=self.doc1, doc2=self.doc2, args=self.args)
elif self.args.class_model == "rnn":
self.model = RNN(doc1=self.doc1, doc2=self.doc2, args=self.args)
elif self.args.class_model == "cnn":
self.model = CNN(doc1=self.doc1, doc2=self.doc2, args=self.args)
else:
raise NotImplementedError(
"Do not implement {} model".format(self.args.class_model)
)
self.document1_represent, self.document2_represent = self.model.build_graph()
def _match(self):
with tf.variable_scope("match"):
self.vector = tf.concat(
[self.document1_represent, self.document2_represent], 1
)
self.score = tc.layers.fully_connected(
self.vector, num_outputs=2, activation_fn=tf.nn.tanh
)
"""
document1_len = tf.sqrt(tf.reduce_sum(tf.multiply(self.document1_represent, self.document1_represent), 1))
document2_len = tf.sqrt(tf.reduce_sum(tf.multiply(self.document2_represent, self.document2_represent), 1))
mul = tf.reduce_sum(tf.multiply(self.document1_represent, self.document2_represent), 1)
tf.reduce_sum(tf.multiply(self.document1_represent, self.document2_represent), 1)
self.score = tf.div(mul, tf.multiply(document1_len,document2_len), name="score1")
"""
with tf.variable_scope("predict"):
self.predict = tf.argmax(self.score, axis=1)
with tf.variable_scope("accuracy"):
correct_predictions = tf.equal(self.predict, tf.argmax(self.label, 1))
self.accuracy = tf.reduce_mean(
tf.cast(correct_predictions, "float"), name="accuracy"
)
def _compute_loss(self):
with tf.variable_scope("loss"):
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=self.score, labels=self.label
)
)
self.all_params = tf.trainable_variables()
if self.args.weight_decay > 0:
with tf.variable_scope("l2_loss"):
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.all_params])
self.loss += self.args.weight_decay * l2_loss
def _create_train_op(self):
if self.args.optim == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(self.args.learning_rate)
elif self.args.optim == "adam":
self.optimizer = tf.train.AdamOptimizer(self.args.learning_rate)
elif self.args.optim == "rprop":
self.optimizer = tf.train.RMSPropOptimizer(self.args.learning_rate)
elif self.args.optim == "sgd":
self.optimizer = tf.train.GradientDescentOptimizer(self.args.learning_rate)
else:
raise NotImplementedError(
"Unsupported optimizer: {}".format(self.args.optim_type)
)
self.train_op = self.optimizer.minimize(self.loss)
def _train_epoch(self, train_batch):
"""
Trains the model for a single epoch.
Args:
train_batches: iterable batch data for training
"""
total_num, total_loss = 0, 0.0
index = 0
predicts = []
labels = []
for idx, batch in enumerate(train_batch, 1):
feed_dict = {
self.document1: batch["document1_ids"],
self.document2: batch["document2_ids"],
self.document1_character: batch["document1_character_ids"],
self.document2_character: batch["document2_character_ids"],
self.label: batch["label"],
}
_, loss, accuracy, score, vector, predict, doc1, doc2, a, b = self.sess.run(
[
self.train_op,
self.loss,
self.accuracy,
self.score,
self.vector,
self.predict,
self.document1_represent,
self.document2_represent,
self.document1_emb,
self.document2_emb,
],
feed_dict,
)
score_list = score.tolist()
vector_list = vector.tolist()
predict_list = predict.tolist()
doc1_list = doc1.tolist()
doc2_list = doc2.tolist()
a_list = a.tolist()
b_list = b.tolist()
total_loss += loss
predicts.extend(predict.tolist())
labels.extend(batch["label"])
# total_accuracy += accuracy * len(batch['raw_data'])
# total_auc += auc * len(batch['raw_data'])
index += 1
self.logger.info(
"batch {}, loss {}, accuracy {}".format(idx, loss, accuracy)
)
return 1.0 * total_loss / float(index), predicts, labels
    def train(
        self,
        data,
        epochs,
        batch_size,
        save_dir,
        save_prefix,
        evaluate=True,
        character=False,
    ):
        """Train the model, checkpointing on the best dev F1 when evaluating.

        Args:
            data: dataset object exposing ``get_mini_batchs`` and ``dev_set``
            batch_size: number of samples per mini-batch
            epochs: the number of training epochs
            save_dir: the directory to save the model in
            save_prefix: the prefix indicating the model type
            evaluate: whether to evaluate the model on the dev set after each
                epoch; the model is then saved only when dev F1 improves
            character: whether or not to use the character feature
                (unused in this method -- TODO confirm; char usage is driven
                by ``self.args.char`` at graph-build time)
        """
        max_f1score = 0.0
        for epoch in range(1, epochs + 1):
            self.logger.info("Training the model for epoch {}".format(epoch))
            train_batchs = data.get_mini_batchs(
                batch_size=batch_size, set_name="train", shuffle=True
            )
            train_loss, predicts, labels = self._train_epoch(train_batchs)
            self.logger.info(
                "Average train loss for epoch {} is {}".format(epoch, train_loss)
            )
            # Per-epoch training metrics (labels are one-hot -> argmax).
            self.logger.info(
                "classification_report: \n {}".format(
                    metrics.classification_report(
                        np.argmax(labels, axis=1), np.array(predicts)
                    )
                )
            )
            self.logger.info(
                "混淆矩阵为: \n {}".format(
                    metrics.confusion_matrix(
                        np.argmax(labels, axis=1), np.array(predicts)
                    )
                )
            )
            self.logger.info(
                "completeness_score: {}".format(
                    metrics.completeness_score(
                        np.argmax(labels, axis=1), np.array(predicts)
                    )
                )
            )
            if evaluate:
                if data.dev_set is not None:
                    dev_batches = data.get_mini_batchs(
                        batch_size=batch_size, set_name="dev"
                    )
                    loss_, accuracy_, predicts, labels = self.evaluate(dev_batches)
                    f1score = metrics.f1_score(np.argmax(labels, axis=1), np.array(predicts))
                    self.logger.info("Dev eval loss {}".format(loss_))
                    self.logger.info("Dev eval accuracy {}".format(accuracy_))
                    self.logger.info(
                        "classification_report: \n {}".format(
                            metrics.classification_report(
                                np.argmax(labels, axis=1), np.array(predicts)
                            )
                        )
                    )
                    self.logger.info(
                        "混淆矩阵为: \n {}".format(
                            metrics.confusion_matrix(
                                np.argmax(labels, axis=1), np.array(predicts)
                            )
                        )
                    )
                    self.logger.info(
                        "completeness_score: {}".format(
                            metrics.completeness_score(
                                np.argmax(labels, axis=1), np.array(predicts)
                            )
                        )
                    )
                    # Keep only the checkpoint with the best dev F1 so far.
                    if f1score >= max_f1score:
                        max_f1score = f1score
                        self.save(save_dir, save_prefix)
            else:
                self.save(save_dir, save_prefix)
    def evaluate(
        self, batch_data, result_dir=None, result_prefix=None, save_predict_label=False
    ):
        """Run the model over ``batch_data`` and return loss/accuracy/outputs.

        Args:
            batch_data: iterable of mini-batch dicts (must include ``label``
                and ``raw_data``)
            result_dir: the directory to save the predicted answers in;
                answers are not saved if None
            result_prefix: the file prefix for saving the predicted answers;
                answers are not saved if None
            save_predict_label: if True, predictions are attached to the raw
                samples and written to ``result_dir/result_prefix.json``

        Returns:
            A tuple ``(mean_loss, mean_accuracy, predicts, labels)``.
        """
        if save_predict_label:
            result = []
        total_loss, total_num, total_accuracy = 0.0, 0, 0.0
        index = 0
        labels = []
        predicts = []
        for idx, batch in enumerate(batch_data):
            feed_dict = {
                self.document1: batch["document1_ids"],
                self.document2: batch["document2_ids"],
                self.document1_character: batch["document1_character_ids"],
                self.document2_character: batch["document2_character_ids"],
                self.label: batch["label"],
            }
            loss, accuracy, predict = self.sess.run(
                [self.loss, self.accuracy, self.predict], feed_dict
            )
            index += 1
            # Loss is weighted by sample count; accuracy is averaged per batch.
            total_loss += loss * len(batch["raw_data"])
            total_accuracy += accuracy
            predicts.extend(predict.tolist())
            labels.extend(batch["label"])
            """
            total_auc += auc * len(batch['raw_data'])
            total_accuracy += accuracy * len(batch['raw_data'])
            """
            total_num += len(batch["raw_data"])
            if save_predict_label:
                for idx, sample in enumerate(batch["raw_data"]):
                    result.append(
                        {
                            "id": sample["id"],
                            "document1": "".join(sample["document1"]),
                            "document2": "".join(sample["document2"]),
                            "label": np.argmax(sample["label"]),
                            "predict": predict[idx],
                        }
                    )
        if save_predict_label:
            if result_dir is not None and result_prefix is not None:
                result_file = os.path.join(result_dir, result_prefix + ".json")
                self.logger.info("Write predict label to {}".format(result_file))
                with open(result_file, "w") as fout:
                    # NOTE(review): the header advertises a ``label`` column,
                    # but _json_2_string writes id/doc1/doc2/predict only --
                    # confirm whether the label column was intended.
                    fout.write("id\tdoc1\tdoc2\tpredict\tlabel\n")
                    for tmp in result:
                        fout.write(self._json_2_string(tmp) + "\n")
        return (
            total_loss / float(total_num),
            total_accuracy / float(index),
            predicts,
            labels,
        )
    def predictiton(
        self, batch_data, result_file, save_predict_label=False
    ):
        """Predict labels for ``batch_data`` (inference only, no ground truth).

        Note: the method name keeps its historical misspelling
        ("predictiton") for backward compatibility with existing callers.

        Args:
            batch_data: iterable of mini-batch dicts with document ids and
                character ids; labels are not required
            result_file: path the tab-separated predictions are written to
                when ``save_predict_label`` is True
            save_predict_label: if True, write one ``id<TAB>predict`` line
                per sample to ``result_file``

        Returns:
            A one-element tuple holding the list of predicted labels (note
            the trailing comma in ``return predicts,``).
        """
        if save_predict_label:
            result = []
        index = 0
        predicts = []
        for idx, batch in enumerate(batch_data):
            feed_dict = {
                self.document1: batch["document1_ids"],
                self.document2: batch["document2_ids"],
                self.document1_character: batch["document1_character_ids"],
                self.document2_character: batch["document2_character_ids"],
            }
            predict = self.sess.run(
                [self.predict], feed_dict
            )
            index += 1
            # sess.run returns a list of fetches; unwrap the single fetch.
            predict = predict[0]
            predicts.extend(predict.tolist())
            if save_predict_label:
                for idx, sample in enumerate(batch["raw_data"]):
                    result.append(
                        {
                            "id": sample["id"],
                            # "document1": "".join(sample["document1"]),
                            # "document2": "".join(sample["document2"]),
                            "predict": predict[idx],
                        }
                    )
        if save_predict_label:
            self.logger.info("Write predict label to {}".format(result_file))
            with open(result_file, "w") as fout:
                for tmp in result:
                    fout.write(self._json_2_string(tmp, predict=True) + "\n")
        return predicts,
def save(self, model_dir, model_prefix):
"""
Saves the model into model_dir with model_prefix as the model indicator
"""
self.saver.save(self.sess, os.path.join(model_dir, model_prefix))
self.logger.info(
"Model saved in {}, with prefix {}.".format(model_dir, model_prefix)
)
def restore(self, model_dir, model_prefix):
"""
Restores the model into model_dir from model_prefix as the model indicator
"""
self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))
self.logger.info(
"Model restored from {}, with prefix {}".format(model_dir, model_prefix)
)
def _json_2_string(self, json_obj, predict=False):
if predict:
s = json_obj['id'] + '\t' + str(json_obj['predict'])
else:
s = (
json_obj["id"]
+ "\t"
+ json_obj["document1"]
+ "\t"
+ json_obj["document2"]
+ "\t"
+ str(json_obj["predict"])
)
return s
| [
"logging.getLogger",
"tensorflow.local_variables_initializer",
"tensorflow.shape",
"numpy.array",
"tensorflow.cast",
"tensorflow.nn.embedding_lookup",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.concat",
"tensorflow.nn.softmax_cross_en... | [((1129, 1157), 'logging.getLogger', 'logging.getLogger', (['"""alibaba"""'], {}), "('alibaba')\n", (1146, 1157), False, 'import logging\n'), ((1285, 1301), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1299, 1301), True, 'import tensorflow as tf\n'), ((1374, 1404), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), '(config=sess_config)\n', (1384, 1404), True, 'import tensorflow as tf\n'), ((1456, 1472), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1470, 1472), True, 'import tensorflow as tf\n'), ((1668, 1679), 'time.time', 'time.time', ([], {}), '()\n', (1677, 1679), False, 'import time\n'), ((2174, 2234), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.args.max_document_len]'], {}), '(tf.int32, [None, self.args.max_document_len])\n', (2188, 2234), True, 'import tensorflow as tf\n'), ((2260, 2320), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.args.max_document_len]'], {}), '(tf.int32, [None, self.args.max_document_len])\n', (2274, 2320), True, 'import tensorflow as tf\n'), ((2356, 2445), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.args.max_document_len, self.args.max_word_len]'], {}), '(tf.int32, [None, self.args.max_document_len, self.args.\n max_word_len])\n', (2370, 2445), True, 'import tensorflow as tf\n'), ((2498, 2587), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.args.max_document_len, self.args.max_word_len]'], {}), '(tf.int32, [None, self.args.max_document_len, self.args.\n max_word_len])\n', (2512, 2587), True, 'import tensorflow as tf\n'), ((2626, 2661), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None, 2]'], {}), '(tf.int64, [None, 2])\n', (2640, 2661), True, 'import tensorflow as tf\n'), ((1527, 1560), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1558, 1560), True, 'import tensorflow as tf\n'), 
((1584, 1616), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1614, 1616), True, 'import tensorflow as tf\n'), ((2698, 2728), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (2715, 2728), True, 'import tensorflow as tf\n'), ((4196, 4256), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.word_embeddings', 'self.document1'], {}), '(self.word_embeddings, self.document1)\n', (4218, 4256), True, 'import tensorflow as tf\n'), ((4320, 4380), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.word_embeddings', 'self.document2'], {}), '(self.word_embeddings, self.document2)\n', (4342, 4380), True, 'import tensorflow as tf\n'), ((7865, 7891), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""match"""'], {}), "('match')\n", (7882, 7891), True, 'import tensorflow as tf\n'), ((7920, 7986), 'tensorflow.concat', 'tf.concat', (['[self.document1_represent, self.document2_represent]', '(1)'], {}), '([self.document1_represent, self.document2_represent], 1)\n', (7929, 7986), True, 'import tensorflow as tf\n'), ((8042, 8121), 'tensorflow.contrib.layers.fully_connected', 'tc.layers.fully_connected', (['self.vector'], {'num_outputs': '(2)', 'activation_fn': 'tf.nn.tanh'}), '(self.vector, num_outputs=2, activation_fn=tf.nn.tanh)\n', (8067, 8121), True, 'import tensorflow.contrib as tc\n'), ((8716, 8744), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""predict"""'], {}), "('predict')\n", (8733, 8744), True, 'import tensorflow as tf\n'), ((8773, 8802), 'tensorflow.argmax', 'tf.argmax', (['self.score'], {'axis': '(1)'}), '(self.score, axis=1)\n', (8782, 8802), True, 'import tensorflow as tf\n'), ((8817, 8846), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (8834, 8846), True, 'import tensorflow as tf\n'), ((9104, 9129), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), 
"('loss')\n", (9121, 9129), True, 'import tensorflow as tf\n'), ((9347, 9371), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (9369, 9371), True, 'import tensorflow as tf\n'), ((9715, 9765), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['self.args.learning_rate'], {}), '(self.args.learning_rate)\n', (9740, 9765), True, 'import tensorflow as tf\n'), ((20919, 20956), 'os.path.join', 'os.path.join', (['model_dir', 'model_prefix'], {}), '(model_dir, model_prefix)\n', (20931, 20956), False, 'import os\n'), ((21269, 21306), 'os.path.join', 'os.path.join', (['model_dir', 'model_prefix'], {}), '(model_dir, model_prefix)\n', (21281, 21306), False, 'import os\n'), ((2747, 2765), 'tensorflow.device', 'tf.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (2756, 2765), True, 'import tensorflow as tf\n'), ((4489, 4564), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.character_embeddings', 'self.document1_character'], {}), '(self.character_embeddings, self.document1_character)\n', (4511, 4564), True, 'import tensorflow as tf\n'), ((4650, 4725), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.character_embeddings', 'self.document2_character'], {}), '(self.character_embeddings, self.document2_character)\n', (4672, 4725), True, 'import tensorflow as tf\n'), ((4811, 4922), 'tensorflow.reshape', 'tf.reshape', (['self.document1_character_emb', '[-1, self.args.max_word_len, self.args.character_embedding_size]'], {}), '(self.document1_character_emb, [-1, self.args.max_word_len, self.\n args.character_embedding_size])\n', (4821, 4922), True, 'import tensorflow as tf\n'), ((5024, 5135), 'tensorflow.reshape', 'tf.reshape', (['self.document2_character_emb', '[-1, self.args.max_word_len, self.args.character_embedding_size]'], {}), '(self.document2_character_emb, [-1, self.args.max_word_len, self.\n args.character_embedding_size])\n', (5034, 5135), True, 'import tensorflow as tf\n'), ((5573, 5620), 
'tensorflow.reduce_max', 'tf.reduce_max', (['document1_character_conv'], {'axis': '(1)'}), '(document1_character_conv, axis=1)\n', (5586, 5620), True, 'import tensorflow as tf\n'), ((5702, 5800), 'tensorflow.reshape', 'tf.reshape', (['document1_character_conv', '[-1, self.args.max_document_len, self.args.hidden_size]'], {}), '(document1_character_conv, [-1, self.args.max_document_len, self.\n args.hidden_size])\n', (5712, 5800), True, 'import tensorflow as tf\n'), ((6238, 6285), 'tensorflow.reduce_max', 'tf.reduce_max', (['document2_character_conv'], {'axis': '(1)'}), '(document2_character_conv, axis=1)\n', (6251, 6285), True, 'import tensorflow as tf\n'), ((6367, 6465), 'tensorflow.reshape', 'tf.reshape', (['document2_character_conv', '[-1, self.args.max_document_len, self.args.hidden_size]'], {}), '(document2_character_conv, [-1, self.args.max_document_len, self.\n args.hidden_size])\n', (6377, 6465), True, 'import tensorflow as tf\n'), ((8905, 8929), 'tensorflow.argmax', 'tf.argmax', (['self.label', '(1)'], {}), '(self.label, 1)\n', (8914, 8929), True, 'import tensorflow as tf\n'), ((8991, 9028), 'tensorflow.cast', 'tf.cast', (['correct_predictions', '"""float"""'], {}), "(correct_predictions, 'float')\n", (8998, 9028), True, 'import tensorflow as tf\n'), ((9187, 9264), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'self.score', 'labels': 'self.label'}), '(logits=self.score, labels=self.label)\n', (9226, 9264), True, 'import tensorflow as tf\n'), ((9835, 9882), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.args.learning_rate'], {}), '(self.args.learning_rate)\n', (9857, 9882), True, 'import tensorflow as tf\n'), ((18209, 18258), 'os.path.join', 'os.path.join', (['result_dir', "(result_prefix + '.json')"], {}), "(result_dir, result_prefix + '.json')\n", (18221, 18258), False, 'import os\n'), ((1909, 1920), 'time.time', 'time.time', ([], {}), '()\n', (1918, 1920), False, 'import 
time\n'), ((6577, 6642), 'tensorflow.concat', 'tf.concat', (['[self.document1_emb, document1_character_conv]'], {'axis': '(2)'}), '([self.document1_emb, document1_character_conv], axis=2)\n', (6586, 6642), True, 'import tensorflow as tf\n'), ((6851, 6916), 'tensorflow.concat', 'tf.concat', (['[self.document2_emb, document2_character_conv]'], {'axis': '(2)'}), '([self.document2_emb, document2_character_conv], axis=2)\n', (6860, 6916), True, 'import tensorflow as tf\n'), ((9436, 9464), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""l2_loss"""'], {}), "('l2_loss')\n", (9453, 9464), True, 'import tensorflow as tf\n'), ((9953, 10003), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.args.learning_rate'], {}), '(self.args.learning_rate)\n', (9978, 10003), True, 'import tensorflow as tf\n'), ((1983, 1994), 'tensorflow.shape', 'tf.shape', (['v'], {}), '(v)\n', (1991, 1994), True, 'import tensorflow as tf\n'), ((10072, 10130), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.args.learning_rate'], {}), '(self.args.learning_rate)\n', (10105, 10130), True, 'import tensorflow as tf\n'), ((13556, 13581), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (13565, 13581), True, 'import numpy as np\n'), ((13583, 13601), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (13591, 13601), True, 'import numpy as np\n'), ((13795, 13820), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (13804, 13820), True, 'import numpy as np\n'), ((13822, 13840), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (13830, 13840), True, 'import numpy as np\n'), ((14046, 14071), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (14055, 14071), True, 'import numpy as np\n'), ((14073, 14091), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (14081, 14091), True, 'import numpy as np\n'), ((14487, 
14512), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (14496, 14512), True, 'import numpy as np\n'), ((14514, 14532), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (14522, 14532), True, 'import numpy as np\n'), ((9506, 9522), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (9519, 9522), True, 'import tensorflow as tf\n'), ((17949, 17975), 'numpy.argmax', 'np.argmax', (["sample['label']"], {}), "(sample['label'])\n", (17958, 17975), True, 'import numpy as np\n'), ((14876, 14901), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (14885, 14901), True, 'import numpy as np\n'), ((14903, 14921), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (14911, 14921), True, 'import numpy as np\n'), ((15171, 15196), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (15180, 15196), True, 'import numpy as np\n'), ((15198, 15216), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (15206, 15216), True, 'import numpy as np\n'), ((15478, 15503), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (15487, 15503), True, 'import numpy as np\n'), ((15505, 15523), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (15513, 15523), True, 'import numpy as np\n')] |
import numpy as np
import unittest
from chainer import testing
from chainercv.links.model.ssd import resize_with_random_interpolation
# cv2 is an optional dependency: the resize tests only run when OpenCV
# is importable, otherwise their bodies bail out early.
try:
    import cv2  # NOQA
    optional_modules = True
except ImportError:
    optional_modules = False
class TestResizeWithRandomInterpolation(unittest.TestCase):

    def _check_resize(self, n_channels):
        """Resize an ``n_channels``-channel image and check the output shape."""
        # Report a proper skip (instead of a silent pass via ``return``)
        # when the optional cv2 dependency is unavailable.
        if not optional_modules:
            self.skipTest('cv2 is not installed')
        img = np.random.uniform(size=(n_channels, 24, 32))
        out = resize_with_random_interpolation(img, size=(32, 64))
        self.assertEqual(out.shape, (n_channels, 32, 64))

    def test_resize_color(self):
        self._check_resize(3)

    def test_resize_grayscale(self):
        self._check_resize(1)
# Allow this test module to be executed directly via Chainer's test runner.
testing.run_module(__name__, __file__)
| [
"chainer.testing.run_module",
"chainercv.links.model.ssd.resize_with_random_interpolation",
"numpy.random.uniform"
] | [((813, 851), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (831, 851), False, 'from chainer import testing\n'), ((403, 438), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 24, 32)'}), '(size=(3, 24, 32))\n', (420, 438), True, 'import numpy as np\n'), ((453, 505), 'chainercv.links.model.ssd.resize_with_random_interpolation', 'resize_with_random_interpolation', (['img'], {'size': '(32, 64)'}), '(img, size=(32, 64))\n', (485, 505), False, 'from chainercv.links.model.ssd import resize_with_random_interpolation\n'), ((659, 694), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, 24, 32)'}), '(size=(1, 24, 32))\n', (676, 694), True, 'import numpy as np\n'), ((709, 761), 'chainercv.links.model.ssd.resize_with_random_interpolation', 'resize_with_random_interpolation', (['img'], {'size': '(32, 64)'}), '(img, size=(32, 64))\n', (741, 761), False, 'from chainercv.links.model.ssd import resize_with_random_interpolation\n')] |
# Should match the matlab version
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal

# Plant matrices: a double integrator with viscous damping.  The constants
# -10/100 and 1/100 suggest mass 100 and damping 10 -- TODO confirm.
# State x = [position, velocity]; output y = position.
A = np.array([[0.0, 1.0], [0.0, -10.0/100]])
B = np.array([[0.0], [1.0/100.0]])
C = np.array([1.0, 0.0])
D = np.array([0.0])
# State-feedback gain and initial state [position = 10, velocity = 0].
K = np.array([30.0, 70.0])
X = np.array([10, 0.0])
# Closed-loop system x' = (A - B*K) x + B*u; plot its step response.
sys = signal.StateSpace(A-B*K, B, C, D)
t, y = signal.step(sys, X0=X)
plt.plot(t, y, 'b--', label="State Space")
plt.show()
| [
"scipy.signal.step",
"matplotlib.pyplot.plot",
"numpy.array",
"scipy.signal.StateSpace",
"matplotlib.pyplot.show"
] | [((115, 157), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, -10.0 / 100]]'], {}), '([[0.0, 1.0], [0.0, -10.0 / 100]])\n', (123, 157), True, 'import numpy as np\n'), ((160, 192), 'numpy.array', 'np.array', (['[[0.0], [1.0 / 100.0]]'], {}), '([[0.0], [1.0 / 100.0]])\n', (168, 192), True, 'import numpy as np\n'), ((195, 215), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (203, 215), True, 'import numpy as np\n'), ((220, 235), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (228, 235), True, 'import numpy as np\n'), ((240, 262), 'numpy.array', 'np.array', (['[30.0, 70.0]'], {}), '([30.0, 70.0])\n', (248, 262), True, 'import numpy as np\n'), ((267, 286), 'numpy.array', 'np.array', (['[10, 0.0]'], {}), '([10, 0.0])\n', (275, 286), True, 'import numpy as np\n'), ((294, 331), 'scipy.signal.StateSpace', 'signal.StateSpace', (['(A - B * K)', 'B', 'C', 'D'], {}), '(A - B * K, B, C, D)\n', (311, 331), False, 'from scipy import signal\n'), ((335, 357), 'scipy.signal.step', 'signal.step', (['sys'], {'X0': 'X'}), '(sys, X0=X)\n', (346, 357), False, 'from scipy import signal\n'), ((359, 401), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y', '"""b--"""'], {'label': '"""State Space"""'}), "(t, y, 'b--', label='State Space')\n", (367, 401), True, 'import matplotlib.pyplot as plt\n'), ((402, 412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (410, 412), True, 'import matplotlib.pyplot as plt\n')] |
from pdb import set_trace as br
import csv
import numpy as np


def read_ages(path):
    """Parse comma-separated timers from *path*.

    Mirrors the original parsing: every row is read and the last row wins.
    """
    timers = []
    with open(path) as f:
        for row in csv.reader(f, delimiter=' ', skipinitialspace=True):
            timers = [int(a) for a in row[0].split(',')]
    return timers


def simulate(ages, days):
    """Return the population size after *days* days of growth.

    Each timer counts down daily; at 0 it resets to 6 and spawns a new
    fish with timer 8.  Tracking a count per timer value keeps this
    O(days) instead of growing a list exponentially (the original also
    mutated the list while iterating it, which is fragile).
    """
    counts = [0] * 9
    for a in ages:
        counts[a] += 1
    for _ in range(days):
        spawning = counts[0]
        counts = counts[1:] + [spawning]  # shift timers down; newborns at 8
        counts[6] += spawning             # parents reset to 6
    return sum(counts)


if __name__ == "__main__":
    # Same observable behavior as before: print the final population size.
    print(simulate(read_ages('input.txt'), 80))
"numpy.array",
"csv.reader"
] | [((68, 80), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (76, 80), True, 'import numpy as np\n'), ((118, 169), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "(f, delimiter=' ', skipinitialspace=True)\n", (128, 169), False, 'import csv\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to load, preprocess and train on CIFAR-10."""
from absl import app
from absl import flags
from absl import logging
import functools
import os
import pickle
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from do_wide_and_deep_networks_learn_the_same_things.resnet_cifar import ResNet_CIFAR
from do_wide_and_deep_networks_learn_the_same_things.shake_shake import build_shake_shake_model
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
# Training hyperparameters.
flags.DEFINE_integer('batch_size', 128, 'Batch size')
flags.DEFINE_float('learning_rate', 0.01, 'Learning rate')
flags.DEFINE_integer('epochs', 300, 'Number of epochs to train for')
flags.DEFINE_float('weight_decay', 0.0001, 'L2 regularization')
# Model & data hyperparameters.
flags.DEFINE_integer('depth', 56, 'No. of layers to use in the ResNet model')
flags.DEFINE_integer(
    'width_multiplier', 1,
    'How much to scale the width of the standard ResNet model by')
flags.DEFINE_integer(
    'copy', 0,
    'If the same model configuration has been run before, train another copy with a different random initialization'
)
flags.DEFINE_string('base_dir', None,
                    'Where the trained model will be saved')
flags.DEFINE_string('data_path', '',
                    'Directory where CIFAR subsampled dataset is stored')
flags.DEFINE_string('dataset_name', 'cifar10',
                    'Name of dataset used (CIFAR-10 of CIFAR-100)')
flags.DEFINE_boolean('use_residual', True,
                     'Whether to include residual connections in the model')
flags.DEFINE_boolean('randomize_labels', False,
                     'Whether to randomize labels during training')
flags.DEFINE_string('pretrain_dir', '',
                    'Directory where the pretrained model is saved')
flags.DEFINE_boolean(
    'partial_init', False,
    'Whether to initialize only the first few layers with pretrained weights')
flags.DEFINE_boolean('shake_shake', False, 'Whether to use shake shake model')
flags.DEFINE_boolean('distort_color', False,
                     'Whether to apply color distortion augmentation')
flags.DEFINE_integer('epoch_save_freq', 0, 'Frequency at which ckpts are saved')
flags.DEFINE_boolean(
    'save_image', False,
    'Whether to save metadata of images used for each minibatch')
def find_stack_markers(model):
  """Return the indices of the layers at which a new ResNet stack begins.

  A boundary is a conv layer whose kernel changes the channel count
  (in != out), is not a 1x1 projection, and whose input channel count is
  a multiple of 16.  Exactly two boundaries are expected for the CIFAR
  ResNets used here.
  """
  markers = []
  for idx, layer in enumerate(model.layers):
    if idx == 0 or 'conv' not in layer.name:
      continue
    shape = layer.get_weights()[0].shape
    widens_channels = shape[-1] != shape[-2]
    not_pointwise = shape[0] != 1
    aligned_input = shape[-2] % 16 == 0
    if widens_channels and not_pointwise and aligned_input:
      markers.append(idx)
  assert (len(markers) == 2)
  return markers
def random_apply(transform_fn, image, p):
  """With probability ``p`` return ``transform_fn(image)``; otherwise return ``image`` unchanged."""
  apply_it = tf.random.uniform([]) < p
  return transform_fn(image) if apply_it else image
def color_distortion(image, s=1.0):
  """Apply SimCLR-style color distortion augmentation.

  ``image`` is expected to have values in [0, 1]; ``s`` scales the
  distortion strength.  Color jitter is applied with probability 0.8 and
  a grayscale drop with probability 0.2.
  """

  def jitter(x):
    # Brightness/contrast/saturation/hue in a fixed order; the order could
    # in principle be shuffled per application.
    x = tf.image.random_brightness(x, max_delta=0.8 * s)
    x = tf.image.random_contrast(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)
    x = tf.image.random_saturation(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)
    x = tf.image.random_hue(x, max_delta=0.2 * s)
    return tf.clip_by_value(x, 0, 1)

  def to_gray(x):
    return tf.tile(tf.image.rgb_to_grayscale(x), [1, 1, 3])

  image = random_apply(jitter, image, p=0.8)
  return random_apply(to_gray, image, p=0.2)
def preprocess_data(image, label, is_training):
  """Convert a CIFAR image to float32 and apply train/eval preprocessing.

  Training: reflect-pad by 4 pixels, random 32x32 crop, random horizontal
  flip, and optional color distortion (FLAGS.distort_color).
  Evaluation: deterministic central crop/pad to 32x32.
  """
  image = tf.image.convert_image_dtype(image, tf.float32)
  if not is_training:
    return tf.image.resize_with_crop_or_pad(image, 32, 32), label  # central crop
  pad = 4
  image = tf.pad(image, [[pad, pad], [pad, pad], [0, 0]], 'REFLECT')
  image = tf.image.random_crop(image, [32, 32, 3])
  image = tf.image.random_flip_left_right(image)
  if FLAGS.distort_color:
    image = color_distortion(image, s=1.0)
  return image, label
def preprocess_data_with_id(data, is_training):
  """CIFAR preprocessing for examples that carry an image id.

  Mirrors ``preprocess_data`` (minus the color-distortion option) and
  returns ``(id, image, label)`` tuples read from the ``data`` dict.
  """
  image = tf.image.convert_image_dtype(data['image'], tf.float32)
  if is_training:
    pad = 4
    image = tf.pad(image, [[pad, pad], [pad, pad], [0, 0]], 'REFLECT')
    image = tf.image.random_crop(image, [32, 32, 3])
    image = tf.image.random_flip_left_right(image)
  else:
    image = tf.image.resize_with_crop_or_pad(image, 32, 32)  # central crop
  return data['id'], image, data['label']
def load_train_data(batch_size,
                    data_path='',
                    dataset_name='cifar10',
                    n_data=50000,
                    randomize_labels=False,
                    as_supervised=True):
  """Build the CIFAR training pipeline.

  Args:
    batch_size: per-step batch size; incomplete final batches are dropped.
    data_path: if non-empty, selects a TFDS subsample by substring match
      ('tiny' -> first 6%, 'half' -> first 50%, anything else -> first 25%).
    dataset_name: TFDS dataset name ('cifar10' or 'cifar100').
    n_data: number of training examples; used as the shuffle buffer size.
    randomize_labels: if True, shuffle the labels independently of the
      images (seeded with FLAGS.copy) to train on random labels.
    as_supervised: if True yield (image, label); otherwise yield
      (id, image, label) via preprocess_data_with_id.

  Returns:
    A batched, prefetched tf.data.Dataset.
  """
  if not data_path:
    train_dataset = tfds.load(
        name=dataset_name, split='train', as_supervised=as_supervised)
  else:
    if 'tiny' in data_path:  # load about 1/16 of the data
      train_dataset = tfds.load(
          name=dataset_name, split='train[:6%]', as_supervised=as_supervised)
    elif 'half' in data_path:  # load half of the data
      train_dataset = tfds.load(
          name=dataset_name, split='train[:50%]', as_supervised=as_supervised)
    else:  # load 1/4 of the data
      train_dataset = tfds.load(
          name=dataset_name, split='train[:25%]', as_supervised=as_supervised)
  if randomize_labels:
    # NOTE(review): this branch unpacks (image, label) tuples, so it
    # presumably requires as_supervised=True -- confirm callers never
    # combine randomize_labels with as_supervised=False.
    all_labels = []
    all_images = []
    for images, labels in train_dataset:
      all_labels.extend([labels.numpy()])
      all_images.append(images.numpy()[np.newaxis, :, :, :])
    all_images = np.vstack(all_images)
    # Seed with FLAGS.copy so each copy of the experiment gets a different
    # (but reproducible) label permutation; seeding must precede shuffle.
    np.random.seed(FLAGS.copy)
    np.random.shuffle(all_labels)
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (tf.convert_to_tensor(all_images, dtype=tf.float32),
         tf.convert_to_tensor(all_labels, dtype=tf.int64)))
  train_dataset = train_dataset.shuffle(buffer_size=n_data)
  if as_supervised:
    train_dataset = train_dataset.map(
        functools.partial(preprocess_data, is_training=True))
  else:
    train_dataset = train_dataset.map(
        functools.partial(preprocess_data_with_id, is_training=True))
  train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
  train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
  return train_dataset
def load_test_data(batch_size,
                   shuffle=False,
                   data_path='',
                   dataset_name='cifar10',
                   n_data=10000,
                   as_supervised=True):
  """Build the CIFAR evaluation pipeline.

  If ``dataset_name`` contains 'random', a fixed-seed synthetic dataset of
  uniform noise with all-zero labels is generated instead of loading TFDS.
  ``data_path`` is accepted for interface symmetry but unused here.
  """
  if 'random' in dataset_name:
    np.random.seed(0)
    test_labels = np.zeros((n_data,), dtype=np.int64)
    test_data = np.random.rand(n_data, 32, 32, 3)
    test_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels))
  else:
    test_dataset = tfds.load(
        name=dataset_name, split='test', as_supervised=as_supervised)
  preprocess = (
      functools.partial(preprocess_data, is_training=False)
      if as_supervised else
      functools.partial(preprocess_data_with_id, is_training=False))
  test_dataset = test_dataset.map(preprocess)
  if shuffle:
    test_dataset = test_dataset.shuffle(buffer_size=n_data)
  return test_dataset.batch(batch_size, drop_remainder=False)
def main(argv):
  """Train a ResNet/shake-shake model on CIFAR per FLAGS and log test accuracy."""
  # Infer the number of training examples from the (possibly subsampled)
  # data path; the fractions mirror the splits chosen in load_train_data.
  if FLAGS.data_path:
    if 'tiny' in FLAGS.data_path:
      n_data = int(50000 * 6 / 100)
    elif 'half' in FLAGS.data_path:
      n_data = 50000 // 2
    else:
      # load_train_data falls back to the 25% split for any other non-empty
      # path, so do the same here.  (Previously only paths containing
      # 'subsampled' set n_data, leaving it undefined otherwise.)
      n_data = 50000 // 4
  else:
    n_data = 50000
  train_dataset = load_train_data(
      FLAGS.batch_size,
      dataset_name=FLAGS.dataset_name,
      n_data=n_data,
      data_path=FLAGS.data_path,
      randomize_labels=FLAGS.randomize_labels,
      as_supervised=not FLAGS.save_image)
  test_dataset = load_test_data(
      FLAGS.batch_size, dataset_name=FLAGS.dataset_name, n_data=10000)
  steps_per_epoch = n_data // FLAGS.batch_size
  optimizer = tf.keras.optimizers.SGD(FLAGS.learning_rate, momentum=0.9)
  # Cosine-decay the learning rate over the whole run.
  schedule = tf.keras.experimental.CosineDecay(FLAGS.learning_rate,
                                            FLAGS.epochs)
  lr_scheduler = tf.keras.callbacks.LearningRateScheduler(schedule)
  num_classes = 100 if FLAGS.dataset_name == 'cifar100' else 10
  if FLAGS.shake_shake:
    model = build_shake_shake_model(num_classes, FLAGS.depth,
                                    FLAGS.width_multiplier, FLAGS.weight_decay)
  else:
    model = ResNet_CIFAR(
        FLAGS.depth,
        FLAGS.width_multiplier,
        FLAGS.weight_decay,
        num_classes=num_classes,
        use_residual=FLAGS.use_residual,
        save_image=FLAGS.save_image)
  if FLAGS.pretrain_dir:
    pretrained_model = tf.keras.models.load_model(FLAGS.pretrain_dir)
    n_layers = len(model.layers)
    if FLAGS.partial_init:
      # Use pretrained weights for only the layers of the first stage.
      stack_marker = find_stack_markers(pretrained_model)[0]
      for i in range(stack_marker):
        model.layers[i].set_weights(pretrained_model.layers[i].get_weights())
    else:
      # Use pretrained weights for all layers except the final classifier.
      for i in range(n_layers - 1):
        model.layers[i].set_weights(pretrained_model.layers[i].get_weights())
  # run_eagerly is needed when recording the per-batch image ids.
  if FLAGS.save_image:
    model.compile(
        optimizer,
        tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['acc'],
        run_eagerly=True)
  else:
    model.compile(
        optimizer,
        tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['acc'])
  model_dir = 'cifar-depth-%d-width-%d-bs-%d-lr-%f-reg-%f/' % \
      (FLAGS.depth, FLAGS.width_multiplier, FLAGS.batch_size,
       FLAGS.learning_rate, FLAGS.weight_decay)
  experiment_dir = os.path.join(FLAGS.base_dir, model_dir)
  if FLAGS.copy > 0:
    experiment_dir = '%s/cifar-depth-%d-width-%d-bs-%d-lr-%f-reg-%f-copy-%d/' % \
        (FLAGS.base_dir, FLAGS.depth, FLAGS.width_multiplier, FLAGS.batch_size,
         FLAGS.learning_rate, FLAGS.weight_decay, FLAGS.copy)
  if FLAGS.epoch_save_freq > 0:
    # Save the random initialization, then checkpoint every
    # epoch_save_freq epochs.
    tf.keras.models.save_model(
        model, experiment_dir, overwrite=True, include_optimizer=False)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=experiment_dir + 'weights.{epoch:02d}.ckpt',
        monitor='val_acc',
        verbose=1,
        save_best_only=False,
        save_freq='epoch',
        period=FLAGS.epoch_save_freq)
  else:
    # Keep only the best model by validation accuracy.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=experiment_dir,
        monitor='val_acc',
        verbose=1,
        save_best_only=True)
  model.fit(
      train_dataset,
      batch_size=FLAGS.batch_size,
      epochs=FLAGS.epochs,
      validation_data=test_dataset,
      verbose=1,
      steps_per_epoch=steps_per_epoch,
      callbacks=[checkpoint, lr_scheduler])
  if FLAGS.save_image:
    pickle.dump(model.all_ids,
                tf.io.gfile.GFile(
                    os.path.join(experiment_dir, 'image_ids.pkl'), 'wb'))
  # Reload the saved (best/latest) model and report its test accuracy.
  best_model = tf.keras.models.load_model(experiment_dir)
  best_model.compile(
      'sgd',
      tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=['acc'])
  test_metrics = best_model.evaluate(test_dataset, verbose=1)
  logging.info('Test accuracy: %.4f', test_metrics[1])


if __name__ == '__main__':
  app.run(main)
| [
"numpy.random.rand",
"tensorflow.compat.v2.keras.optimizers.SGD",
"absl.logging.info",
"tensorflow.compat.v2.image.random_saturation",
"tensorflow.compat.v2.keras.callbacks.LearningRateScheduler",
"absl.flags.DEFINE_float",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.keras.losses.S... | [((1046, 1069), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (1067, 1069), True, 'import tensorflow.compat.v2 as tf\n'), ((1124, 1177), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""Batch size"""'], {}), "('batch_size', 128, 'Batch size')\n", (1144, 1177), False, 'from absl import flags\n'), ((1178, 1236), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.01)', '"""Learning rate"""'], {}), "('learning_rate', 0.01, 'Learning rate')\n", (1196, 1236), False, 'from absl import flags\n'), ((1237, 1305), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epochs"""', '(300)', '"""Number of epochs to train for"""'], {}), "('epochs', 300, 'Number of epochs to train for')\n", (1257, 1305), False, 'from absl import flags\n'), ((1306, 1369), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""weight_decay"""', '(0.0001)', '"""L2 regularization"""'], {}), "('weight_decay', 0.0001, 'L2 regularization')\n", (1324, 1369), False, 'from absl import flags\n'), ((1407, 1484), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""depth"""', '(56)', '"""No. of layers to use in the ResNet model"""'], {}), "('depth', 56, 'No. 
of layers to use in the ResNet model')\n", (1427, 1484), False, 'from absl import flags\n'), ((1485, 1595), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""width_multiplier"""', '(1)', '"""How much to scale the width of the standard ResNet model by"""'], {}), "('width_multiplier', 1,\n 'How much to scale the width of the standard ResNet model by')\n", (1505, 1595), False, 'from absl import flags\n'), ((1601, 1755), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""copy"""', '(0)', '"""If the same model configuration has been run before, train another copy with a different random initialization"""'], {}), "('copy', 0,\n 'If the same model configuration has been run before, train another copy with a different random initialization'\n )\n", (1621, 1755), False, 'from absl import flags\n'), ((1757, 1835), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""base_dir"""', 'None', '"""Where the trained model will be saved"""'], {}), "('base_dir', None, 'Where the trained model will be saved')\n", (1776, 1835), False, 'from absl import flags\n'), ((1856, 1950), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_path"""', '""""""', '"""Directory where CIFAR subsampled dataset is stored"""'], {}), "('data_path', '',\n 'Directory where CIFAR subsampled dataset is stored')\n", (1875, 1950), False, 'from absl import flags\n'), ((1967, 2065), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_name"""', '"""cifar10"""', '"""Name of dataset used (CIFAR-10 of CIFAR-100)"""'], {}), "('dataset_name', 'cifar10',\n 'Name of dataset used (CIFAR-10 of CIFAR-100)')\n", (1986, 2065), False, 'from absl import flags\n'), ((2082, 2184), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_residual"""', '(True)', '"""Whether to include residual connections in the model"""'], {}), "('use_residual', True,\n 'Whether to include residual connections in the model')\n", (2102, 2184), False, 'from absl import flags\n'), ((2202, 2300), 
'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""randomize_labels"""', '(False)', '"""Whether to randomize labels during training"""'], {}), "('randomize_labels', False,\n 'Whether to randomize labels during training')\n", (2222, 2300), False, 'from absl import flags\n'), ((2318, 2410), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pretrain_dir"""', '""""""', '"""Directory where the pretrained model is saved"""'], {}), "('pretrain_dir', '',\n 'Directory where the pretrained model is saved')\n", (2337, 2410), False, 'from absl import flags\n'), ((2427, 2549), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""partial_init"""', '(False)', '"""Whether to initialize only the first few layers with pretrained weights"""'], {}), "('partial_init', False,\n 'Whether to initialize only the first few layers with pretrained weights')\n", (2447, 2549), False, 'from absl import flags\n'), ((2555, 2633), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""shake_shake"""', '(False)', '"""Whether to use shake shake model"""'], {}), "('shake_shake', False, 'Whether to use shake shake model')\n", (2575, 2633), False, 'from absl import flags\n'), ((2634, 2732), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""distort_color"""', '(False)', '"""Whether to apply color distortion augmentation"""'], {}), "('distort_color', False,\n 'Whether to apply color distortion augmentation')\n", (2654, 2732), False, 'from absl import flags\n'), ((2750, 2835), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epoch_save_freq"""', '(0)', '"""Frequency at which ckpts are saved"""'], {}), "('epoch_save_freq', 0, 'Frequency at which ckpts are saved'\n )\n", (2770, 2835), False, 'from absl import flags\n'), ((2831, 2938), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""save_image"""', '(False)', '"""Whether to save metadata of images used for each minibatch"""'], {}), "('save_image', False,\n 'Whether to save metadata of images used for each 
minibatch')\n", (2851, 2938), False, 'from absl import flags\n'), ((4621, 4668), 'tensorflow.compat.v2.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4649, 4668), True, 'import tensorflow.compat.v2 as tf\n'), ((5290, 5337), 'tensorflow.compat.v2.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (5318, 5337), True, 'import tensorflow.compat.v2 as tf\n'), ((9253, 9311), 'tensorflow.compat.v2.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['FLAGS.learning_rate'], {'momentum': '(0.9)'}), '(FLAGS.learning_rate, momentum=0.9)\n', (9276, 9311), True, 'import tensorflow.compat.v2 as tf\n'), ((9325, 9393), 'tensorflow.compat.v2.keras.experimental.CosineDecay', 'tf.keras.experimental.CosineDecay', (['FLAGS.learning_rate', 'FLAGS.epochs'], {}), '(FLAGS.learning_rate, FLAGS.epochs)\n', (9358, 9393), True, 'import tensorflow.compat.v2 as tf\n'), ((9458, 9508), 'tensorflow.compat.v2.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', (['schedule'], {}), '(schedule)\n', (9498, 9508), True, 'import tensorflow.compat.v2 as tf\n'), ((11124, 11163), 'os.path.join', 'os.path.join', (['FLAGS.base_dir', 'model_dir'], {}), '(FLAGS.base_dir, model_dir)\n', (11136, 11163), False, 'import os\n'), ((12411, 12453), 'tensorflow.compat.v2.keras.models.load_model', 'tf.keras.models.load_model', (['experiment_dir'], {}), '(experiment_dir)\n', (12437, 12453), True, 'import tensorflow.compat.v2 as tf\n'), ((12649, 12701), 'absl.logging.info', 'logging.info', (['"""Test accuracy: %.4f"""', 'test_metrics[1]'], {}), "('Test accuracy: %.4f', test_metrics[1])\n", (12661, 12701), False, 'from absl import logging\n'), ((12733, 12746), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (12740, 12746), False, 'from absl import app\n'), ((3574, 3595), 'tensorflow.compat.v2.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', 
(3591, 3595), True, 'import tensorflow.compat.v2 as tf\n'), ((3960, 4008), 'tensorflow.compat.v2.image.random_brightness', 'tf.image.random_brightness', (['x'], {'max_delta': '(0.8 * s)'}), '(x, max_delta=0.8 * s)\n', (3986, 4008), True, 'import tensorflow.compat.v2 as tf\n'), ((4017, 4082), 'tensorflow.compat.v2.image.random_contrast', 'tf.image.random_contrast', (['x'], {'lower': '(1 - 0.8 * s)', 'upper': '(1 + 0.8 * s)'}), '(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)\n', (4041, 4082), True, 'import tensorflow.compat.v2 as tf\n'), ((4091, 4158), 'tensorflow.compat.v2.image.random_saturation', 'tf.image.random_saturation', (['x'], {'lower': '(1 - 0.8 * s)', 'upper': '(1 + 0.8 * s)'}), '(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)\n', (4117, 4158), True, 'import tensorflow.compat.v2 as tf\n'), ((4167, 4208), 'tensorflow.compat.v2.image.random_hue', 'tf.image.random_hue', (['x'], {'max_delta': '(0.2 * s)'}), '(x, max_delta=0.2 * s)\n', (4186, 4208), True, 'import tensorflow.compat.v2 as tf\n'), ((4217, 4242), 'tensorflow.compat.v2.clip_by_value', 'tf.clip_by_value', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (4233, 4242), True, 'import tensorflow.compat.v2 as tf\n'), ((4286, 4314), 'tensorflow.compat.v2.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['x'], {}), '(x)\n', (4311, 4314), True, 'import tensorflow.compat.v2 as tf\n'), ((4323, 4344), 'tensorflow.compat.v2.tile', 'tf.tile', (['x', '[1, 1, 3]'], {}), '(x, [1, 1, 3])\n', (4330, 4344), True, 'import tensorflow.compat.v2 as tf\n'), ((4721, 4819), 'tensorflow.compat.v2.pad', 'tf.pad', (['image', '[[crop_padding, crop_padding], [crop_padding, crop_padding], [0, 0]]', '"""REFLECT"""'], {}), "(image, [[crop_padding, crop_padding], [crop_padding, crop_padding],\n [0, 0]], 'REFLECT')\n", (4727, 4819), True, 'import tensorflow.compat.v2 as tf\n'), ((4855, 4895), 'tensorflow.compat.v2.image.random_crop', 'tf.image.random_crop', (['image', '[32, 32, 3]'], {}), '(image, [32, 32, 3])\n', (4875, 4895), True, 'import 
tensorflow.compat.v2 as tf\n'), ((4908, 4946), 'tensorflow.compat.v2.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (4939, 4946), True, 'import tensorflow.compat.v2 as tf\n'), ((5040, 5087), 'tensorflow.compat.v2.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['image', '(32)', '(32)'], {}), '(image, 32, 32)\n', (5072, 5087), True, 'import tensorflow.compat.v2 as tf\n'), ((5390, 5488), 'tensorflow.compat.v2.pad', 'tf.pad', (['image', '[[crop_padding, crop_padding], [crop_padding, crop_padding], [0, 0]]', '"""REFLECT"""'], {}), "(image, [[crop_padding, crop_padding], [crop_padding, crop_padding],\n [0, 0]], 'REFLECT')\n", (5396, 5488), True, 'import tensorflow.compat.v2 as tf\n'), ((5524, 5564), 'tensorflow.compat.v2.image.random_crop', 'tf.image.random_crop', (['image', '[32, 32, 3]'], {}), '(image, [32, 32, 3])\n', (5544, 5564), True, 'import tensorflow.compat.v2 as tf\n'), ((5577, 5615), 'tensorflow.compat.v2.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (5608, 5615), True, 'import tensorflow.compat.v2 as tf\n'), ((5636, 5683), 'tensorflow.compat.v2.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['image', '(32)', '(32)'], {}), '(image, 32, 32)\n', (5668, 5683), True, 'import tensorflow.compat.v2 as tf\n'), ((6046, 6118), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': '"""train"""', 'as_supervised': 'as_supervised'}), "(name=dataset_name, split='train', as_supervised=as_supervised)\n", (6055, 6118), True, 'import tensorflow_datasets as tfds\n'), ((6844, 6865), 'numpy.vstack', 'np.vstack', (['all_images'], {}), '(all_images)\n', (6853, 6865), True, 'import numpy as np\n'), ((6870, 6896), 'numpy.random.seed', 'np.random.seed', (['FLAGS.copy'], {}), '(FLAGS.copy)\n', (6884, 6896), True, 'import numpy as np\n'), ((6901, 6930), 'numpy.random.shuffle', 'np.random.shuffle', (['all_labels'], {}), 
'(all_labels)\n', (6918, 6930), True, 'import numpy as np\n'), ((7853, 7870), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7867, 7870), True, 'import numpy as np\n'), ((7889, 7924), 'numpy.zeros', 'np.zeros', (['(n_data,)'], {'dtype': 'np.int64'}), '((n_data,), dtype=np.int64)\n', (7897, 7924), True, 'import numpy as np\n'), ((7941, 7974), 'numpy.random.rand', 'np.random.rand', (['n_data', '(32)', '(32)', '(3)'], {}), '(n_data, 32, 32, 3)\n', (7955, 7974), True, 'import numpy as np\n'), ((7994, 8054), 'tensorflow.compat.v2.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(test_data, test_labels)'], {}), '((test_data, test_labels))\n', (8028, 8054), True, 'import tensorflow.compat.v2 as tf\n'), ((8082, 8153), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': '"""test"""', 'as_supervised': 'as_supervised'}), "(name=dataset_name, split='test', as_supervised=as_supervised)\n", (8091, 8153), True, 'import tensorflow_datasets as tfds\n'), ((9637, 9734), 'do_wide_and_deep_networks_learn_the_same_things.shake_shake.build_shake_shake_model', 'build_shake_shake_model', (['num_classes', 'FLAGS.depth', 'FLAGS.width_multiplier', 'FLAGS.weight_decay'], {}), '(num_classes, FLAGS.depth, FLAGS.width_multiplier,\n FLAGS.weight_decay)\n', (9660, 9734), False, 'from do_wide_and_deep_networks_learn_the_same_things.shake_shake import build_shake_shake_model\n'), ((9787, 9952), 'do_wide_and_deep_networks_learn_the_same_things.resnet_cifar.ResNet_CIFAR', 'ResNet_CIFAR', (['FLAGS.depth', 'FLAGS.width_multiplier', 'FLAGS.weight_decay'], {'num_classes': 'num_classes', 'use_residual': 'FLAGS.use_residual', 'save_image': 'FLAGS.save_image'}), '(FLAGS.depth, FLAGS.width_multiplier, FLAGS.weight_decay,\n num_classes=num_classes, use_residual=FLAGS.use_residual, save_image=\n FLAGS.save_image)\n', (9799, 9952), False, 'from do_wide_and_deep_networks_learn_the_same_things.resnet_cifar import ResNet_CIFAR\n'), ((10042, 
10088), 'tensorflow.compat.v2.keras.models.load_model', 'tf.keras.models.load_model', (['FLAGS.pretrain_dir'], {}), '(FLAGS.pretrain_dir)\n', (10068, 10088), True, 'import tensorflow.compat.v2 as tf\n'), ((11435, 11529), 'tensorflow.compat.v2.keras.models.save_model', 'tf.keras.models.save_model', (['model', 'experiment_dir'], {'overwrite': '(True)', 'include_optimizer': '(False)'}), '(model, experiment_dir, overwrite=True,\n include_optimizer=False)\n', (11461, 11529), True, 'import tensorflow.compat.v2 as tf\n'), ((11583, 11780), 'tensorflow.compat.v2.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': "(experiment_dir + 'weights.{epoch:02d}.ckpt')", 'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(False)', 'save_freq': '"""epoch"""', 'period': 'FLAGS.epoch_save_freq'}), "(filepath=experiment_dir +\n 'weights.{epoch:02d}.ckpt', monitor='val_acc', verbose=1,\n save_best_only=False, save_freq='epoch', period=FLAGS.epoch_save_freq)\n", (11617, 11780), True, 'import tensorflow.compat.v2 as tf\n'), ((11847, 11962), 'tensorflow.compat.v2.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'experiment_dir', 'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=experiment_dir, monitor=\n 'val_acc', verbose=1, save_best_only=True)\n", (11881, 11962), True, 'import tensorflow.compat.v2 as tf\n'), ((12495, 12558), 'tensorflow.compat.v2.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (12540, 12558), True, 'import tensorflow.compat.v2 as tf\n'), ((6217, 6294), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': '"""train[:6%]"""', 'as_supervised': 'as_supervised'}), "(name=dataset_name, split='train[:6%]', as_supervised=as_supervised)\n", (6226, 6294), True, 'import tensorflow_datasets as tfds\n'), ((7236, 7288), 
'functools.partial', 'functools.partial', (['preprocess_data'], {'is_training': '(True)'}), '(preprocess_data, is_training=True)\n', (7253, 7288), False, 'import functools\n'), ((7345, 7405), 'functools.partial', 'functools.partial', (['preprocess_data_with_id'], {'is_training': '(True)'}), '(preprocess_data_with_id, is_training=True)\n', (7362, 7405), False, 'import functools\n'), ((10677, 10740), 'tensorflow.compat.v2.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (10722, 10740), True, 'import tensorflow.compat.v2 as tf\n'), ((10847, 10910), 'tensorflow.compat.v2.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (10892, 10910), True, 'import tensorflow.compat.v2 as tf\n'), ((6383, 6461), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': '"""train[:50%]"""', 'as_supervised': 'as_supervised'}), "(name=dataset_name, split='train[:50%]', as_supervised=as_supervised)\n", (6392, 6461), True, 'import tensorflow_datasets as tfds\n'), ((6529, 6607), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': '"""train[:25%]"""', 'as_supervised': 'as_supervised'}), "(name=dataset_name, split='train[:25%]', as_supervised=as_supervised)\n", (6538, 6607), True, 'import tensorflow_datasets as tfds\n'), ((6996, 7046), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['all_images'], {'dtype': 'tf.float32'}), '(all_images, dtype=tf.float32)\n', (7016, 7046), True, 'import tensorflow.compat.v2 as tf\n'), ((7057, 7105), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['all_labels'], {'dtype': 'tf.int64'}), '(all_labels, dtype=tf.int64)\n', (7077, 7105), True, 'import tensorflow.compat.v2 as tf\n'), ((8234, 8287), 'functools.partial', 'functools.partial', (['preprocess_data'], {'is_training': 
'(False)'}), '(preprocess_data, is_training=False)\n', (8251, 8287), False, 'import functools\n'), ((8348, 8409), 'functools.partial', 'functools.partial', (['preprocess_data_with_id'], {'is_training': '(False)'}), '(preprocess_data_with_id, is_training=False)\n', (8365, 8409), False, 'import functools\n'), ((12341, 12386), 'os.path.join', 'os.path.join', (['experiment_dir', '"""image_ids.pkl"""'], {}), "(experiment_dir, 'image_ids.pkl')\n", (12353, 12386), False, 'import os\n')] |
from typing import Union, Sequence
import numpy as np
from dpipe.itertools import lmap
from ..checks import join
# A single axis index or a sequence of axis indices (may be negative).
AxesLike = Union[int, Sequence[int]]
# A scalar parameter applied to every axis, or one value per axis.
AxesParams = Union[float, Sequence[float]]
def fill_by_indices(target, values, indices):
    """Return ``target`` as a tuple with the entries at ``indices`` replaced by ``values``."""
    positions = expand_axes(indices, values)
    result = np.array(target)
    result[list(positions)] = values
    return tuple(result)
def broadcast_to_axes(axes: Union[AxesLike, None], *arrays: AxesParams):
    """Broadcast ``arrays`` to the length of ``axes``.

    When ``axes`` is None it is inferred as the last ``max(len(array))``
    axes.  Each array must have either length 1 (it is repeated) or the
    same length as ``axes``.  Returns ``(axes, *broadcast_arrays)``.
    """
    if not arrays:
        raise ValueError('No arrays provided.')
    arrays = lmap(np.atleast_1d, arrays)
    lengths = lmap(len, arrays)
    axes = check_axes(list(range(-max(lengths), 0)) if axes is None else axes)
    if any(x != 1 and x != len(axes) for x in lengths):
        raise ValueError(f'Axes and arrays are not broadcastable: {len(axes)} vs {join(lengths)}.')
    repeated = [np.repeat(arr, len(axes) // len(arr), 0) for arr in arrays]
    return (axes, *repeated)
def check_axes(axes) -> tuple:
    """Coerce ``axes`` to a tuple of ints, validating rank, dtype and uniqueness."""
    as_array = np.atleast_1d(axes)
    if as_array.ndim != 1:
        raise ValueError(f'Axes must be 1D, but {as_array.ndim}D provided.')
    if not np.issubdtype(as_array.dtype, np.integer):
        raise ValueError(f'Axes must be integer, but {as_array.dtype} provided.')
    result = tuple(as_array)
    if len(set(result)) != len(result):
        raise ValueError(f'Axes contain duplicates: {result}.')
    return result
def expand_axes(axes, values) -> tuple:
    """Resolve ``axes`` against ``values``, inferring them when ``axes`` is None."""
    resolved, _ = broadcast_to_axes(axes, values)
    return resolved
def ndim2spatial_axes(ndim):
    """
    Return the negative indices of the last ``ndim`` axes.

    >>> ndim2spatial_axes(3)
    (-3, -2, -1)
    >>> ndim2spatial_axes(1)
    (-1,)
    """
    return tuple(-i for i in range(ndim, 0, -1))
| [
"numpy.issubdtype",
"numpy.array",
"dpipe.itertools.lmap",
"numpy.atleast_1d"
] | [((395, 411), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (403, 411), True, 'import numpy as np\n'), ((734, 761), 'dpipe.itertools.lmap', 'lmap', (['np.atleast_1d', 'arrays'], {}), '(np.atleast_1d, arrays)\n', (738, 761), False, 'from dpipe.itertools import lmap\n'), ((776, 793), 'dpipe.itertools.lmap', 'lmap', (['len', 'arrays'], {}), '(len, arrays)\n', (780, 793), False, 'from dpipe.itertools import lmap\n'), ((1188, 1207), 'numpy.atleast_1d', 'np.atleast_1d', (['axes'], {}), '(axes)\n', (1201, 1207), True, 'import numpy as np\n'), ((1315, 1352), 'numpy.issubdtype', 'np.issubdtype', (['axes.dtype', 'np.integer'], {}), '(axes.dtype, np.integer)\n', (1328, 1352), True, 'import numpy as np\n')] |
import numpy as np
class Frame(object):
    """A timestamped image frame backed by a numpy uint8 buffer.

    :param resolution: (dim0, dim1) size of the buffer.  NOTE(review): the
        code never fixes whether this is (rows, cols) or (width, height);
        confirm against the capture source.
    """

    def __init__(self, resolution=(0, 0)):
        self.timestamp = 0  # capture time of the current data
        self._resolution = resolution  # 2D buffer size used by createData
        self.data = self.createData()

    def createData(self, scale=1):
        """Allocate a fresh, uninitialized (dim0, dim1, scale) uint8 buffer."""
        return np.empty((self._resolution[0], self._resolution[1], scale),
                        dtype=np.uint8)

    def scale3(self, output):
        """Replicate this frame's first channel into all 3 channels of ``output``."""
        output.timestamp = self.timestamp
        # Reallocate the destination only when it is not already 3x our size.
        if output.data.size != self.data.size * 3:
            output.data = self.createData(3)
        # Broadcast channel 0 into all three output channels.
        output.data[:, :, :] = self.data[:, :, :1]

    def split(self, left, right, shape=(160, 320, 1)):
        """Split this frame into two half-width frames.

        The buffer is viewed as ``shape`` and cut in half along the column
        axis.  ``shape`` generalizes the previously hard-coded
        ``(160, 320, 1)`` reshape; the default preserves that behavior.

        :param left: Frame receiving the left half (data and timestamp).
        :param right: Frame receiving the right half.
        :param shape: 3-tuple the data is reshaped to before splitting;
            its element count must equal ``self.data.size``.
        """
        halves = np.hsplit(self.data.reshape(shape), 2)
        left.data, right.data = halves
        left.timestamp = self.timestamp
        right.timestamp = self.timestamp
"numpy.hsplit",
"numpy.empty"
] | [((209, 284), 'numpy.empty', 'np.empty', (['(self._resolution[0], self._resolution[1], scale)'], {'dtype': 'np.uint8'}), '((self._resolution[0], self._resolution[1], scale), dtype=np.uint8)\n', (217, 284), True, 'import numpy as np\n'), ((657, 680), 'numpy.hsplit', 'np.hsplit', (['temp_data', '(2)'], {}), '(temp_data, 2)\n', (666, 680), True, 'import numpy as np\n')] |
"""fif convertors."""
from ephypype.aux_tools import nostdout
def ep2ts(fif_file):
    """Dump the MEG time series of a .fif epochs file to ``ts_epochs.npy``.

    Reads the epochs, keeps only MEG channels, and writes the data array
    next to the current working directory.  Returns the absolute path of
    the written .npy file.
    """
    import os.path as op
    from mne import read_epochs
    from numpy import save
    # Silence MNE's verbose console output while reading.
    with nostdout():
        epochs = read_epochs(fif_file)
    meg_epochs = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)
    out_path = op.abspath('ts_epochs.npy')
    save(out_path, meg_epochs.get_data())
    return out_path
| [
"os.path.abspath",
"ephypype.aux_tools.nostdout",
"mne.read_epochs",
"numpy.save"
] | [((438, 465), 'os.path.abspath', 'op.abspath', (['"""ts_epochs.npy"""'], {}), "('ts_epochs.npy')\n", (448, 465), True, 'import os.path as op\n'), ((470, 491), 'numpy.save', 'save', (['save_path', 'data'], {}), '(save_path, data)\n', (474, 491), False, 'from numpy import save\n'), ((258, 268), 'ephypype.aux_tools.nostdout', 'nostdout', ([], {}), '()\n', (266, 268), False, 'from ephypype.aux_tools import nostdout\n'), ((287, 308), 'mne.read_epochs', 'read_epochs', (['fif_file'], {}), '(fif_file)\n', (298, 308), False, 'from mne import read_epochs\n')] |
import time
import requests
import argparse
import urllib3
import progressbar
from string import Template
import numpy as np
import json
from itertools import combinations
from bitstring import BitArray
urllib3.disable_warnings()
pb_widgets = [progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
es_index_tpl_str = """{
"settings": {
"number_of_shards": 5
},
"mappings": {
"data": {
"properties": $properties
}
}
}
"""
def binstr2uint(s):
"""
Convert binary string to unsigned integer
:param s: string
:return: unsigned int
"""
b = BitArray(bin=s)
return b.uint
def gen_masks(l=16, d=2):
"""
Generate mask for binary strings of length l
:param l: length of binary string
:param d: hamming distance
:return: list of masks as binary strings
"""
combs = []
ids = range(l)
for r in range(1, d + 1):
combs += list(combinations(ids, r))
masks = np.zeros((len(combs), l), dtype=np.int)
for i, c in enumerate(combs):
masks[i, c] = 1
masks_str = [(np.uint16)(binstr2uint("0" * l))] + [(np.uint16)(binstr2uint("".join(m))) for m in masks.astype(str)]
return masks_str
def get_nbs(q, masks):
"""
Compute neighbors by applying masks to query
:param q: query string
:param masks: list of binary strings
:return: list of neighbors as binary strings
"""
return np.bitwise_xor(q, masks, dtype=int)
def es_drop_and_create_index():
# Drop index
requests.delete(es_url + "/" + es_index, verify=False)
# Create index
s = Template(es_index_tpl_str)
s = s.substitute(properties=json.dumps(el_fields))
requests.put(es_url + "/" + es_index, s, headers={'Content-Type': 'application/json'})
# No read-only
requests.put(es_url + "/" + es_index + "/_settings",
"""{"index": {"blocks": {"read_only_allow_delete": "false"}}}""",
headers={'Content-Type': 'application/json'})
def es_add_batch_to_index(batch):
s = ""
for id in batch:
code_dict = {"nbs": get_nbs(id, masks).tolist()}
s += """{ "index": { "_id":"%s", "_index":"%s", "_type" : "data" } }
""" % (id, es_index,)
s += json.dumps(code_dict).replace('\n', ' ') + "\n" # Needs to be 1 line for ES bulk api!
r = requests.post(es_url + "/" + es_index + "/_bulk", s, headers={"Content-Type": "application/x-ndjson"})
jr = json.loads(r.text)
if "error" in jr:
print (jr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create ES index with all possible d1 and d2 neighbors for 16 bit codes')
parser.add_argument(
'--es_url',
default="http://elasticsearch:9200",
type=str,
help='Elastic Search URL with port (default: http://elasticsearch:9200)'
)
args = parser.parse_args()
# Try: int
el_fields = {
"nbs": {"type": "keyword"}, #, "eager_global_ordinals": True, "index_options:": "offsets"},
}
sc_len = 16
d = 2
num_ids = 2 ** sc_len
ids = range(num_ids)
es_index = "nbs"
es_url = args.es_url
es_drop_and_create_index()
print("""
-------------------------------------------------------
Entries: %d
-------------------------------------------------------
""" % (num_ids,))
num_lines_per_request = 500
masks = gen_masks(sc_len, d)
s = time.time()
print("Processing ids...")
bar = progressbar.ProgressBar(maxval=num_ids, \
widgets=pb_widgets)
bar.start()
for start in range(0, num_ids, num_lines_per_request):
bar.update(start)
end = start + num_lines_per_request
end = end if end <= num_ids else num_ids
batch = ids[start:end]
es_add_batch_to_index(batch)
bar.finish()
duration = time.time() - s
print("""
-------------------------------------------------------
Total time: %0.2fs for %d entries
Time per entry: %0.4fs
""" % (duration, num_ids, duration / num_ids))
| [
"progressbar.Bar",
"json.loads",
"requests.post",
"string.Template",
"argparse.ArgumentParser",
"numpy.bitwise_xor",
"json.dumps",
"requests.delete",
"itertools.combinations",
"urllib3.disable_warnings",
"progressbar.Percentage",
"bitstring.BitArray",
"requests.put",
"time.time",
"progre... | [((204, 230), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (228, 230), False, 'import urllib3\n'), ((246, 276), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (261, 276), False, 'import progressbar\n'), ((283, 307), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (305, 307), False, 'import progressbar\n'), ((597, 612), 'bitstring.BitArray', 'BitArray', ([], {'bin': 's'}), '(bin=s)\n', (605, 612), False, 'from bitstring import BitArray\n'), ((1414, 1449), 'numpy.bitwise_xor', 'np.bitwise_xor', (['q', 'masks'], {'dtype': 'int'}), '(q, masks, dtype=int)\n', (1428, 1449), True, 'import numpy as np\n'), ((1504, 1558), 'requests.delete', 'requests.delete', (["(es_url + '/' + es_index)"], {'verify': '(False)'}), "(es_url + '/' + es_index, verify=False)\n", (1519, 1558), False, 'import requests\n'), ((1586, 1612), 'string.Template', 'Template', (['es_index_tpl_str'], {}), '(es_index_tpl_str)\n', (1594, 1612), False, 'from string import Template\n'), ((1672, 1762), 'requests.put', 'requests.put', (["(es_url + '/' + es_index)", 's'], {'headers': "{'Content-Type': 'application/json'}"}), "(es_url + '/' + es_index, s, headers={'Content-Type':\n 'application/json'})\n", (1684, 1762), False, 'import requests\n'), ((1782, 1951), 'requests.put', 'requests.put', (["(es_url + '/' + es_index + '/_settings')", '"""{"index": {"blocks": {"read_only_allow_delete": "false"}}}"""'], {'headers': "{'Content-Type': 'application/json'}"}), '(es_url + \'/\' + es_index + \'/_settings\',\n \'{"index": {"blocks": {"read_only_allow_delete": "false"}}}\', headers={\n \'Content-Type\': \'application/json\'})\n', (1794, 1951), False, 'import requests\n'), ((2318, 2425), 'requests.post', 'requests.post', (["(es_url + '/' + es_index + '/_bulk')", 's'], {'headers': "{'Content-Type': 'application/x-ndjson'}"}), "(es_url + '/' + es_index + '/_bulk', s, headers={\n 'Content-Type': 
'application/x-ndjson'})\n", (2331, 2425), False, 'import requests\n'), ((2430, 2448), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (2440, 2448), False, 'import json\n'), ((2541, 2655), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create ES index with all possible d1 and d2 neighbors for 16 bit codes"""'}), "(description=\n 'Create ES index with all possible d1 and d2 neighbors for 16 bit codes')\n", (2564, 2655), False, 'import argparse\n'), ((3436, 3447), 'time.time', 'time.time', ([], {}), '()\n', (3445, 3447), False, 'import time\n'), ((3491, 3550), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'maxval': 'num_ids', 'widgets': 'pb_widgets'}), '(maxval=num_ids, widgets=pb_widgets)\n', (3514, 3550), False, 'import progressbar\n'), ((3885, 3896), 'time.time', 'time.time', ([], {}), '()\n', (3894, 3896), False, 'import time\n'), ((923, 943), 'itertools.combinations', 'combinations', (['ids', 'r'], {}), '(ids, r)\n', (935, 943), False, 'from itertools import combinations\n'), ((1645, 1666), 'json.dumps', 'json.dumps', (['el_fields'], {}), '(el_fields)\n', (1655, 1666), False, 'import json\n'), ((2223, 2244), 'json.dumps', 'json.dumps', (['code_dict'], {}), '(code_dict)\n', (2233, 2244), False, 'import json\n')] |
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from clean_data import dist
if __name__ == '__main__':
anchors = [
[0, 0, 1300],
[5000, 0, 1700],
[0, 5000, 1700],
[5000, 5000, 1300]
]
C = '正常'
# C = '异常'
files = range(324) # range(324)
tags = np.loadtxt('tags.txt', dtype=np.int)
errs = [[] for i in range(4)]
measure_err = []
noise_err = []
for n in tqdm(files):
mat = np.loadtxt(os.path.join(f"{C}数据_clean", f"{n+1}.{C}.txt"), dtype=np.int)
label = np.loadtxt(os.path.join(f"{C}数据_clean", f"{n+1}.{C}.label.txt"), dtype=np.int)
for i in range(len(mat)):
for j in range(4):
errs[j].append(mat[i][j] - dist(tags[n], anchors[j]))
if j+1 == label[i]:
noise_err.append(mat[i][j] + 45 - dist(tags[n], anchors[j]))
else:
measure_err.append(mat[i][j] - dist(tags[n], anchors[j]))
errst = np.array(errs).T
print(f"measure err: mean {np.mean(measure_err):.2f} std {np.std(measure_err):.2f}")
if C == '异常':
print(f"noise err: mean {np.mean(noise_err):.2f} std {np.std(noise_err):.2f}")
plt.figure()
for j in range(4):
ax = plt.subplot(2, 2, j+1)
ax.hist(errs[j], bins=20)
plt.savefig(f"{C}_noise_hist.svg", format='svg', dpi=300, transparent=True, bbox_inches='tight')
plt.show()
covar = np.corrcoef(np.array(errs))
plt.figure()
cmap = sns.light_palette((260, 75, 60), input="husl")
sns.heatmap(covar, annot=True, cmap=cmap, cbar=True)
plt.savefig(f"{C}_cov.svg", format='svg', dpi=300, transparent=True, bbox_inches='tight')
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"seaborn.light_palette",
"tqdm.tqdm",
"os.path.join",
"seaborn.heatmap",
"numpy.array",
"matplotlib.pyplot.figure",
"clean_data.dist",
"numpy.std",
"numpy.loadtxt",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((360, 396), 'numpy.loadtxt', 'np.loadtxt', (['"""tags.txt"""'], {'dtype': 'np.int'}), "('tags.txt', dtype=np.int)\n", (370, 396), True, 'import numpy as np\n'), ((484, 495), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (488, 495), False, 'from tqdm import tqdm\n'), ((1259, 1271), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1269, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1469), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{C}_noise_hist.svg"""'], {'format': '"""svg"""', 'dpi': '(300)', 'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(f'{C}_noise_hist.svg', format='svg', dpi=300, transparent=True,\n bbox_inches='tight')\n", (1380, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1480), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1478, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1538), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1536, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1596), 'seaborn.light_palette', 'sns.light_palette', (['(260, 75, 60)'], {'input': '"""husl"""'}), "((260, 75, 60), input='husl')\n", (1567, 1596), True, 'import seaborn as sns\n'), ((1601, 1653), 'seaborn.heatmap', 'sns.heatmap', (['covar'], {'annot': '(True)', 'cmap': 'cmap', 'cbar': '(True)'}), '(covar, annot=True, cmap=cmap, cbar=True)\n', (1612, 1653), True, 'import seaborn as sns\n'), ((1658, 1751), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{C}_cov.svg"""'], {'format': '"""svg"""', 'dpi': '(300)', 'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(f'{C}_cov.svg', format='svg', dpi=300, transparent=True,\n bbox_inches='tight')\n", (1669, 1751), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1760, 1762), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1057), 'numpy.array', 'np.array', (['errs'], {}), '(errs)\n', (1051, 1057), True, 'import numpy as np\n'), ((1308, 1332), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(j + 1)'], {}), '(2, 2, j + 1)\n', (1319, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1520), 'numpy.array', 'np.array', (['errs'], {}), '(errs)\n', (1514, 1520), True, 'import numpy as np\n'), ((522, 570), 'os.path.join', 'os.path.join', (['f"""{C}数据_clean"""', 'f"""{n + 1}.{C}.txt"""'], {}), "(f'{C}数据_clean', f'{n + 1}.{C}.txt')\n", (534, 570), False, 'import os\n'), ((611, 665), 'os.path.join', 'os.path.join', (['f"""{C}数据_clean"""', 'f"""{n + 1}.{C}.label.txt"""'], {}), "(f'{C}数据_clean', f'{n + 1}.{C}.label.txt')\n", (623, 665), False, 'import os\n'), ((1091, 1111), 'numpy.mean', 'np.mean', (['measure_err'], {}), '(measure_err)\n', (1098, 1111), True, 'import numpy as np\n'), ((1122, 1141), 'numpy.std', 'np.std', (['measure_err'], {}), '(measure_err)\n', (1128, 1141), True, 'import numpy as np\n'), ((1200, 1218), 'numpy.mean', 'np.mean', (['noise_err'], {}), '(noise_err)\n', (1207, 1218), True, 'import numpy as np\n'), ((1229, 1246), 'numpy.std', 'np.std', (['noise_err'], {}), '(noise_err)\n', (1235, 1246), True, 'import numpy as np\n'), ((787, 812), 'clean_data.dist', 'dist', (['tags[n]', 'anchors[j]'], {}), '(tags[n], anchors[j])\n', (791, 812), False, 'from clean_data import dist\n'), ((904, 929), 'clean_data.dist', 'dist', (['tags[n]', 'anchors[j]'], {}), '(tags[n], anchors[j])\n', (908, 929), False, 'from clean_data import dist\n'), ((1004, 1029), 'clean_data.dist', 'dist', (['tags[n]', 'anchors[j]'], {}), '(tags[n], anchors[j])\n', (1008, 1029), False, 'from clean_data import dist\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
from dialogError import Ui_DialogError
import yfinance as yf
import matplotlib as mpl
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import pandas as pd
import pandas_datareader as pdr
import numpy as np
import chart_studio.plotly as py
from datetime import datetime
from datetime import date
import pyfolio as pf
from pyfolio import plotting as pa
import icons
class Ui_TabWidgetPortfolio(object):
def openError(self):
self.dialog=QtWidgets.QDialog()
self.uiError=Ui_DialogError()
self.uiError.setupUi(self.dialog)
self.dialog.show()
def setupUi(self, TabWidgetPortfolio):
TabWidgetPortfolio.setObjectName("TabWidgetPortfolio")
TabWidgetPortfolio.resize(1124, 854)
TabWidgetPortfolio.setMinimumSize(QtCore.QSize(1124, 854))
TabWidgetPortfolio.setMaximumSize(QtCore.QSize(1124, 854))
font = QtGui.QFont()
font.setPointSize(18)
TabWidgetPortfolio.setFont(font)
TabWidgetPortfolio.setTabPosition(QtWidgets.QTabWidget.North)
TabWidgetPortfolio.setIconSize(QtCore.QSize(26, 26))
TabWidgetPortfolio.setElideMode(QtCore.Qt.ElideNone)
self.tab_portfolio = QtWidgets.QWidget()
self.tab_portfolio.setObjectName("tab_portfolio")
self.listWidget = QtWidgets.QListWidget(self.tab_portfolio)
self.listWidget.setGeometry(QtCore.QRect(80, 210, 241, 341))
self.listWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
self.listWidget.setFrameShadow(QtWidgets.QFrame.Sunken)
self.listWidget.setObjectName("listWidget")
self.comboBoxSymbol = QtWidgets.QComboBox(self.tab_portfolio)
self.comboBoxSymbol.setGeometry(QtCore.QRect(80, 130, 241, 51))
self.comboBoxSymbol.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
self.comboBoxSymbol.setEditable(True)
self.comboBoxSymbol.setObjectName("comboBoxSymbol")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.comboBoxSymbol.addItem("")
self.labelPeriod = QtWidgets.QLabel(self.tab_portfolio)
self.labelPeriod.setGeometry(QtCore.QRect(610, 130, 111, 41))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(18)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelPeriod.setFont(font)
self.labelPeriod.setStyleSheet("")
self.labelPeriod.setObjectName("labelPeriod")
self.labelDateRange = QtWidgets.QLabel(self.tab_portfolio)
self.labelDateRange.setGeometry(QtCore.QRect(640, 180, 171, 31))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelDateRange.setFont(font)
self.labelDateRange.setStyleSheet("")
self.labelDateRange.setObjectName("labelDateRange")
self.label_3 = QtWidgets.QLabel(self.tab_portfolio)
self.label_3.setGeometry(QtCore.QRect(640, 220, 81, 41))
font = QtGui.QFont()
font.setPointSize(15)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.dateStart = QtWidgets.QDateEdit(self.tab_portfolio)
self.dateStart.setGeometry(QtCore.QRect(640, 260, 181, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.dateStart.setFont(font)
self.dateStart.setStyleSheet("background-color: rgb(216, 216, 216);")
self.dateStart.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateStart.setDate(QtCore.QDate(2019, 1, 1))
self.dateStart.setObjectName("dateStart")
self.labelParameter = QtWidgets.QLabel(self.tab_portfolio)
self.labelParameter.setGeometry(QtCore.QRect(930, 180, 161, 31))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelParameter.setFont(font)
self.labelParameter.setStyleSheet("")
self.labelParameter.setObjectName("labelParameter")
self.comboBoxPeriod = QtWidgets.QComboBox(self.tab_portfolio)
self.comboBoxPeriod.setGeometry(QtCore.QRect(930, 220, 81, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxPeriod.setFont(font)
self.comboBoxPeriod.setStyleSheet("background-color: rgb(216, 216, 216);")
self.comboBoxPeriod.setObjectName("comboBoxPeriod")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.comboBoxPeriod.addItem("")
self.labelInterval = QtWidgets.QLabel(self.tab_portfolio)
self.labelInterval.setGeometry(QtCore.QRect(620, 400, 141, 41))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(18)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelInterval.setFont(font)
self.labelInterval.setStyleSheet("")
self.labelInterval.setObjectName("labelInterval")
self.comboBoxInterval = QtWidgets.QComboBox(self.tab_portfolio)
self.comboBoxInterval.setGeometry(QtCore.QRect(640, 450, 81, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxInterval.setFont(font)
self.comboBoxInterval.setStyleSheet("background-color: rgb(216, 216, 216);")
self.comboBoxInterval.setObjectName("comboBoxInterval")
self.comboBoxInterval.addItem("")
self.comboBoxInterval.addItem("")
self.comboBoxInterval.addItem("")
self.comboBoxInterval.addItem("")
self.comboBoxInterval.addItem("")
self.radioButtonRange = QtWidgets.QRadioButton(self.tab_portfolio)
self.radioButtonRange.setGeometry(QtCore.QRect(610, 180, 21, 20))
self.radioButtonRange.setText("")
self.radioButtonRange.setChecked(True)
self.radioButtonRange.setObjectName("radioButtonRange")
self.radioButtonPeriod = QtWidgets.QRadioButton(self.tab_portfolio)
self.radioButtonPeriod.setGeometry(QtCore.QRect(900, 180, 16, 21))
self.radioButtonPeriod.setText("")
self.radioButtonPeriod.setObjectName("radioButtonPeriod")
self.label_2 = QtWidgets.QLabel(self.tab_portfolio)
self.label_2.setGeometry(QtCore.QRect(640, 300, 51, 31))
font = QtGui.QFont()
font.setPointSize(15)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.dateEnd = QtWidgets.QDateEdit(self.tab_portfolio)
self.dateEnd.setGeometry(QtCore.QRect(640, 330, 181, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.dateEnd.setFont(font)
self.dateEnd.setStyleSheet("background-color: rgb(216, 216, 216);")
self.dateEnd.setDateTime(QtCore.QDateTime(QtCore.QDate(2020, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEnd.setDate(QtCore.QDate(2020, 1, 1))
self.dateEnd.setObjectName("dateEnd")
self.AddButton = QtWidgets.QPushButton(self.tab_portfolio)
self.AddButton.setGeometry(QtCore.QRect(350, 140, 91, 41))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.AddButton.setFont(font)
self.AddButton.setStyleSheet("font: 87 15pt \"Arial Black\";\n"
"background-color: rgb(206, 206, 206);")
self.AddButton.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/add/add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.AddButton.setIcon(icon)
self.AddButton.setIconSize(QtCore.QSize(34, 34))
self.AddButton.setObjectName("AddButton")
self.DeleteButton = QtWidgets.QPushButton(self.tab_portfolio)
self.DeleteButton.setGeometry(QtCore.QRect(350, 210, 91, 41))
self.DeleteButton.setStyleSheet("font: 87 12pt \"Arial Black\";\n"
"background-color: rgba(206, 206, 206, 206);")
self.DeleteButton.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/delete/minus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DeleteButton.setIcon(icon1)
self.DeleteButton.setIconSize(QtCore.QSize(34, 34))
self.DeleteButton.setObjectName("DeleteButton")
self.candlestickButton = QtWidgets.QPushButton(self.tab_portfolio)
self.candlestickButton.setGeometry(QtCore.QRect(200, 760, 151, 41))
self.candlestickButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/candlestick/candlestick.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.candlestickButton.setIcon(icon2)
self.candlestickButton.setIconSize(QtCore.QSize(34, 34))
self.candlestickButton.setObjectName("candlestickButton")
self.OHLCButton = QtWidgets.QPushButton(self.tab_portfolio)
self.OHLCButton.setGeometry(QtCore.QRect(360, 760, 141, 41))
self.OHLCButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/ohlc/ohlc.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.OHLCButton.setIcon(icon3)
self.OHLCButton.setIconSize(QtCore.QSize(24, 24))
self.OHLCButton.setObjectName("OHLCButton")
self.timeSeriesDataButton = QtWidgets.QPushButton(self.tab_portfolio)
self.timeSeriesDataButton.setGeometry(QtCore.QRect(10, 760, 181, 41))
self.timeSeriesDataButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/timeseries/timeser.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.timeSeriesDataButton.setIcon(icon4)
self.timeSeriesDataButton.setIconSize(QtCore.QSize(34, 34))
self.timeSeriesDataButton.setObjectName("timeSeriesDataButton")
self.dailyPercentageChangeButton = QtWidgets.QPushButton(self.tab_portfolio)
self.dailyPercentageChangeButton.setGeometry(QtCore.QRect(660, 760, 191, 41))
self.dailyPercentageChangeButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/histogram/chart-histogram-512.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.dailyPercentageChangeButton.setIcon(icon5)
self.dailyPercentageChangeButton.setIconSize(QtCore.QSize(34, 34))
self.dailyPercentageChangeButton.setObjectName("dailyPercentageChangeButton")
self.volatilityButton = QtWidgets.QPushButton(self.tab_portfolio)
self.volatilityButton.setGeometry(QtCore.QRect(510, 760, 141, 41))
self.volatilityButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/volatility/volatility-512.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.volatilityButton.setIcon(icon6)
self.volatilityButton.setIconSize(QtCore.QSize(34, 34))
self.volatilityButton.setObjectName("volatilityButton")
self.volumeButton = QtWidgets.QPushButton(self.tab_portfolio)
self.volumeButton.setGeometry(QtCore.QRect(860, 760, 141, 41))
self.volumeButton.setStyleSheet("\n"
"font: 87 8pt \"Arial Black\";")
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/area/area.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.volumeButton.setIcon(icon7)
self.volumeButton.setIconSize(QtCore.QSize(34, 34))
self.volumeButton.setObjectName("volumeButton")
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/portfolio/portfolio.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
TabWidgetPortfolio.addTab(self.tab_portfolio, icon8, "")
self.tab_advisor = QtWidgets.QWidget()
self.tab_advisor.setObjectName("tab_advisor")
self.toolBoxAdvisor = QtWidgets.QToolBox(self.tab_advisor)
self.toolBoxAdvisor.setGeometry(QtCore.QRect(0, 220, 1121, 581))
self.toolBoxAdvisor.setObjectName("toolBoxAdvisor")
self.pageNaive = QtWidgets.QWidget()
self.pageNaive.setGeometry(QtCore.QRect(0, 0, 1121, 428))
self.pageNaive.setObjectName("pageNaive")
self.comboBoxThreshold = QtWidgets.QComboBox(self.pageNaive)
self.comboBoxThreshold.setGeometry(QtCore.QRect(160, 30, 111, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxThreshold.setFont(font)
self.comboBoxThreshold.setStyleSheet("")
self.comboBoxThreshold.setObjectName("comboBoxThreshold")
self.comboBoxThreshold.addItem("")
self.comboBoxThreshold.addItem("")
self.comboBoxThreshold.addItem("")
self.comboBoxThreshold.addItem("")
self.comboBoxThreshold.addItem("")
self.labelThreshold = QtWidgets.QLabel(self.pageNaive)
self.labelThreshold.setGeometry(QtCore.QRect(30, 20, 121, 51))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelThreshold.setFont(font)
self.labelThreshold.setStyleSheet("")
self.labelThreshold.setObjectName("labelThreshold")
self.naiveButton = QtWidgets.QPushButton(self.pageNaive)
self.naiveButton.setGeometry(QtCore.QRect(500, 100, 121, 41))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.naiveButton.setFont(font)
self.naiveButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
self.naiveButton.setObjectName("naiveButton")
self.toolBoxAdvisor.addItem(self.pageNaive, "")
self.pageMAC = QtWidgets.QWidget()
self.pageMAC.setGeometry(QtCore.QRect(0, 0, 1121, 428))
self.pageMAC.setObjectName("pageMAC")
self.labelShort = QtWidgets.QLabel(self.pageMAC)
self.labelShort.setGeometry(QtCore.QRect(30, 40, 261, 31))
self.labelShort.setStyleSheet("font: 15pt \"MS Shell Dlg 2\";")
self.labelShort.setObjectName("labelShort")
self.comboBoxShort = QtWidgets.QComboBox(self.pageMAC)
self.comboBoxShort.setGeometry(QtCore.QRect(290, 40, 81, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxShort.setFont(font)
self.comboBoxShort.setStyleSheet("")
self.comboBoxShort.setObjectName("comboBoxShort")
self.comboBoxShort.addItem("")
self.comboBoxShort.addItem("")
self.comboBoxShort.addItem("")
self.labelLong = QtWidgets.QLabel(self.pageMAC)
self.labelLong.setGeometry(QtCore.QRect(30, 100, 251, 31))
self.labelLong.setStyleSheet("font: 15pt \"MS Shell Dlg 2\";")
self.labelLong.setObjectName("labelLong")
self.comboBoxLong = QtWidgets.QComboBox(self.pageMAC)
self.comboBoxLong.setGeometry(QtCore.QRect(290, 100, 81, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxLong.setFont(font)
self.comboBoxLong.setStyleSheet("")
self.comboBoxLong.setObjectName("comboBoxLong")
self.comboBoxLong.addItem("")
self.comboBoxLong.addItem("")
self.comboBoxLong.addItem("")
self.MACButton = QtWidgets.QPushButton(self.pageMAC)
self.MACButton.setGeometry(QtCore.QRect(500, 190, 131, 31))
self.MACButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
self.MACButton.setObjectName("MACButton")
self.toolBoxAdvisor.addItem(self.pageMAC, "")
self.pageTurtle = QtWidgets.QWidget()
self.pageTurtle.setGeometry(QtCore.QRect(0, 0, 1121, 428))
self.pageTurtle.setObjectName("pageTurtle")
self.labelBreakout = QtWidgets.QLabel(self.pageTurtle)
self.labelBreakout.setGeometry(QtCore.QRect(30, 30, 121, 41))
font = QtGui.QFont()
font.setPointSize(15)
self.labelBreakout.setFont(font)
self.labelBreakout.setObjectName("labelBreakout")
self.comboBoxBreakout = QtWidgets.QComboBox(self.pageTurtle)
self.comboBoxBreakout.setGeometry(QtCore.QRect(170, 40, 71, 31))
font = QtGui.QFont()
font.setPointSize(16)
self.comboBoxBreakout.setFont(font)
self.comboBoxBreakout.setObjectName("comboBoxBreakout")
self.comboBoxBreakout.addItem("")
self.comboBoxBreakout.addItem("")
self.comboBoxBreakout.addItem("")
self.comboBoxBreakout.addItem("")
self.comboBoxBreakout.addItem("")
self.turtleButton = QtWidgets.QPushButton(self.pageTurtle)
self.turtleButton.setGeometry(QtCore.QRect(500, 100, 121, 41))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(15)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.turtleButton.setFont(font)
self.turtleButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
self.turtleButton.setObjectName("turtleButton")
self.toolBoxAdvisor.addItem(self.pageTurtle, "")
self.labelInitialCapital = QtWidgets.QLabel(self.tab_advisor)
self.labelInitialCapital.setGeometry(QtCore.QRect(30, 70, 241, 51))
self.labelInitialCapital.setStyleSheet("font: 87 18pt \"Arial Black\";")
self.labelInitialCapital.setObjectName("labelInitialCapital")
self.comboBoxInitialCapital = QtWidgets.QComboBox(self.tab_advisor)
self.comboBoxInitialCapital.setGeometry(QtCore.QRect(270, 80, 131, 31))
self.comboBoxInitialCapital.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
self.comboBoxInitialCapital.setEditable(True)
self.comboBoxInitialCapital.setObjectName("comboBoxInitialCapital")
self.comboBoxInitialCapital.addItem("")
self.comboBoxInitialCapital.addItem("")
self.comboBoxInitialCapital.addItem("")
self.comboBoxInitialCapital.addItem("")
self.labelCommission = QtWidgets.QLabel(self.tab_advisor)
self.labelCommission.setGeometry(QtCore.QRect(540, 70, 221, 41))
self.labelCommission.setStyleSheet("font: 87 18pt \"Arial Black\";")
self.labelCommission.setObjectName("labelCommission")
self.comboBoxCommission = QtWidgets.QComboBox(self.tab_advisor)
self.comboBoxCommission.setGeometry(QtCore.QRect(760, 80, 101, 31))
self.comboBoxCommission.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
self.comboBoxCommission.setEditable(True)
self.comboBoxCommission.setObjectName("comboBoxCommission")
self.comboBoxCommission.addItem("")
self.comboBoxCommission.addItem("")
self.comboBoxCommission.addItem("")
self.comboBoxCommission.addItem("")
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/advisor/advisor.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
TabWidgetPortfolio.addTab(self.tab_advisor, icon9, "")
self.retranslateUi(TabWidgetPortfolio)
TabWidgetPortfolio.setCurrentIndex(0)
self.toolBoxAdvisor.setCurrentIndex(0)
self.toolBoxAdvisor.layout().setSpacing(7)
QtCore.QMetaObject.connectSlotsByName(TabWidgetPortfolio)
self.AddButton.clicked.connect(self.addToList)
self.DeleteButton.clicked.connect(self.removeItems)
self.comboBoxPeriod.setDisabled(True)
self.timeSeriesDataButton.clicked.connect(self.plotTimeSeries)
self.candlestickButton.clicked.connect(self.plotCandlestick)
self.OHLCButton.clicked.connect(self.plotOHLC)
self.volatilityButton.clicked.connect(self.plotVolatility)
self.dailyPercentageChangeButton.clicked.connect(self.calculatePctChange)
self.volumeButton.clicked.connect(self.plotVolume)
self.naiveButton.clicked.connect(self.buildTableNaive)
self.MACButton.clicked.connect(self.buildTableMAC)
self.turtleButton.clicked.connect(self.turtleStrategy)
self.radioButtonRange.clicked.connect(self.disablePeriod)
self.radioButtonPeriod.clicked.connect(self.disableRange)
self.comboBoxPeriod.currentIndexChanged.connect(self.changeIntervalOptionsForPeriod)
self.radioButtonRange.clicked.connect(self.changeIntervalOptionsForRange)
self.radioButtonPeriod.clicked.connect(self.changeIntervalOptionsForPeriod)
self.dateEnd.dateChanged.connect(self.startDateLowerThenEnd)
def retranslateUi(self, TabWidgetPortfolio):
    """Install all user-visible strings (pyuic-generated) and clamp the date pickers.

    Called once from setupUi and again whenever the application language
    changes; every literal goes through Qt's translate() so it can be
    localized.
    """
    _translate = QtCore.QCoreApplication.translate
    TabWidgetPortfolio.setWindowTitle(_translate("TabWidgetPortfolio", "TabWidget"))
    # --- symbol selector: item 0 is the placeholder, 1-8 are friendly names
    # mapped to tickers by showSymbol() ---------------------------------------
    self.comboBoxSymbol.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><span style=\" font-size:12pt;\">Type/ Select Symbol</span></p></body></html>"))
    self.comboBoxSymbol.setCurrentText(_translate("TabWidgetPortfolio", "Enter Symbol"))
    self.comboBoxSymbol.setItemText(0, _translate("TabWidgetPortfolio", "Enter Symbol"))
    self.comboBoxSymbol.setItemText(1, _translate("TabWidgetPortfolio", "Microsoft"))
    self.comboBoxSymbol.setItemText(2, _translate("TabWidgetPortfolio", "Apple"))
    self.comboBoxSymbol.setItemText(3, _translate("TabWidgetPortfolio", "Amazon"))
    self.comboBoxSymbol.setItemText(4, _translate("TabWidgetPortfolio", "Alphabet"))
    self.comboBoxSymbol.setItemText(5, _translate("TabWidgetPortfolio", "Alibaba"))
    self.comboBoxSymbol.setItemText(6, _translate("TabWidgetPortfolio", "Facebook"))
    self.comboBoxSymbol.setItemText(7, _translate("TabWidgetPortfolio", "Visa"))
    self.comboBoxSymbol.setItemText(8, _translate("TabWidgetPortfolio", "Walmart"))
    # --- period / interval query controls (values are yfinance period and
    # interval codes) ---------------------------------------------------------
    self.labelPeriod.setText(_translate("TabWidgetPortfolio", "Period:"))
    self.labelDateRange.setText(_translate("TabWidgetPortfolio", "By Date Range"))
    self.label_3.setText(_translate("TabWidgetPortfolio", "Start:"))
    self.dateStart.setDisplayFormat(_translate("TabWidgetPortfolio", "yyyy-MM-dd"))
    self.labelParameter.setText(_translate("TabWidgetPortfolio", "By Parameter"))
    self.comboBoxPeriod.setItemText(0, _translate("TabWidgetPortfolio", "1d"))
    self.comboBoxPeriod.setItemText(1, _translate("TabWidgetPortfolio", "5d"))
    self.comboBoxPeriod.setItemText(2, _translate("TabWidgetPortfolio", "7d"))
    self.comboBoxPeriod.setItemText(3, _translate("TabWidgetPortfolio", "1mo"))
    self.comboBoxPeriod.setItemText(4, _translate("TabWidgetPortfolio", "3mo"))
    self.comboBoxPeriod.setItemText(5, _translate("TabWidgetPortfolio", "6mo"))
    self.comboBoxPeriod.setItemText(6, _translate("TabWidgetPortfolio", "1y"))
    self.comboBoxPeriod.setItemText(7, _translate("TabWidgetPortfolio", "2y"))
    self.comboBoxPeriod.setItemText(8, _translate("TabWidgetPortfolio", "5y"))
    self.comboBoxPeriod.setItemText(9, _translate("TabWidgetPortfolio", "10y"))
    self.labelInterval.setText(_translate("TabWidgetPortfolio", "Interval:"))
    self.comboBoxInterval.setItemText(0, _translate("TabWidgetPortfolio", "1d"))
    self.comboBoxInterval.setItemText(1, _translate("TabWidgetPortfolio", "5d"))
    self.comboBoxInterval.setItemText(2, _translate("TabWidgetPortfolio", "1wk"))
    self.comboBoxInterval.setItemText(3, _translate("TabWidgetPortfolio", "1mo"))
    self.comboBoxInterval.setItemText(4, _translate("TabWidgetPortfolio", "3mo"))
    self.label_2.setText(_translate("TabWidgetPortfolio", "End:"))
    self.dateEnd.setDisplayFormat(_translate("TabWidgetPortfolio", "yyyy-MM-dd"))
    self.AddButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><span style=\" font-size:12pt;\">Add Symbol</span></p></body></html>"))
    self.DeleteButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p>Delete Symbol</p></body></html>"))
    # --- chart buttons on the Portfolio tab ----------------------------------
    self.candlestickButton.setText(_translate("TabWidgetPortfolio", "Candlestick"))
    self.OHLCButton.setText(_translate("TabWidgetPortfolio", "OHLC"))
    self.timeSeriesDataButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><br/></p></body></html>"))
    self.timeSeriesDataButton.setText(_translate("TabWidgetPortfolio", "Time Series Data"))
    self.dailyPercentageChangeButton.setText(_translate("TabWidgetPortfolio", "Percentage Change "))
    self.volatilityButton.setText(_translate("TabWidgetPortfolio", "Volatility"))
    self.volumeButton.setText(_translate("TabWidgetPortfolio", "Volume"))
    TabWidgetPortfolio.setTabText(TabWidgetPortfolio.indexOf(self.tab_portfolio), _translate("TabWidgetPortfolio", "Portfolio"))
    # --- Trade Advisor tab: naive / MAC / turtle strategy parameters ---------
    self.comboBoxThreshold.setItemText(0, _translate("TabWidgetPortfolio", "5"))
    self.comboBoxThreshold.setItemText(1, _translate("TabWidgetPortfolio", "7"))
    self.comboBoxThreshold.setItemText(2, _translate("TabWidgetPortfolio", "10"))
    self.comboBoxThreshold.setItemText(3, _translate("TabWidgetPortfolio", "15"))
    self.comboBoxThreshold.setItemText(4, _translate("TabWidgetPortfolio", "20"))
    self.labelThreshold.setText(_translate("TabWidgetPortfolio", "Threshold:"))
    self.naiveButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageNaive), _translate("TabWidgetPortfolio", "Naive Trading Strategy"))
    self.labelShort.setText(_translate("TabWidgetPortfolio", "Short Moving Average:"))
    self.comboBoxShort.setItemText(0, _translate("TabWidgetPortfolio", "5"))
    self.comboBoxShort.setItemText(1, _translate("TabWidgetPortfolio", "10"))
    self.comboBoxShort.setItemText(2, _translate("TabWidgetPortfolio", "25"))
    self.labelLong.setText(_translate("TabWidgetPortfolio", "Long Moving Average:"))
    self.comboBoxLong.setItemText(0, _translate("TabWidgetPortfolio", "50"))
    self.comboBoxLong.setItemText(1, _translate("TabWidgetPortfolio", "100"))
    self.comboBoxLong.setItemText(2, _translate("TabWidgetPortfolio", "200"))
    self.MACButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageMAC), _translate("TabWidgetPortfolio", "Two Moving Average Crossover Strategy"))
    self.labelBreakout.setText(_translate("TabWidgetPortfolio", "Breakout:"))
    self.comboBoxBreakout.setCurrentText(_translate("TabWidgetPortfolio", "35"))
    self.comboBoxBreakout.setItemText(0, _translate("TabWidgetPortfolio", "35"))
    self.comboBoxBreakout.setItemText(1, _translate("TabWidgetPortfolio", "40"))
    self.comboBoxBreakout.setItemText(2, _translate("TabWidgetPortfolio", "45"))
    self.comboBoxBreakout.setItemText(3, _translate("TabWidgetPortfolio", "50"))
    self.comboBoxBreakout.setItemText(4, _translate("TabWidgetPortfolio", "55"))
    self.turtleButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageTurtle), _translate("TabWidgetPortfolio", "Turtle Strategy"))
    self.labelInitialCapital.setText(_translate("TabWidgetPortfolio", "Initial Capital:"))
    self.comboBoxInitialCapital.setCurrentText(_translate("TabWidgetPortfolio", "100000"))
    self.comboBoxInitialCapital.setItemText(0, _translate("TabWidgetPortfolio", "100000"))
    self.comboBoxInitialCapital.setItemText(1, _translate("TabWidgetPortfolio", "150000"))
    self.comboBoxInitialCapital.setItemText(2, _translate("TabWidgetPortfolio", "200000"))
    self.comboBoxInitialCapital.setItemText(3, _translate("TabWidgetPortfolio", "250000"))
    self.labelCommission.setText(_translate("TabWidgetPortfolio", "Commission:"))
    self.comboBoxCommission.setItemText(0, _translate("TabWidgetPortfolio", "0.00"))
    self.comboBoxCommission.setItemText(1, _translate("TabWidgetPortfolio", "0.02"))
    self.comboBoxCommission.setItemText(2, _translate("TabWidgetPortfolio", "0.03"))
    self.comboBoxCommission.setItemText(3, _translate("TabWidgetPortfolio", "0.04"))
    TabWidgetPortfolio.setTabText(TabWidgetPortfolio.indexOf(self.tab_advisor), _translate("TabWidgetPortfolio", "Trade Advisor"))
    #############################SET#####################################################################################################
    # NOTE(review): the two lines below configure widget state rather than
    # translate text, so they re-run on every retranslation. Presumably
    # harmless, but they would conventionally live in setupUi — confirm.
    self.dateEnd.setMaximumDate(date.today())
    self.dateStart.setMaximumDate(self.dateEnd.date())
def startDateLowerThenEnd(self):
    """Slot for dateEnd.dateChanged: never let the start date exceed the chosen end date."""
    latest_allowed = self.dateEnd.date()
    self.dateStart.setMaximumDate(latest_allowed)
def disablePeriod(self):
    """Slot for the 'by date range' radio button: unlock both date pickers and lock the period combo box."""
    for picker in (self.dateEnd, self.dateStart):
        picker.setEnabled(True)
    # setEnabled(False) is Qt's equivalent of setDisabled(True)
    self.comboBoxPeriod.setEnabled(False)
def disableRange(self):
    """Slot for the 'by period' radio button: lock both date pickers and unlock the period combo box."""
    for picker in (self.dateEnd, self.dateStart):
        picker.setEnabled(False)  # same effect as setDisabled(True)
    self.comboBoxPeriod.setEnabled(True)
def changeIntervalOptionsForPeriod(self):
    """Repopulate the interval combo box with the interval codes yfinance
    accepts for the currently selected period.

    Intraday intervals are only available for short periods; long periods
    only allow daily-and-coarser intervals.

    FIX: the period "6mo" (offered by comboBoxPeriod) previously matched no
    branch, so selecting it left stale interval options in place; it is now
    grouped with the other long periods. The repetitive
    addItem("")/setItemText pairs are replaced by a single addItems() call
    with an identical final state.
    """
    period = self.comboBoxPeriod.currentText()
    if period in ("1mo", "60d"):
        # medium period: intraday down to 2 minutes
        options = ["2m", "5m", "15m", "30m", "60m", "90m",
                   "1d", "5d", "1wk", "1mo", "3mo"]
    elif period in ("1d", "5d", "7d"):
        # short period: 1-minute bars are also available
        options = ["1m", "2m", "5m", "15m", "30m", "60m", "90m",
                   "1d", "5d", "1wk", "1mo", "3mo"]
    elif period in ("3mo", "6mo", "1y", "2y", "5y", "10y"):
        # long period: daily and coarser only ("6mo" added — see FIX above)
        options = ["1d", "5d", "1wk", "1mo", "3mo"]
    else:
        return  # unknown period: leave the current options untouched
    self.comboBoxInterval.clear()
    self.comboBoxInterval.addItems(options)
def changeIntervalOptionsForRange(self):
    """Reset the interval combo box to the daily-and-coarser choices used
    when querying by explicit date range."""
    # addItems() produces the same final state as the generated
    # addItem("")/setItemText pairs did.
    self.comboBoxInterval.clear()
    self.comboBoxInterval.addItems(["1d", "5d", "1wk", "1mo", "3mo"])
###################################DATA############################################################################################
def addToList(self):
    """Validate the symbol typed/selected in the combo box and, if yfinance
    recognises it, insert it at the top of the watch list.

    A small 5-day/1-minute download probe is used purely as an existence
    check; an empty frame means the ticker is unknown and the error dialog
    is shown instead.
    """
    symbol = self.showSymbol(str(self.comboBoxSymbol.currentText()))
    probe = yf.download(symbol, period="5d", interval="1m")
    # FIX: `if x != True` replaced with idiomatic truthiness on .empty
    if not probe.empty:
        self.listWidget.insertItem(0, symbol)
        self.comboBoxSymbol.setCurrentIndex(0)  # back to the placeholder
    else:
        self.openError()
def showSymbol(self, item):
    """Map a friendly company name from the symbol combo box to its ticker.

    Unknown inputs are returned unchanged (they are assumed to already be
    ticker symbols typed by the user).
    """
    # FIX: the eight-way if-chain is replaced by a single dict lookup with
    # identical results.
    tickers = {
        'Microsoft': 'MSFT',
        'Apple': 'AAPL',
        'Amazon': 'AMZN',
        'Alphabet': 'GOOG',
        'Alibaba': 'BABA',
        'Facebook': 'FB',
        'Visa': 'V',
        'Walmart': 'WMT',
    }
    return tickers.get(item, item)
def removeItems(self):
    """Delete every currently selected row from the watch list."""
    watch_list = self.listWidget
    # selectedItems() returns a snapshot list, so removal while looping is safe
    for selected in watch_list.selectedItems():
        watch_list.takeItem(watch_list.row(selected))
def getList(self):
    """Return the watch-list contents as a list of upper-cased ticker strings.

    FIX (idiom): the original built the list with an index loop and then
    upper-cased it in a second manual while-loop; a single comprehension
    produces the identical result.
    """
    lw = self.listWidget
    return [str(lw.item(row).text()).upper() for row in range(lw.count())]
def getForRange(self, tickers, startdate, enddate):
    """Download OHLCV history for each ticker over [startdate, enddate] and
    stack the frames into one DataFrame with a ('Ticker', 'Date') MultiIndex."""
    interval = self.comboBoxInterval.currentText()
    frames = [
        yf.download(ticker, start=startdate, end=enddate, interval=interval)
        for ticker in tickers
    ]
    return pd.concat(frames, keys=tickers, names=['Ticker', 'Date'])
def getForPeriod(self, tickers, period):
    """Download OHLCV history for each ticker over the UI-selected period and
    stack the frames into one DataFrame with a ('Ticker', 'Date') MultiIndex.

    NOTE: the `period` parameter is accepted but, as in the original, the
    period actually used is read from comboBoxPeriod.
    """
    selected_period = self.comboBoxPeriod.currentText()
    interval = self.comboBoxInterval.currentText()
    frames = [
        yf.download(ticker, period=selected_period, interval=interval)
        for ticker in tickers
    ]
    return pd.concat(frames, keys=tickers, names=['Ticker', 'Date'])
####################################CHARTS###############################################################################
def plotCandlestick(self):
    """Download data for every watched symbol and open one interactive plotly
    candlestick chart per symbol.

    NOTE(review): if neither radio button is checked, `df` is never bound and
    the first use below raises NameError — presumably one button is always
    selected in the UI; confirm.
    """
    listSymbols=self.getList()
    if(self.radioButtonRange.isChecked()):
        df=yf.download(listSymbols,start=self.dateStart.text(),end=self.dateEnd.text(),interval=self.comboBoxInterval.currentText())
    if(self.radioButtonPeriod.isChecked()):
        df=yf.download(listSymbols,period=self.comboBoxPeriod.currentText(),interval=self.comboBoxInterval.currentText())
    i=0
    if len(listSymbols)>1:
        # multi-ticker download: columns are keyed by symbol, one chart each
        while i< len(listSymbols):
            figu = go.Figure(data=[go.Candlestick(x=df.index,open=df['Open'][listSymbols[i]], high=df['High'][listSymbols[i]],low=df['Low'][listSymbols[i]],
            close=df['Close'][listSymbols[i]])],layout_title_text=listSymbols[i])
            figu.update_layout(xaxis_rangeslider_visible=False)
            figu.show()
            i=i+1
    if len(listSymbols)==1:
        # single ticker: flat columns; i is still 0 here
        figu = go.Figure(data=[go.Candlestick(x=df.index,open=df['Open'], high=df['High'],low=df['Low'],
        close=df['Close'])],layout_title_text=listSymbols[i])
        figu.update_layout(xaxis_rangeslider_visible=False)
        figu.show()
def plotOHLC(self):
    """Download data for every watched symbol and open one interactive plotly
    OHLC (open-high-low-close bar) chart per symbol.

    NOTE(review): if neither radio button is checked, `df` is never bound and
    the first use below raises NameError — presumably one button is always
    selected in the UI; confirm.
    """
    listSymbols=self.getList()
    if(self.radioButtonRange.isChecked()):
        df=yf.download(listSymbols,start=self.dateStart.text(),end=self.dateEnd.text(),interval=self.comboBoxInterval.currentText())
    if(self.radioButtonPeriod.isChecked()):
        df=yf.download(listSymbols,period=self.comboBoxPeriod.currentText(),interval=self.comboBoxInterval.currentText())
    i=0
    if len(listSymbols)>1:
        # multi-ticker download: columns are keyed by symbol, one chart each
        while i< len(listSymbols):
            figu = go.Figure(data=[go.Ohlc(x=df.index,open=df['Open'][listSymbols[i]], high=df['High'][listSymbols[i]],low=df['Low'][listSymbols[i]],
            close=df['Close'][listSymbols[i]])],layout_title_text=listSymbols[i])
            figu.update_layout(xaxis_rangeslider_visible=False)
            figu.show()
            i=i+1
    if len(listSymbols)==1:
        # single ticker: flat columns; i is still 0 here
        figu = go.Figure(data=[go.Ohlc(x=df.index,open=df['Open'], high=df['High'],low=df['Low'], close=df['Close'])],layout_title_text=listSymbols[i])
        figu.update_layout(xaxis_rangeslider_visible=False)
        figu.show()
def calculatePctChange(self):
    """Plot a histogram of the per-bar percentage change of the adjusted
    close for every watched symbol (one subplot per ticker)."""
    listSymbols = self.getList()
    if self.radioButtonRange.isChecked():
        all_data = self.getForRange(listSymbols, self.dateStart.text(), self.dateEnd.text())
    if self.radioButtonPeriod.isChecked():
        all_data = self.getForPeriod(listSymbols, self.comboBoxPeriod.currentText())
    # FIX: pass pivot() arguments by keyword — the positional form was
    # deprecated in pandas 1.1 and removed in pandas 2.0.
    daily_close_px = all_data[['Adj Close']].reset_index().pivot(
        index='Date', columns='Ticker', values='Adj Close')
    daily_pct_change = daily_close_px.pct_change()
    daily_pct_change.hist(bins=50, sharex=False, figsize=(12, 8))
    plt.show()
def plotTimeSeries(self):
    """Plot the closing-price time series of every watched symbol, one
    subplot per ticker."""
    listSymbols = self.getList()
    if self.radioButtonRange.isChecked():
        all_data = self.getForRange(listSymbols, self.dateStart.text(), self.dateEnd.text())
    if self.radioButtonPeriod.isChecked():
        all_data = self.getForPeriod(listSymbols, self.comboBoxPeriod.currentText())
    # FIX: keyword pivot() arguments (positional form removed in pandas 2.0)
    time_series = all_data[['Close']].reset_index().pivot(
        index='Date', columns='Ticker', values='Close')
    time_series.plot(subplots=True, sharex=False)
    plt.show()
def plotVolume(self):
    """Plot traded volume for every watched symbol as stacked area charts,
    one subplot per ticker."""
    listSymbols = self.getList()
    if self.radioButtonRange.isChecked():
        all_data = self.getForRange(listSymbols, self.dateStart.text(), self.dateEnd.text())
    if self.radioButtonPeriod.isChecked():
        all_data = self.getForPeriod(listSymbols, self.comboBoxPeriod.currentText())
    # FIX: keyword pivot() arguments (positional form removed in pandas 2.0)
    vol = all_data[['Volume']].reset_index().pivot(
        index='Date', columns='Ticker', values='Volume')
    vol.plot.area(subplots=True, sharex=False, lw=3, grid=True)
    plt.show()
def plotVolatility(self):
    """Plot rolling volatility (std-dev of percentage changes, annualised by
    sqrt of the window) for every watched symbol.

    The window is ~one trading year (252 bars) when more than 500
    observations are available, else ~one trading month (21 bars).
    """
    listSymbols = self.getList()
    if self.radioButtonRange.isChecked():
        all_data = self.getForRange(listSymbols, self.dateStart.text(), self.dateEnd.text())
    if self.radioButtonPeriod.isChecked():
        all_data = self.getForPeriod(listSymbols, self.comboBoxPeriod.currentText())
    # FIX: keyword pivot() arguments (positional form removed in pandas 2.0)
    daily_close_px = all_data[['Adj Close']].reset_index().pivot(
        index='Date', columns='Ticker', values='Adj Close')
    daily_pct_change = daily_close_px.pct_change()
    min_periods = 252 if len(daily_pct_change) > 500 else 21
    vol = daily_pct_change.rolling(min_periods).std() * np.sqrt(min_periods)
    vol.plot(figsize=(10, 8))
    plt.show()
#################################################advisor####################################################################
def buildDFAdvisor(self, listSymbols):
    """Download the OHLCV frame the advisor strategies operate on, honouring
    whichever query mode (explicit date range vs. named period) is selected.

    NOTE(review): assumes one of the two radio buttons is always checked —
    otherwise `df` is unbound at the return; confirm against the UI setup.
    """
    interval = self.comboBoxInterval.currentText()
    if self.radioButtonRange.isChecked():
        df = yf.download(listSymbols, start=self.dateStart.text(),
                         end=self.dateEnd.text(), interval=interval)
    if self.radioButtonPeriod.isChecked():
        df = yf.download(listSymbols, period=self.comboBoxPeriod.currentText(),
                         interval=interval)
    return df
def buildTableMAC(self):
    """Run the two-moving-average crossover strategy for every watched symbol:
    build the signal table, plot the crossover chart and back-test it.

    Signal = 1.0 while the short moving average is above the long one
    (starting only after `short_window` bars); `positions` marks the
    crossover bars via diff().

    FIX: the previous chained assignment
    `signals['signal'][short_window:] = ...` raises SettingWithCopyWarning
    and stops working under pandas copy-on-write; the column is now computed
    as a whole array and assigned once. The duplicated single/multi-ticker
    branches are merged by selecting the close series up front.
    """
    listSymbols = self.getList()
    df = self.buildDFAdvisor(listSymbols)
    short_window = int(self.comboBoxShort.currentText())
    long_window = int(self.comboBoxLong.currentText())
    if len(df) <= short_window:
        return  # not enough bars to form the short moving average
    for symbol in listSymbols:
        signals = pd.DataFrame(index=df.index)
        signals['symbol'] = symbol
        signals['signal'] = 0.0
        # with several tickers yfinance keys the columns by symbol
        if len(listSymbols) > 1:
            listLength = 2
            close = df['Close'][symbol]
        else:
            listLength = 1
            close = df['Close']
        signals['short_mavg'] = close.rolling(window=short_window, min_periods=1, center=False).mean()
        signals['long_mavg'] = close.rolling(window=long_window, min_periods=1, center=False).mean()
        crossover = np.where(signals['short_mavg'] > signals['long_mavg'], 1.0, 0.0)
        crossover[:short_window] = 0.0  # warm-up bars carry no signal
        signals['signal'] = crossover
        signals['positions'] = signals['signal'].diff()
        signals['price'] = close
        signals = self.addSharesColumn(signals)
        self.plotMAcrossover(df, symbol, signals, listLength)
        self.backtestTable(signals, symbol)
def plotMAcrossover(self, df, symbol, signals, listLength):
    """Chart price, both moving averages, and buy/sell markers at the
    crossover bars (positions == +1 / -1).

    listLength: 1 = single ticker (flat columns), 2 = several tickers
    (columns keyed by symbol).
    """
    figure = plt.figure()
    axes = figure.add_subplot(111, ylabel='Price')
    if listLength == 2:
        df['Close'][symbol].plot(ax=axes, color='r', lw=2.)
    if listLength == 1:
        df['Close'].plot(ax=axes, color='r', lw=2.)
    signals[['short_mavg', 'long_mavg']].plot(ax=axes, lw=2.)
    buys = signals.positions == 1.0
    sells = signals.positions == -1.0
    axes.plot(signals.loc[buys].index, signals.short_mavg[buys], '^', markersize=10, color='k')
    axes.plot(signals.loc[sells].index, signals.short_mavg[sells], 'v', markersize=10, color='k')
    plt.legend(["Price", 'short_mavg', 'long_mavg', "Buy", "Sell"])
    plt.title("Two Moving Average Crossover For " + symbol)
    plt.show()
def buildTableNaive(self):
    """Run the naive momentum strategy for every watched symbol: buy after
    `nb_conseq_days` consecutive up bars, sell after the same number of
    consecutive down bars, then plot and back-test.

    FIX: the previous chained assignment `signals['positions'][k] = ...`
    raises SettingWithCopyWarning and stops working under pandas
    copy-on-write; positions are now accumulated in a plain list and
    assigned once (identical values and dtype).
    """
    listSymbols = self.getList()
    df = self.buildDFAdvisor(listSymbols)
    nb_conseq_days = int(self.comboBoxThreshold.currentText())
    for symbol in listSymbols:
        signals = pd.DataFrame(index=df.index)
        signals['symbol'] = symbol
        # with several tickers yfinance keys the columns by symbol
        if len(listSymbols) > 1:
            listLength = 2
            signals['price'] = df['Adj Close'][symbol]
        else:
            listLength = 1
            signals['price'] = df['Adj Close']
        positions = []
        cons_day = 0        # signed run length of consecutive up/down bars
        prior_price = 0
        first = True
        for price in signals['price']:
            if first:
                first = False  # no prior bar to compare against
            elif price > prior_price:
                if cons_day < 0:
                    cons_day = 0
                cons_day += 1
            elif price < prior_price:
                if cons_day > 0:
                    cons_day = 0
                cons_day -= 1
            # exactly at the threshold → emit a signal (repeats while the
            # run neither grows nor breaks, matching the original logic)
            if cons_day == nb_conseq_days:
                positions.append(1)
            elif cons_day == -nb_conseq_days:
                positions.append(-1)
            else:
                positions.append(0)
            prior_price = price
        signals['positions'] = positions
        signals = self.addSharesColumn(signals)
        signals = self.addArrowsColumnNaive(signals)
        self.plotNaive(df, symbol, signals, listLength)
        self.backtestTable(signals, symbol)
def plotNaive(self,df,symbol,signals,listLength):
    """Chart the adjusted close with buy (^) and sell (v) markers taken from
    the 'arrows' column produced by addArrowsColumnNaive.

    listLength: 1 = single ticker (flat columns), 2 = several tickers
    (columns keyed by symbol).

    NOTE(review): the boolean masks built from `signals` index into `df`;
    this relies on both frames sharing the same index (they are built that
    way upstream) — confirm before reusing elsewhere.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111, ylabel='price')
    if listLength==1:
        df["Adj Close"].plot(ax=ax1, color='g', lw=.5)
        ax1.plot(signals.loc[signals.arrows== 1.0].index,df["Adj Close"][signals.arrows == 1],'^', markersize=7, color='k')
        ax1.plot(signals.loc[signals.arrows== -1.0].index,df["Adj Close"][signals.arrows == -1],'v', markersize=7, color='k')
    if listLength==2:
        df["Adj Close"][symbol].plot(ax=ax1, color='g', lw=.5)
        ax1.plot(signals.loc[signals.arrows== 1.0].index,df["Adj Close"][symbol][signals.arrows == 1],'^', markersize=7, color='k')
        ax1.plot(signals.loc[signals.arrows== -1.0].index,df["Adj Close"][symbol][signals.arrows == -1],'v', markersize=7, color='k')
    plt.legend(["Price","Buy","Sell"])
    plt.title("Naive Trading Strategy for "+symbol)
    plt.show()
def turtleStrategy(self):
    """Run the turtle breakout strategy for every watched symbol: go long on
    a close above the N-bar high, short below the N-bar low, exit on a cross
    of the N-bar average; then plot and back-test.

    FIX (performance/duplication): the breakout window was re-read from the
    combo box three times per branch and the single/multi-ticker branches
    duplicated every line; the window is now read once and the close series
    selected up front. Values produced are identical.
    """
    listSymbols = self.getList()
    df = self.buildDFAdvisor(listSymbols)
    window = int(self.comboBoxBreakout.currentText())
    for symbol in listSymbols:
        signals = pd.DataFrame(index=df.index)
        signals['symbol'] = symbol
        # with several tickers yfinance keys the columns by symbol
        if len(listSymbols) > 1:
            listLength = 2
            close = df['Close'][symbol]
        else:
            listLength = 1
            close = df['Close']
        signals['price'] = close
        # channel statistics over the previous `window` bars (shift(1)
        # excludes the current bar from its own channel)
        channel = close.shift(1).rolling(window=window)
        signals['high'] = channel.max()
        signals['low'] = channel.min()
        signals['avg'] = channel.mean()
        signals['long_entry'] = close > signals.high
        signals['short_entry'] = close < signals.low
        signals['long_exit'] = close < signals.avg
        signals['short_exit'] = close > signals.avg
        # exits deliberately override entries (same order as before)
        signals['positions_long'] = 0
        signals.loc[signals.long_entry, 'positions_long'] = 1
        signals.loc[signals.long_exit, 'positions_long'] = 0
        signals['positions_short'] = 0
        signals.loc[signals.short_entry, 'positions_short'] = -1
        signals.loc[signals.short_exit, 'positions_short'] = 0
        signals['positions'] = signals.positions_long + signals.positions_short
        signals['shares'] = signals['positions']
        signals = self.addArrowsColumnTurtle(signals)
        self.plotTurtle(df, symbol, signals, listLength)
        self.backtestTable(signals, symbol)
def plotTurtle(self,df,symbol,signals,listLength):
    """Chart the close together with the breakout channel (rolling low/high)
    and buy (^) / sell (v) markers from the 'arrows' column produced by
    addArrowsColumnTurtle.

    listLength: 1 = single ticker (flat columns), 2 = several tickers
    (columns keyed by symbol).

    NOTE(review): the boolean masks built from `signals` index into `df`;
    this relies on both frames sharing the same index (they are built that
    way upstream) — confirm before reusing elsewhere.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111, ylabel='price')
    if listLength==1:
        df["Close"].plot(ax=ax1, color='g', lw=.5)
        signals.low.plot(ax=ax1, color='r', lw=.5)
        signals.high.plot(ax=ax1, color='b', lw=.5)
        ax1.plot(signals.loc[signals['arrows']== 1].index, df["Close"][signals['arrows']== 1],'^', markersize=7, color='k')
        ax1.plot(signals.loc[signals['arrows']==-1].index, df["Close"][signals['arrows']== -1],'v', markersize=7, color='k')
    if listLength==2:
        df["Close"][symbol].plot(ax=ax1, color='g', lw=.5)
        signals.low.plot(ax=ax1, color='r', lw=.5)
        signals.high.plot(ax=ax1, color='b', lw=.5)
        ax1.plot(signals.loc[signals['arrows']== 1].index, df["Close"][symbol][signals['arrows']== 1],'^', markersize=7, color='k')
        ax1.plot(signals.loc[signals['arrows']==-1].index, df["Close"][symbol][signals['arrows']== -1],'v', markersize=7, color='k')
    plt.legend(["Price","low","high"])
    plt.title("Turtle Trading Strategy for "+symbol)
    plt.show()
def addSharesColumn(self, signals):
    """Add a 'shares' holding-state column derived from 'positions': 1 from
    each buy signal (positions == 1.0) until the next sell (== -1.0),
    otherwise carry the previous state (initially 0). Returns `signals`.

    FIX: the previous implementation wrote through chained indexing
    (`signals['shares'][k] = ...`), which raises SettingWithCopyWarning and
    stops working under pandas copy-on-write; the column is now built as a
    plain list and assigned once (identical values and integer dtype).
    """
    held = 0
    shares = []
    for pos in signals['positions']:
        if pos == 1.0:
            held = 1
        elif pos == -1.0:
            held = 0
        shares.append(held)
    signals['shares'] = shares
    return signals
def addArrowsColumnTurtle(self, signals):
    """Derive an 'arrows' chart-marker column (1 = buy, -1 = sell, 0 = none)
    from the turtle 'positions' column. Returns `signals`.

    Entering a long (short) position draws one buy (sell) arrow; returning
    to flat draws the opposite arrow to mark the exit. Repeated bars in the
    same state draw nothing.

    FIX: replaces chained indexing (`signals['arrows'][k] = ...`, deprecated
    and broken under pandas copy-on-write) with a single list assignment;
    values and dtype are unchanged.
    """
    arrows = []
    prior_arrow = 0  # state of the last drawn marker: 1 long, -1 short, 0 flat
    for pos in signals['positions']:
        arrow = 0
        if pos == 1:
            arrow = 0 if prior_arrow == 1 else 1
            prior_arrow = 1
        elif pos == -1:
            arrow = 0 if prior_arrow == -1 else -1
            prior_arrow = -1
        elif pos == 0.0:
            if prior_arrow == 1:
                arrow = -1  # exit long → sell marker
            if prior_arrow == -1:
                arrow = 1   # exit short → buy marker
            prior_arrow = 0
        arrows.append(arrow)
    signals['arrows'] = arrows
    return signals
def addArrowsColumnNaive(self, signals):
    """Derive an 'arrows' chart-marker column (1 = buy, -1 = sell, 0 = none)
    from the naive strategy's 'positions' column. Returns `signals`.

    Only the first bar of a run of identical signals draws a marker; unlike
    the turtle variant, a return to 0 draws nothing.

    FIX: replaces chained indexing (`signals['arrows'][k] = ...`, deprecated
    and broken under pandas copy-on-write) with a single list assignment;
    values and dtype are unchanged.
    """
    arrows = []
    prior_arrow = 0  # last marker drawn: 1 buy, -1 sell, 0 none yet
    for pos in signals['positions']:
        arrow = 0
        if pos == 1:
            arrow = 0 if prior_arrow == 1 else 1
            prior_arrow = 1
        elif pos == -1:
            arrow = 0 if prior_arrow == -1 else -1
            prior_arrow = -1
        arrows.append(arrow)
    signals['arrows'] = arrows
    return signals
def backtestTable(self, signals, symbol):
    """Back-test a strategy's signal table: track 100-share positions, cash
    net of commission, total equity and per-bar returns, then render the
    portfolio table and the diagnostic charts.

    NOTE(review): the first element of shares.diff() is NaN, so the first
    cash/total/returns rows are NaN too — inherited behaviour, confirm it is
    acceptable downstream.
    """
    starting_cash = float(self.comboBoxInitialCapital.currentText())
    commission_rate = float(self.comboBoxCommission.currentText())
    portfolio = pd.DataFrame(index=signals.index).fillna(0.0)
    portfolio['positions'] = signals['positions']
    portfolio['shares'] = 100 * signals['shares']
    portfolio['price'] = signals['price']
    portfolio['holdings'] = 0
    portfolio['cash'] = 0
    share_change = portfolio['shares'].diff()
    commission_paid = (portfolio['positions'] * portfolio['shares']
                       * portfolio['price'] * commission_rate).cumsum()
    portfolio['holdings'] = portfolio['shares'] * portfolio['price']
    portfolio['cash'] = starting_cash - (share_change * portfolio['price']).cumsum() - commission_paid
    portfolio['total'] = portfolio['cash'] + portfolio['holdings']
    portfolio['returns'] = portfolio['total'].pct_change()
    self.portfolioTable(portfolio, symbol)
    self.graphsForBacktesting(portfolio['returns'], portfolio['total'], symbol)
def graphsForBacktesting(self,returns,total,symbol):
    """Render the equity curve and the full pyfolio diagnostic chart suite
    for one back-tested symbol.

    returns: per-bar portfolio returns series.
    total:   portfolio equity curve (cash + holdings).
    Each chart is shown in its own matplotlib window, one after another.
    """
    # equity curve first
    total.plot()
    plt.ylim(total.min(),total.max())
    plt.ylabel("Portfolio value")
    plt.title("Portfolio"+" ("+symbol+")")
    plt.show()
    # summary statistics table (printed, not plotted)
    pa.show_perf_stats(returns)
    pa.plot_returns(returns)
    plt.title("Returns"+" ("+symbol+")")
    plt.show()
    pa.plot_rolling_returns(returns)
    plt.title("Cumulative Returns"+" ("+symbol+")")
    plt.show()
    pa.plot_rolling_volatility(returns)
    plt.title("Rolling Volatility (6-month)"+" -"+symbol)
    plt.show()
    pa.plot_rolling_sharpe(returns)
    plt.title("Rolling Sharpe Ratio (6-month)"+" -"+symbol)
    plt.show()
    pa.plot_return_quantiles(returns)
    plt.title("Return Quantiles"+" ("+symbol+")")
    plt.show()
    pa.plot_monthly_returns_dist(returns)
    plt.title("Distribution of Monthly Returns"+" ("+symbol+")")
    plt.show()
    pa.plot_monthly_returns_timeseries(returns)
    plt.title("Monthly Returns"+" -"+symbol)
    plt.show()
    pa.plot_monthly_returns_heatmap(returns)
    plt.title("Monthly Returns (%)"+" -"+symbol)
    plt.show()
    pa.plot_annual_returns(returns)
    plt.title("Annual Returns (%)"+" -"+symbol)
    plt.show()
    # drawdown analysis (table printed, then two charts)
    pa.show_worst_drawdown_periods(returns)
    pa.plot_drawdown_periods(returns)
    plt.title("Top 10 Drawdown Periods"+" ("+symbol+")")
    plt.show()
    pa.plot_drawdown_underwater(returns)
    plt.title("Underwater Plot"+" ("+symbol+")")
    plt.show()
def portfolioTable(self, df, symbol):
    """Render the back-test ledger (one row per bar) as an interactive
    plotly table in the browser."""
    header = dict(
        values=['Date', 'Positions', 'Shares', 'Price', 'Holdings', 'Cash', 'Total', 'Return'],
        fill_color='pink',
        align='left',
    )
    cells = dict(
        values=[df.index, df['positions'], df['shares'], df['price'],
                df['holdings'], df['cash'], df['total'], df['returns']],
        fill_color='lavender',
        align='left',
    )
    table = go.Table(header=header, cells=cells)
    fig = go.Figure(data=[table], layout_title_text="Portfolio(" + symbol + ")")
    fig.show()
if __name__ == "__main__":
    # Manual smoke-test entry point: build the widget and run the Qt loop.
    import sys

    application = QtWidgets.QApplication(sys.argv)
    tab_widget = QtWidgets.QTabWidget()
    ui = Ui_TabWidgetPortfolio()
    ui.setupUi(tab_widget)
    tab_widget.show()
    sys.exit(application.exec_())
| [
"PyQt5.QtGui.QIcon",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"plotly.graph_objects.Candlestick",
"pyfolio.plotting.plot_rolling_volatility",
"PyQt5.QtCore.QTime",
"yfinance.download",
"pyfolio.plotting.plot_monthly_returns_dist",
"pyfolio.plotting.plot_return_quantiles",
"PyQt5.QtWidgets.QApplic... | [((55967, 55999), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (55989, 55999), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((56026, 56048), 'PyQt5.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', ([], {}), '()\n', (56046, 56048), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((531, 550), 'PyQt5.QtWidgets.QDialog', 'QtWidgets.QDialog', ([], {}), '()\n', (548, 550), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((573, 589), 'dialogError.Ui_DialogError', 'Ui_DialogError', ([], {}), '()\n', (587, 589), False, 'from dialogError import Ui_DialogError\n'), ((969, 982), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (980, 982), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1281, 1300), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1298, 1300), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1387, 1428), 'PyQt5.QtWidgets.QListWidget', 'QtWidgets.QListWidget', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (1408, 1428), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1728, 1767), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (1747, 1767), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2423, 2459), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (2439, 2459), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2547, 2560), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2558, 2560), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2889, 2925), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (2905, 2925), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3016, 3029), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3027, 3029), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((3366, 3402), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (3382, 3402), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3485, 3498), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3496, 3498), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3639, 3678), 'PyQt5.QtWidgets.QDateEdit', 'QtWidgets.QDateEdit', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (3658, 3678), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3764, 3777), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3775, 3777), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4169, 4205), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (4185, 4205), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4296, 4309), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4307, 4309), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4653, 4692), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (4672, 4692), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4782, 4795), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4793, 4795), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5455, 5491), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (5471, 5491), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5581, 5594), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5592, 5594), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5933, 5972), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (5952, 5972), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6064, 6077), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6075, 6077), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6553, 6595), 
'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (6575, 6595), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6861, 6903), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (6883, 6903), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7115, 7151), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (7131, 7151), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7234, 7247), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (7245, 7247), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7386, 7425), 'PyQt5.QtWidgets.QDateEdit', 'QtWidgets.QDateEdit', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (7405, 7425), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7509, 7522), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (7520, 7522), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7897, 7938), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (7918, 7938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8023, 8036), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (8034, 8036), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8400, 8413), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8411, 8413), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8683, 8724), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (8704, 8724), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8976, 8989), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8987, 8989), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9283, 9324), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (9304, 9324), False, 
'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9504, 9517), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9515, 9517), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9835, 9876), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (9856, 9876), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10042, 10055), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (10053, 10055), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10341, 10382), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (10362, 10382), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10567, 10580), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (10578, 10580), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10922, 10963), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (10943, 10963), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11163, 11176), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (11174, 11176), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11546, 11587), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (11567, 11587), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11765, 11778), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (11776, 11778), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12096, 12137), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_portfolio'], {}), '(self.tab_portfolio)\n', (12117, 12137), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12307, 12320), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (12318, 12320), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12594, 12607), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (12605, 12607), False, 
'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12808, 12827), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (12825, 12827), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12914, 12950), 'PyQt5.QtWidgets.QToolBox', 'QtWidgets.QToolBox', (['self.tab_advisor'], {}), '(self.tab_advisor)\n', (12932, 12950), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13112, 13131), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (13129, 13131), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13284, 13319), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.pageNaive'], {}), '(self.pageNaive)\n', (13303, 13319), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13412, 13425), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (13423, 13425), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13871, 13903), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.pageNaive'], {}), '(self.pageNaive)\n', (13887, 13903), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13992, 14005), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (14003, 14005), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14346, 14383), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pageNaive'], {}), '(self.pageNaive)\n', (14367, 14383), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14471, 14484), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (14482, 14484), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14893, 14912), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (14910, 14912), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15052, 15082), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.pageMAC'], {}), '(self.pageMAC)\n', (15068, 15082), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15307, 15340), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.pageMAC'], {}), '(self.pageMAC)\n', (15326, 
15340), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15428, 15441), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (15439, 15441), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15766, 15796), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.pageMAC'], {}), '(self.pageMAC)\n', (15782, 15796), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16017, 16050), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.pageMAC'], {}), '(self.pageMAC)\n', (16036, 16050), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16138, 16151), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (16149, 16151), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16469, 16504), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pageMAC'], {}), '(self.pageMAC)\n', (16490, 16504), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16779, 16798), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (16796, 16798), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16950, 16983), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.pageTurtle'], {}), '(self.pageTurtle)\n', (16966, 16983), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17071, 17084), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (17082, 17084), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17250, 17286), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.pageTurtle'], {}), '(self.pageTurtle)\n', (17269, 17286), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17377, 17390), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (17388, 17390), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17776, 17814), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pageTurtle'], {}), '(self.pageTurtle)\n', (17797, 17814), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17903, 17916), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (17914, 
17916), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18342, 18376), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_advisor'], {}), '(self.tab_advisor)\n', (18358, 18376), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18646, 18683), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.tab_advisor'], {}), '(self.tab_advisor)\n', (18665, 18683), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19210, 19244), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_advisor'], {}), '(self.tab_advisor)\n', (19226, 19244), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19495, 19532), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.tab_advisor'], {}), '(self.tab_advisor)\n', (19514, 19532), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((20008, 20021), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (20019, 20021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((20394, 20451), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['TabWidgetPortfolio'], {}), '(TabWidgetPortfolio)\n', (20431, 20451), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((34694, 34739), 'yfinance.download', 'yf.download', (['item'], {'period': '"""5d"""', 'interval': '"""1m"""'}), "(item, period='5d', interval='1m')\n", (34705, 34739), True, 'import yfinance as yf\n'), ((36100, 36156), 'pandas.concat', 'pd.concat', (['datas'], {'keys': 'tickers', 'names': "['Ticker', 'Date']"}), "(datas, keys=tickers, names=['Ticker', 'Date'])\n", (36109, 36156), True, 'import pandas as pd\n'), ((36409, 36465), 'pandas.concat', 'pd.concat', (['datas'], {'keys': 'tickers', 'names': "['Ticker', 'Date']"}), "(datas, keys=tickers, names=['Ticker', 'Date'])\n", (36418, 36465), True, 'import pandas as pd\n'), ((39548, 39558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39556, 39558), True, 'import matplotlib.pyplot as plt\n'), ((40077, 40087), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40085, 40087), True, 'import matplotlib.pyplot as plt\n'), ((40615, 40625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40623, 40625), True, 'import matplotlib.pyplot as plt\n'), ((41381, 41391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41389, 41391), True, 'import matplotlib.pyplot as plt\n'), ((43969, 43981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (43979, 43981), True, 'import matplotlib.pyplot as plt\n'), ((44582, 44645), 'matplotlib.pyplot.legend', 'plt.legend', (["['Price', 'short_mavg', 'long_mavg', 'Buy', 'Sell']"], {}), "(['Price', 'short_mavg', 'long_mavg', 'Buy', 'Sell'])\n", (44592, 44645), True, 'import matplotlib.pyplot as plt\n'), ((44654, 44709), 'matplotlib.pyplot.title', 'plt.title', (["('Two Moving Average Crossover For ' + symbol)"], {}), "('Two Moving Average Crossover For ' + symbol)\n", (44663, 44709), True, 'import matplotlib.pyplot as plt\n'), ((44719, 44729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44727, 44729), True, 'import matplotlib.pyplot as plt\n'), ((46385, 46397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (46395, 46397), True, 'import matplotlib.pyplot as plt\n'), ((47177, 47213), 'matplotlib.pyplot.legend', 'plt.legend', (["['Price', 'Buy', 'Sell']"], {}), "(['Price', 'Buy', 'Sell'])\n", (47187, 47213), True, 'import matplotlib.pyplot as plt\n'), ((47221, 47270), 'matplotlib.pyplot.title', 'plt.title', (["('Naive Trading Strategy for ' + symbol)"], {}), "('Naive Trading Strategy for ' + symbol)\n", (47230, 47270), True, 'import matplotlib.pyplot as plt\n'), ((47278, 47288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47286, 47288), True, 'import matplotlib.pyplot as plt\n'), ((49809, 49821), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (49819, 49821), True, 'import matplotlib.pyplot as plt\n'), ((50849, 50885), 'matplotlib.pyplot.legend', 'plt.legend', 
(["['Price', 'low', 'high']"], {}), "(['Price', 'low', 'high'])\n", (50859, 50885), True, 'import matplotlib.pyplot as plt\n'), ((50895, 50945), 'matplotlib.pyplot.title', 'plt.title', (["('Turtle Trading Strategy for ' + symbol)"], {}), "('Turtle Trading Strategy for ' + symbol)\n", (50904, 50945), True, 'import matplotlib.pyplot as plt\n'), ((50955, 50965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50963, 50965), True, 'import matplotlib.pyplot as plt\n'), ((53914, 53943), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Portfolio value"""'], {}), "('Portfolio value')\n", (53924, 53943), True, 'import matplotlib.pyplot as plt\n'), ((53953, 53997), 'matplotlib.pyplot.title', 'plt.title', (["('Portfolio' + ' (' + symbol + ')')"], {}), "('Portfolio' + ' (' + symbol + ')')\n", (53962, 53997), True, 'import matplotlib.pyplot as plt\n'), ((54001, 54011), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54009, 54011), True, 'import matplotlib.pyplot as plt\n'), ((54021, 54048), 'pyfolio.plotting.show_perf_stats', 'pa.show_perf_stats', (['returns'], {}), '(returns)\n', (54039, 54048), True, 'from pyfolio import plotting as pa\n'), ((54058, 54082), 'pyfolio.plotting.plot_returns', 'pa.plot_returns', (['returns'], {}), '(returns)\n', (54073, 54082), True, 'from pyfolio import plotting as pa\n'), ((54092, 54134), 'matplotlib.pyplot.title', 'plt.title', (["('Returns' + ' (' + symbol + ')')"], {}), "('Returns' + ' (' + symbol + ')')\n", (54101, 54134), True, 'import matplotlib.pyplot as plt\n'), ((54138, 54148), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54146, 54148), True, 'import matplotlib.pyplot as plt\n'), ((54158, 54190), 'pyfolio.plotting.plot_rolling_returns', 'pa.plot_rolling_returns', (['returns'], {}), '(returns)\n', (54181, 54190), True, 'from pyfolio import plotting as pa\n'), ((54200, 54253), 'matplotlib.pyplot.title', 'plt.title', (["('Cumulative Returns' + ' (' + symbol + ')')"], {}), "('Cumulative Returns' + ' (' + symbol 
+ ')')\n", (54209, 54253), True, 'import matplotlib.pyplot as plt\n'), ((54257, 54267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54265, 54267), True, 'import matplotlib.pyplot as plt\n'), ((54277, 54312), 'pyfolio.plotting.plot_rolling_volatility', 'pa.plot_rolling_volatility', (['returns'], {}), '(returns)\n', (54303, 54312), True, 'from pyfolio import plotting as pa\n'), ((54322, 54379), 'matplotlib.pyplot.title', 'plt.title', (["('Rolling Volatility (6-month)' + ' -' + symbol)"], {}), "('Rolling Volatility (6-month)' + ' -' + symbol)\n", (54331, 54379), True, 'import matplotlib.pyplot as plt\n'), ((54385, 54395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54393, 54395), True, 'import matplotlib.pyplot as plt\n'), ((54405, 54436), 'pyfolio.plotting.plot_rolling_sharpe', 'pa.plot_rolling_sharpe', (['returns'], {}), '(returns)\n', (54427, 54436), True, 'from pyfolio import plotting as pa\n'), ((54446, 54505), 'matplotlib.pyplot.title', 'plt.title', (["('Rolling Sharpe Ratio (6-month)' + ' -' + symbol)"], {}), "('Rolling Sharpe Ratio (6-month)' + ' -' + symbol)\n", (54455, 54505), True, 'import matplotlib.pyplot as plt\n'), ((54511, 54521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54519, 54521), True, 'import matplotlib.pyplot as plt\n'), ((54531, 54564), 'pyfolio.plotting.plot_return_quantiles', 'pa.plot_return_quantiles', (['returns'], {}), '(returns)\n', (54555, 54564), True, 'from pyfolio import plotting as pa\n'), ((54574, 54625), 'matplotlib.pyplot.title', 'plt.title', (["('Return Quantiles' + ' (' + symbol + ')')"], {}), "('Return Quantiles' + ' (' + symbol + ')')\n", (54583, 54625), True, 'import matplotlib.pyplot as plt\n'), ((54629, 54639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54637, 54639), True, 'import matplotlib.pyplot as plt\n'), ((54649, 54686), 'pyfolio.plotting.plot_monthly_returns_dist', 'pa.plot_monthly_returns_dist', (['returns'], {}), '(returns)\n', (54677, 54686), True, 'from 
pyfolio import plotting as pa\n'), ((54696, 54762), 'matplotlib.pyplot.title', 'plt.title', (["('Distribution of Monthly Returns' + ' (' + symbol + ')')"], {}), "('Distribution of Monthly Returns' + ' (' + symbol + ')')\n", (54705, 54762), True, 'import matplotlib.pyplot as plt\n'), ((54766, 54776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54774, 54776), True, 'import matplotlib.pyplot as plt\n'), ((54786, 54829), 'pyfolio.plotting.plot_monthly_returns_timeseries', 'pa.plot_monthly_returns_timeseries', (['returns'], {}), '(returns)\n', (54820, 54829), True, 'from pyfolio import plotting as pa\n'), ((54839, 54883), 'matplotlib.pyplot.title', 'plt.title', (["('Monthly Returns' + ' -' + symbol)"], {}), "('Monthly Returns' + ' -' + symbol)\n", (54848, 54883), True, 'import matplotlib.pyplot as plt\n'), ((54889, 54899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54897, 54899), True, 'import matplotlib.pyplot as plt\n'), ((54909, 54949), 'pyfolio.plotting.plot_monthly_returns_heatmap', 'pa.plot_monthly_returns_heatmap', (['returns'], {}), '(returns)\n', (54940, 54949), True, 'from pyfolio import plotting as pa\n'), ((54959, 55007), 'matplotlib.pyplot.title', 'plt.title', (["('Monthly Returns (%)' + ' -' + symbol)"], {}), "('Monthly Returns (%)' + ' -' + symbol)\n", (54968, 55007), True, 'import matplotlib.pyplot as plt\n'), ((55013, 55023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55021, 55023), True, 'import matplotlib.pyplot as plt\n'), ((55033, 55064), 'pyfolio.plotting.plot_annual_returns', 'pa.plot_annual_returns', (['returns'], {}), '(returns)\n', (55055, 55064), True, 'from pyfolio import plotting as pa\n'), ((55074, 55121), 'matplotlib.pyplot.title', 'plt.title', (["('Annual Returns (%)' + ' -' + symbol)"], {}), "('Annual Returns (%)' + ' -' + symbol)\n", (55083, 55121), True, 'import matplotlib.pyplot as plt\n'), ((55127, 55137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55135, 55137), True, 'import 
matplotlib.pyplot as plt\n'), ((55147, 55186), 'pyfolio.plotting.show_worst_drawdown_periods', 'pa.show_worst_drawdown_periods', (['returns'], {}), '(returns)\n', (55177, 55186), True, 'from pyfolio import plotting as pa\n'), ((55196, 55229), 'pyfolio.plotting.plot_drawdown_periods', 'pa.plot_drawdown_periods', (['returns'], {}), '(returns)\n', (55220, 55229), True, 'from pyfolio import plotting as pa\n'), ((55239, 55297), 'matplotlib.pyplot.title', 'plt.title', (["('Top 10 Drawdown Periods' + ' (' + symbol + ')')"], {}), "('Top 10 Drawdown Periods' + ' (' + symbol + ')')\n", (55248, 55297), True, 'import matplotlib.pyplot as plt\n'), ((55301, 55311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55309, 55311), True, 'import matplotlib.pyplot as plt\n'), ((55321, 55357), 'pyfolio.plotting.plot_drawdown_underwater', 'pa.plot_drawdown_underwater', (['returns'], {}), '(returns)\n', (55348, 55357), True, 'from pyfolio import plotting as pa\n'), ((55367, 55417), 'matplotlib.pyplot.title', 'plt.title', (["('Underwater Plot' + ' (' + symbol + ')')"], {}), "('Underwater Plot' + ' (' + symbol + ')')\n", (55376, 55417), True, 'import matplotlib.pyplot as plt\n'), ((55421, 55431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55429, 55431), True, 'import matplotlib.pyplot as plt\n'), ((860, 883), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(1124)', '(854)'], {}), '(1124, 854)\n', (872, 883), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((928, 951), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(1124)', '(854)'], {}), '(1124, 854)\n', (940, 951), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1167, 1187), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(26)', '(26)'], {}), '(26, 26)\n', (1179, 1187), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1466, 1497), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(80)', '(210)', '(241)', '(341)'], {}), '(80, 210, 241, 341)\n', (1478, 1497), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((1809, 1839), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(80)', '(130)', '(241)', '(51)'], {}), '(80, 130, 241, 51)\n', (1821, 1839), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2498, 2529), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(610)', '(130)', '(111)', '(41)'], {}), '(610, 130, 111, 41)\n', (2510, 2529), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2967, 2998), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(180)', '(171)', '(31)'], {}), '(640, 180, 171, 31)\n', (2979, 2998), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3437, 3467), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(220)', '(81)', '(41)'], {}), '(640, 220, 81, 41)\n', (3449, 3467), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3715, 3746), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(260)', '(181)', '(31)'], {}), '(640, 260, 181, 31)\n', (3727, 3746), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4061, 4085), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (4073, 4085), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4247, 4278), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(930)', '(180)', '(161)', '(31)'], {}), '(930, 180, 161, 31)\n', (4259, 4278), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4734, 4764), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(930)', '(220)', '(81)', '(31)'], {}), '(930, 220, 81, 31)\n', (4746, 4764), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5532, 5563), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(620)', '(400)', '(141)', '(41)'], {}), '(620, 400, 141, 41)\n', (5544, 5563), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6016, 6046), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(450)', '(81)', '(31)'], {}), '(640, 450, 81, 31)\n', (6028, 6046), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6639, 6669), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(610)', '(180)', '(21)', '(20)'], {}), '(610, 180, 21, 
20)\n', (6651, 6669), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6948, 6978), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(900)', '(180)', '(16)', '(21)'], {}), '(900, 180, 16, 21)\n', (6960, 6978), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7186, 7216), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(300)', '(51)', '(31)'], {}), '(640, 300, 51, 31)\n', (7198, 7216), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7460, 7491), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(330)', '(181)', '(31)'], {}), '(640, 330, 181, 31)\n', (7472, 7491), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7798, 7822), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (7810, 7822), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7975, 8005), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(350)', '(140)', '(91)', '(41)'], {}), '(350, 140, 91, 41)\n', (7987, 8005), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8438, 8468), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/add/add.png"""'], {}), "(':/add/add.png')\n", (8451, 8468), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8581, 8601), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (8593, 8601), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8764, 8794), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(350)', '(210)', '(91)', '(41)'], {}), '(350, 210, 91, 41)\n', (8776, 8794), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9015, 9050), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/delete/minus.png"""'], {}), "(':/delete/minus.png')\n", (9028, 9050), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9170, 9190), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (9182, 9190), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9369, 9400), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(760)', '(151)', '(41)'], {}), '(200, 760, 
151, 41)\n', (9381, 9400), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9543, 9589), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/candlestick/candlestick.png"""'], {}), "(':/candlestick/candlestick.png')\n", (9556, 9589), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9719, 9739), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (9731, 9739), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9914, 9945), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(360)', '(760)', '(141)', '(41)'], {}), '(360, 760, 141, 41)\n', (9926, 9945), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10081, 10113), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/ohlc/ohlc.png"""'], {}), "(':/ohlc/ohlc.png')\n", (10094, 10113), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10229, 10249), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(24)', '(24)'], {}), '(24, 24)\n', (10241, 10249), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10430, 10460), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(760)', '(181)', '(41)'], {}), '(10, 760, 181, 41)\n', (10442, 10460), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10606, 10647), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/timeseries/timeser.png"""'], {}), "(':/timeseries/timeser.png')\n", (10619, 10647), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10783, 10803), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (10795, 10803), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11018, 11049), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(660)', '(760)', '(191)', '(41)'], {}), '(660, 760, 191, 41)\n', (11030, 11049), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11202, 11254), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/histogram/chart-histogram-512.png"""'], {}), "(':/histogram/chart-histogram-512.png')\n", (11215, 11254), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11404, 11424), 
'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (11416, 11424), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11631, 11662), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(510)', '(760)', '(141)', '(41)'], {}), '(510, 760, 141, 41)\n', (11643, 11662), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11804, 11852), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/volatility/volatility-512.png"""'], {}), "(':/volatility/volatility-512.png')\n", (11817, 11852), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11980, 12000), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (11992, 12000), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12177, 12208), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(860)', '(760)', '(141)', '(41)'], {}), '(860, 760, 141, 41)\n', (12189, 12208), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12346, 12378), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/area/area.png"""'], {}), "(':/area/area.png')\n", (12359, 12378), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12498, 12518), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(34)', '(34)'], {}), '(34, 34)\n', (12510, 12518), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12633, 12675), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/portfolio/portfolio.png"""'], {}), "(':/portfolio/portfolio.png')\n", (12646, 12675), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12992, 13023), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(220)', '(1121)', '(581)'], {}), '(0, 220, 1121, 581)\n', (13004, 13023), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13168, 13197), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1121)', '(428)'], {}), '(0, 0, 1121, 428)\n', (13180, 13197), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13364, 13394), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(30)', '(111)', '(31)'], {}), '(160, 30, 111, 31)\n', (13376, 
13394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13945, 13974), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(20)', '(121)', '(51)'], {}), '(30, 20, 121, 51)\n', (13957, 13974), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14422, 14453), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(500)', '(100)', '(121)', '(41)'], {}), '(500, 100, 121, 41)\n', (14434, 14453), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14947, 14976), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1121)', '(428)'], {}), '(0, 0, 1121, 428)\n', (14959, 14976), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15120, 15149), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(40)', '(261)', '(31)'], {}), '(30, 40, 261, 31)\n', (15132, 15149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15381, 15410), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(40)', '(81)', '(31)'], {}), '(290, 40, 81, 31)\n', (15393, 15410), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15833, 15863), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(100)', '(251)', '(31)'], {}), '(30, 100, 251, 31)\n', (15845, 15863), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16090, 16120), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(100)', '(81)', '(31)'], {}), '(290, 100, 81, 31)\n', (16102, 16120), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16541, 16572), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(500)', '(190)', '(131)', '(31)'], {}), '(500, 190, 131, 31)\n', (16553, 16572), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16836, 16865), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1121)', '(428)'], {}), '(0, 0, 1121, 428)\n', (16848, 16865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17024, 17053), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(30)', '(121)', '(41)'], {}), '(30, 30, 121, 41)\n', (17036, 17053), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17330, 
17359), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(170)', '(40)', '(71)', '(31)'], {}), '(170, 40, 71, 31)\n', (17342, 17359), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17854, 17885), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(500)', '(100)', '(121)', '(41)'], {}), '(500, 100, 121, 41)\n', (17866, 17885), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18423, 18452), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(70)', '(241)', '(51)'], {}), '(30, 70, 241, 51)\n', (18435, 18452), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18733, 18763), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(270)', '(80)', '(131)', '(31)'], {}), '(270, 80, 131, 31)\n', (18745, 18763), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19287, 19317), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(540)', '(70)', '(221)', '(41)'], {}), '(540, 70, 221, 41)\n', (19299, 19317), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19578, 19608), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(760)', '(80)', '(101)', '(31)'], {}), '(760, 80, 101, 31)\n', (19590, 19608), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((20047, 20085), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/advisor/advisor.png"""'], {}), "(':/advisor/advisor.png')\n", (20060, 20085), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29785, 29797), 'datetime.date.today', 'date.today', ([], {}), '()\n', (29795, 29797), False, 'from datetime import date\n'), ((41315, 41335), 'numpy.sqrt', 'np.sqrt', (['min_periods'], {}), '(min_periods)\n', (41322, 41335), True, 'import numpy as np\n'), ((45026, 45054), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (45038, 45054), True, 'import pandas as pd\n'), ((47508, 47536), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (47520, 47536), True, 'import pandas as pd\n'), ((3979, 4003), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2019)', '(1)', '(1)'], {}), 
'(2019, 1, 1)\n', (3991, 4003), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4005, 4026), 'PyQt5.QtCore.QTime', 'QtCore.QTime', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4017, 4026), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7718, 7742), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (7730, 7742), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7744, 7765), 'PyQt5.QtCore.QTime', 'QtCore.QTime', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (7756, 7765), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((42339, 42367), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (42351, 42367), True, 'import pandas as pd\n'), ((52873, 52906), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'signals.index'}), '(index=signals.index)\n', (52885, 52906), True, 'import pandas as pd\n'), ((42839, 42938), 'numpy.where', 'np.where', (["(signals['short_mavg'][short_window:] > signals['long_mavg'][short_window:])", '(1.0)', '(0.0)'], {}), "(signals['short_mavg'][short_window:] > signals['long_mavg'][\n short_window:], 1.0, 0.0)\n", (42847, 42938), True, 'import numpy as np\n'), ((43438, 43537), 'numpy.where', 'np.where', (["(signals['short_mavg'][short_window:] > signals['long_mavg'][short_window:])", '(1.0)', '(0.0)'], {}), "(signals['short_mavg'][short_window:] > signals['long_mavg'][\n short_window:], 1.0, 0.0)\n", (43446, 43537), True, 'import numpy as np\n'), ((37548, 37646), 'plotly.graph_objects.Candlestick', 'go.Candlestick', ([], {'x': 'df.index', 'open': "df['Open']", 'high': "df['High']", 'low': "df['Low']", 'close': "df['Close']"}), "(x=df.index, open=df['Open'], high=df['High'], low=df['Low'],\n close=df['Close'])\n", (37562, 37646), True, 'import plotly.graph_objects as go\n'), ((38724, 38816), 'plotly.graph_objects.Ohlc', 'go.Ohlc', ([], {'x': 'df.index', 'open': "df['Open']", 'high': "df['High']", 'low': "df['Low']", 'close': "df['Close']"}), 
"(x=df.index, open=df['Open'], high=df['High'], low=df['Low'], close=\n df['Close'])\n", (38731, 38816), True, 'import plotly.graph_objects as go\n'), ((37154, 37322), 'plotly.graph_objects.Candlestick', 'go.Candlestick', ([], {'x': 'df.index', 'open': "df['Open'][listSymbols[i]]", 'high': "df['High'][listSymbols[i]]", 'low': "df['Low'][listSymbols[i]]", 'close': "df['Close'][listSymbols[i]]"}), "(x=df.index, open=df['Open'][listSymbols[i]], high=df['High']\n [listSymbols[i]], low=df['Low'][listSymbols[i]], close=df['Close'][\n listSymbols[i]])\n", (37168, 37322), True, 'import plotly.graph_objects as go\n'), ((38337, 38498), 'plotly.graph_objects.Ohlc', 'go.Ohlc', ([], {'x': 'df.index', 'open': "df['Open'][listSymbols[i]]", 'high': "df['High'][listSymbols[i]]", 'low': "df['Low'][listSymbols[i]]", 'close': "df['Close'][listSymbols[i]]"}), "(x=df.index, open=df['Open'][listSymbols[i]], high=df['High'][\n listSymbols[i]], low=df['Low'][listSymbols[i]], close=df['Close'][\n listSymbols[i]])\n", (38344, 38498), True, 'import plotly.graph_objects as go\n')] |
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from scipy.signal import argrelextrema
from sklearn.metrics import f1_score, precision_recall_curve
import matplotlib.pyplot as plt
import glob
import librosa
from tqdm import tqdm
from utilities.feature_extractor import FeatureExtractor, convert_time
VAL_SIZE = 0.15
BATCH_SIZE = 256
EPOCHS = 15
PATIENCE = 5
LR_RATE = 0.005
class TrainingDataset(Dataset):
    """Map-style dataset yielding (features, difficulty, onset-label) triples.

    Feature extraction is delegated to :class:`FeatureExtractor`; the three
    arrays it returns are converted to tensors once, up front.
    """

    def __init__(self, file, fuzzy_label=True, cache=None):
        self.extractor = FeatureExtractor()
        data, diffs, onsets = self.extractor.load_data(file, fuzzy_label, cache)
        # Tensorize once; indexing later is then cheap slicing.
        self.x = torch.from_numpy(np.array(data, dtype=float))
        self.y = torch.from_numpy(diffs)
        self.z = torch.from_numpy(onsets)
        n_samples = self.x.shape[0]
        # All three arrays must describe the same number of frames.
        assert n_samples == self.z.shape[0]
        assert n_samples == self.y.shape[0]
        self.samples = n_samples

    def __len__(self):
        return self.samples

    def __getitem__(self, index):
        return self.x[index], self.y[index], self.z[index]
class AudioDataset(Dataset):
    """Map-style dataset for inference: yields (features, difficulty) pairs.

    Unlike :class:`TrainingDataset` there is no onset ground truth — the
    mel features are computed directly from the audio file.
    """

    def __init__(self, file, diff, segments):
        self.extractor = FeatureExtractor()
        mels = self.extractor.extract_mel(file)
        data, diffs = self.extractor.convert_mel_to_in_data(mels, diff, intensity=segments)
        # Convert extractor output to tensors once at construction time.
        self.x = torch.from_numpy(np.array(data))
        self.y = torch.from_numpy(diffs)
        # Features and difficulty labels must be frame-aligned.
        assert self.x.shape[0] == self.y.shape[0]
        self.samples = self.x.shape[0]

    def __len__(self):
        return self.samples

    def __getitem__(self, index):
        return self.x[index], self.y[index]
class CBlstm(nn.Module):
    """Convolutional bidirectional-LSTM network for onset detection.

    ``forward`` consumes a 3-channel spectrogram window ``x`` plus a
    difficulty vector ``y`` and emits one logit per sample (sigmoid
    probability in eval mode).  The ``.view(-1, 7, 6)`` in ``forward``
    implies 7 time steps and a 6-dimensional difficulty encoding —
    presumably matching the feature extractor's output; verify there.
    """
    def __init__(self):
        super().__init__()
        # Conv stack: channels 3 -> 10 -> 20, each followed by (3,1) max-pool.
        self.conv1 = nn.Conv2d(3, 10, (3, 7))
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.norm = nn.BatchNorm2d(20)
        # Bidirectional, so the LSTM output is 2 * hidden_size = 256 per step.
        self.lstm1 = nn.LSTM(input_size=166, hidden_size=128, batch_first=True, bidirectional=True)
        self.tanh = nn.Tanh()
        # 7 time steps * 2 directions * 128 hidden units flattened.
        self.fc1 = nn.Linear(7*2*128, 128)
        self.out = nn.Linear(128, 1)
        self.drop = nn.Dropout(p=0.8)
        self.sig = nn.Sigmoid()
    def forward(self, x, y):
        """Return onset logits (training) or probabilities (eval).

        :param x: 3-channel spectrogram batch fed to the conv stack.
        :param y: difficulty conditioning vector, broadcast over time steps.
        """
        x = F.dropout(F.relu(self.conv1(x)), p=0.2, training=self.training)
        x = F.max_pool2d(x, (3,1))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, (3,1))
        x = self.norm(x)
        # Move the time axis forward so flatten yields (batch, time, features).
        x = x.permute(0, 3, 2, 1)
        x = torch.flatten(x, start_dim=2)
        # Repeat the difficulty vector for each of the 7 time steps and
        # concatenate it onto the conv features (166 = conv feats + 6 diff dims).
        y = torch.repeat_interleave(y, x.shape[1], dim=1).view(-1, 7, 6)
        x = torch.cat((x, y), 2)
        x, _ = self.lstm1(x)
        x = self.tanh(x)
        x = self.drop(torch.flatten(x, start_dim=1))
        x = self.drop(F.relu(self.fc1(x)))
        x = self.out(x)
        # Only squash to probabilities outside training; BCEWithLogitsLoss
        # expects raw logits during training.
        if not self.training:
            x = self.sig(x)
        return x
    def start_training(self, dir, device, outputdir="..\\models\\default", ev_set=None, file_set=None, cache=None):
        """Train the model on .osu files under *dir* with early stopping.

        Splits the beatmap folders into train/validation sets (or loads a
        pre-saved split from *ev_set*/*file_set*), trains for up to EPOCHS
        epochs, checkpoints the best-F1 and final models into *outputdir*,
        and returns (train_losses, val_losses, f1_scores) per epoch.
        """
        if not os.path.exists(outputdir):
            os.mkdir(outputdir)
        all_files = [f for f in glob.glob(os.path.join(dir, "**/*.osu"), recursive=True)]
        eval_files_len = int(len(all_files) * VAL_SIZE) + 1
        # Split by folder so all difficulties of one song stay on one side.
        folders = glob.glob(os.path.join(dir, "*\\"))
        np.random.shuffle(folders)
        eval_files = []
        i = 0
        while len(eval_files) < eval_files_len:
            eval_files.extend([f for f in glob.glob(os.path.join(folders[i], "*.osu"))])
            i += 1
        files = [x for x in all_files if x not in eval_files]
        np.random.shuffle(files)
        # NOTE(review): eval files were already excluded above, so this slice
        # drops eval_files_len *additional* training files — confirm intended.
        files = files[:-eval_files_len]
        # Optionally restore a previously saved train/val split.
        if ev_set is not None and file_set is not None:
            eval_files = np.load(ev_set)
            files = np.load(file_set)
        optimizer = optim.Adam(self.parameters(), lr=LR_RATE)
        #optimizer = optim.SGD(self.parameters(), lr=LR_RATE, momentum=0.85)
        loss_fn = nn.BCEWithLogitsLoss()
        loss_vals = []
        val_losses = []
        f_scores = []
        highest_f = 0
        prev_val_loss = float('inf')
        prev_state = self.state_dict()
        model_thresh = 0
        training_patience = PATIENCE
        for epoch in range(EPOCHS):
            self.train()
            np.random.shuffle(files)
            running_loss = 0
            dataset_len = 0
            for i, file in enumerate(files):
                try:
                    dataset = TrainingDataset(file, cache=cache)
                    loader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE)
                    dataset_len += len(loader)
                    print("Epoch: " + str(epoch) + "/" + str(EPOCHS) + ", data: " + str(i) + "/" + str(len(files)))
                    for (batch_X, batch_Y, batch_Z) in tqdm(loader):
                        optimizer.zero_grad()
                        out_on = self(batch_X.to(device, dtype=torch.float), batch_Y.to(device, dtype=torch.float))
                        loss = loss_fn(out_on.view(-1), batch_Z.to(device, dtype=torch.float))
                        loss.backward()
                        optimizer.step()
                        running_loss += loss.item()
                except FileNotFoundError as e:
                    # NOTE(review): removing from `files` while enumerating it
                    # skips the next element — consider collecting bad files
                    # and removing after the loop.
                    print(str(e))
                    files.remove(file)
            train_loss = running_loss/dataset_len
            print("loss: ", train_loss)
            loss_vals.append(train_loss)
            val_loss, f1, thresh = self.evaluate(eval_files, device)
            # Early-stopping bookkeeping: count consecutive epochs where the
            # validation loss rose; keep a checkpoint of the last good state.
            if prev_val_loss < val_loss:
                # NOTE(review): `abs(training_patience - 5)` hard-codes 5 —
                # presumably meant to mirror PATIENCE; verify.
                print("loss increased", abs(training_patience - 5))
                training_patience -= 1
                if training_patience == -1:
                    print("Early training stop checkpoint after", epoch, "epochs")
                    torch.save(prev_state, os.path.join(outputdir, "onset_model_check.pth"))
                    np.save(os.path.join(outputdir, "onset_check_thresh.npy"), np.array(model_thresh))
            else:
                prev_state = self.state_dict()
                training_patience = PATIENCE
                model_thresh = thresh
            prev_val_loss = val_loss
            f_scores.append(f1)
            val_losses.append(val_loss)
            # Separately track the best-F1 model regardless of loss trend.
            if f_scores[-1] > highest_f:
                np.save(os.path.join(outputdir, "onset_thresh_best_f1.npy"), np.array(thresh))
                torch.save(self.state_dict(), os.path.join(outputdir, "onset_model_best_f1.pth"))
                highest_f = f_scores[-1]
        # Persist the final model, its threshold and the train/val split.
        np.save(os.path.join(outputdir, "onset_thresh.npy"), np.array(thresh))
        np.save(os.path.join(outputdir, "train_files.npy"), np.array(files))
        np.save(os.path.join(outputdir, "val_files.npy"), np.array(eval_files))
        torch.save(self.state_dict(), os.path.join(outputdir, "onset_model.pth"))
        return loss_vals, val_losses, f_scores
    def wide_window(self, onsets, ground, window_size=2):
        """Relax frame-exact matching: mark a frame positive if any frame
        within +/- *window_size* is positive, for both predictions and truth.

        Returns the pair (windowed_onsets, windowed_ground) as Python lists;
        the 2*window_size border frames are dropped.
        """
        windowed_onset = []
        windowed_ground = []
        for i in range(window_size, onsets.shape[0] - window_size):
            if np.count_nonzero(onsets[i - window_size:i + window_size + 1]) > 0:
                windowed_onset.append(1)
            else:
                windowed_onset.append(0)
            if np.count_nonzero(ground[i - window_size:i + window_size + 1]) > 0:
                windowed_ground.append(1)
            else:
                windowed_ground.append(0)
        return windowed_onset, windowed_ground
    def evaluate(self, files, device, dir=None, model=None):
        """Evaluate on *files*: returns (val_loss, f1, best_threshold).

        If *model* is given, weights are loaded from that directory first;
        if *dir* is given, *files* is replaced by all .osu files under it.
        The decision threshold is chosen by maximizing F1 on the
        precision-recall curve, then adjusted per-frame by difficulty.
        """
        if model is not None:
            self.load_state_dict(torch.load(os.path.join(model, "onset_model.pth"), map_location=device))
        if dir is not None:
            files = [f for f in glob.glob(os.path.join(dir, "**/*.osu"), recursive=True)]
        loss_fn = nn.BCEWithLogitsLoss()
        ground = []
        diffs = []
        running_loss = 0
        dataset_len = 0
        with torch.no_grad():
            self.eval()
            predictions = []
            for i, file in tqdm(enumerate(files)):
                print("Data: " + str(i) + "/" + str(len(files)))
                try:
                    dataset = TrainingDataset(file, fuzzy_label=False)
                    loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE)
                    dataset_len += len(loader)
                    for (batch_X, batch_Y, batch_Z) in tqdm(loader):
                        out_0 = self(batch_X.to(device, dtype=torch.float), batch_Y.to(device, dtype=torch.float))
                        # Model is in eval mode, so out_0 is already sigmoid'd
                        # by forward(); this applies sigmoid a second time.
                        # NOTE(review): double sigmoid — confirm intended.
                        loss = loss_fn(out_0.view(-1), batch_Z.to(device, dtype=torch.float))
                        out_0 = self.sig(out_0)
                        predictions.extend(out_0.cpu())
                        diffs.extend(torch.argmax(batch_Y, dim=1).cpu())
                        ground.extend(batch_Z.view(-1).cpu())
                        running_loss += loss.item()
                except FileNotFoundError as e:
                    print(str(e))
                    files.remove(file)
        predictions = np.array(predictions)
        # Smooth with a 5-tap Hamming window before peak picking.
        predictions_smooth = np.convolve(predictions, np.hamming(5), 'same')
        pr, re, thresh = precision_recall_curve(ground, predictions)
        # NOTE(review): thresh has len(pr) - 1 entries and fscore may contain
        # NaN where pr + re == 0; argmax could in principle select an index
        # past the end of thresh — guard if that ever triggers.
        fscore = (2*pr*re)/(pr+re)
        ix = np.argmax(fscore)
        print("Best:", thresh[ix], "fscore1:", fscore[ix])
        #plt.plot([0,1],[0,1], linestyle="--")
        #plt.plot(pr, re, marker='.')
        #plt.scatter(pr[ix], re[ix], marker='o', color="black")
        #plt.show()
        # Keep only local maxima of the smoothed curve as onset candidates,
        # with a difficulty-dependent lowering of the decision threshold.
        maxima = argrelextrema(predictions_smooth, np.greater_equal, order=1)[0]
        pred_maxed = np.zeros(len(predictions))
        for i in maxima:
            if predictions[i] >= (thresh[ix] - diffs[i] * 0.015):
                pred_maxed[i] = 1
        pred_maxed, ground = self.wide_window(pred_maxed, ground)
        val_loss = running_loss/dataset_len
        onset_f = f1_score(ground, pred_maxed)
        print("f1_score:", onset_f)
        return val_loss, onset_f, thresh[ix]
    def _calc_density(self, onsets, tempo):
        """Return the mean tempo-normalized gap between consecutive onsets.

        *onsets* are frame indices (converted to ms at sr=44100); *tempo* is
        an array of (time_ms, beat_length) rows used to normalize gaps via
        convert_time. Returns 0 for fewer than two onsets.
        """
        if not onsets or len(onsets) == 1:
            return 0
        onsets = librosa.frames_to_time(onsets, sr=44100) * 1000
        avg = 0
        prev = onsets[0]
        curr_tempo = -1
        for onset in onsets[1:]:
            # Advance the tempo pointer when the onset crosses the next
            # tempo-change timestamp.
            if curr_tempo + 1 < tempo.shape[0]:
                if onset >= tempo[curr_tempo + 1][0]:
                    curr_tempo += 1
            # NOTE(review): indexes tempo[curr_tempo - 1] — before the first
            # tempo change this is tempo[-2] (wraps around); confirm intended.
            avg += convert_time(onset - prev, tempo[curr_tempo - 1][1])
            prev = onset
        return avg / (len(onsets) - 1)
    def infer(self, file, difficulty, segments, tempo, device, model="..\\models\\default"):
        """Predict onset times (ms) for an audio *file* at a given difficulty.

        Loads the saved model and threshold from *model*, picks local maxima
        of the smoothed predictions, then progressively lowers the threshold
        (starting at thresh - difficulty * 0.015) until the resulting onset
        density drops below 8.
        """
        self.load_state_dict(torch.load(os.path.join(model, "onset_model.pth"), map_location=device))
        thresh = np.load(os.path.join(model, "onset_thresh.npy"))
        predictions = []
        with torch.no_grad():
            self.eval()
            dataset = AudioDataset(file, difficulty, segments)
            dataloader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE)
            for (x, y) in tqdm(dataloader):
                out_0 = self(x.to(device, dtype=torch.float), y.to(device, dtype=torch.float))
                # NOTE(review): forward() already applied sigmoid in eval
                # mode, so this squashes twice — confirm intended.
                out_0 = self.sig(out_0)
                predictions.extend(out_0.cpu())
        predictions_smooth = np.convolve(predictions, np.hamming(5), 'same')
        maxima = argrelextrema(predictions_smooth, np.greater_equal, order=1)[0]
        avg_density = 0
        j = - difficulty
        # Lower the threshold one step (0.015) per iteration until density
        # is in range; the loop always runs at least once, so `hits` is bound.
        while avg_density >= 8 or avg_density == 0:
            hits = []
            for i in maxima:
                if predictions[i] >= (thresh + j * 0.015):
                    hits.append(i)
            avg_density = self._calc_density(hits, tempo)
            print(avg_density)
            j -= 1
        print(len(hits))
        # Convert frame indices to milliseconds.
        return librosa.frames_to_time(hits, sr=44100) * 1000
| [
"torch.nn.Dropout",
"torch.nn.Tanh",
"utilities.feature_extractor.convert_time",
"scipy.signal.argrelextrema",
"torch.from_numpy",
"numpy.count_nonzero",
"numpy.array",
"torch.repeat_interleave",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"os.path.exists",
"torch.nn.LSTM",
"librosa.frames_t... | [((612, 630), 'utilities.feature_extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (628, 630), False, 'from utilities.feature_extractor import FeatureExtractor, convert_time\n'), ((775, 798), 'torch.from_numpy', 'torch.from_numpy', (['diffs'], {}), '(diffs)\n', (791, 798), False, 'import torch\n'), ((810, 834), 'torch.from_numpy', 'torch.from_numpy', (['onsets'], {}), '(onsets)\n', (826, 834), False, 'import torch\n'), ((1178, 1196), 'utilities.feature_extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (1194, 1196), False, 'from utilities.feature_extractor import FeatureExtractor, convert_time\n'), ((1380, 1403), 'torch.from_numpy', 'torch.from_numpy', (['diffs'], {}), '(diffs)\n', (1396, 1403), False, 'import torch\n'), ((1678, 1702), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(10)', '(3, 7)'], {}), '(3, 10, (3, 7))\n', (1687, 1702), True, 'import torch.nn as nn\n'), ((1718, 1738), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)', '(3)'], {}), '(10, 20, 3)\n', (1727, 1738), True, 'import torch.nn as nn\n'), ((1753, 1771), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(20)'], {}), '(20)\n', (1767, 1771), True, 'import torch.nn as nn\n'), ((1788, 1866), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(166)', 'hidden_size': '(128)', 'batch_first': '(True)', 'bidirectional': '(True)'}), '(input_size=166, hidden_size=128, batch_first=True, bidirectional=True)\n', (1795, 1866), True, 'import torch.nn as nn\n'), ((1881, 1890), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1888, 1890), True, 'import torch.nn as nn\n'), ((1905, 1932), 'torch.nn.Linear', 'nn.Linear', (['(7 * 2 * 128)', '(128)'], {}), '(7 * 2 * 128, 128)\n', (1914, 1932), True, 'import torch.nn as nn\n'), ((1942, 1959), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (1951, 1959), True, 'import torch.nn as nn\n'), ((1975, 1992), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.8)'}), '(p=0.8)\n', (1985, 1992), True, 'import 
torch.nn as nn\n'), ((2006, 2018), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2016, 2018), True, 'import torch.nn as nn\n'), ((2122, 2145), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(3, 1)'], {}), '(x, (3, 1))\n', (2134, 2145), True, 'import torch.nn.functional as F\n'), ((2179, 2202), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(3, 1)'], {}), '(x, (3, 1))\n', (2191, 2202), True, 'import torch.nn.functional as F\n'), ((2256, 2285), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(2)'}), '(x, start_dim=2)\n', (2269, 2285), False, 'import torch\n'), ((2359, 2379), 'torch.cat', 'torch.cat', (['(x, y)', '(2)'], {}), '((x, y), 2)\n', (2368, 2379), False, 'import torch\n'), ((2943, 2969), 'numpy.random.shuffle', 'np.random.shuffle', (['folders'], {}), '(folders)\n', (2960, 2969), True, 'import numpy as np\n'), ((3186, 3210), 'numpy.random.shuffle', 'np.random.shuffle', (['files'], {}), '(files)\n', (3203, 3210), True, 'import numpy as np\n'), ((3495, 3517), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (3515, 3517), True, 'import torch.nn as nn\n'), ((6608, 6630), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6628, 6630), True, 'import torch.nn as nn\n'), ((8291, 8319), 'sklearn.metrics.f1_score', 'f1_score', (['ground', 'pred_maxed'], {}), '(ground, pred_maxed)\n', (8299, 8319), False, 'from sklearn.metrics import f1_score, precision_recall_curve\n'), ((735, 762), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (743, 762), True, 'import numpy as np\n'), ((1353, 1367), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1361, 1367), True, 'import numpy as np\n'), ((2439, 2468), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (2452, 2468), False, 'import torch\n'), ((2705, 2730), 'os.path.exists', 'os.path.exists', (['outputdir'], {}), '(outputdir)\n', (2719, 2730), False, 'import os\n'), 
((2735, 2754), 'os.mkdir', 'os.mkdir', (['outputdir'], {}), '(outputdir)\n', (2743, 2754), False, 'import os\n'), ((2915, 2939), 'os.path.join', 'os.path.join', (['dir', '"""*\\\\"""'], {}), "(dir, '*\\\\')\n", (2927, 2939), False, 'import os\n'), ((3311, 3326), 'numpy.load', 'np.load', (['ev_set'], {}), '(ev_set)\n', (3318, 3326), True, 'import numpy as np\n'), ((3338, 3355), 'numpy.load', 'np.load', (['file_set'], {}), '(file_set)\n', (3345, 3355), True, 'import numpy as np\n'), ((3750, 3774), 'numpy.random.shuffle', 'np.random.shuffle', (['files'], {}), '(files)\n', (3767, 3774), True, 'import numpy as np\n'), ((5500, 5543), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_thresh.npy"""'], {}), "(outputdir, 'onset_thresh.npy')\n", (5512, 5543), False, 'import os\n'), ((5545, 5561), 'numpy.array', 'np.array', (['thresh'], {}), '(thresh)\n', (5553, 5561), True, 'import numpy as np\n'), ((5573, 5615), 'os.path.join', 'os.path.join', (['outputdir', '"""train_files.npy"""'], {}), "(outputdir, 'train_files.npy')\n", (5585, 5615), False, 'import os\n'), ((5617, 5632), 'numpy.array', 'np.array', (['files'], {}), '(files)\n', (5625, 5632), True, 'import numpy as np\n'), ((5644, 5684), 'os.path.join', 'os.path.join', (['outputdir', '"""val_files.npy"""'], {}), "(outputdir, 'val_files.npy')\n", (5656, 5684), False, 'import os\n'), ((5686, 5706), 'numpy.array', 'np.array', (['eval_files'], {}), '(eval_files)\n', (5694, 5706), True, 'import numpy as np\n'), ((5740, 5782), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_model.pth"""'], {}), "(outputdir, 'onset_model.pth')\n", (5752, 5782), False, 'import os\n'), ((6702, 6717), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6715, 6717), False, 'import torch\n'), ((7541, 7562), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (7549, 7562), True, 'import numpy as np\n'), ((7657, 7700), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['ground', 'predictions'], {}), 
'(ground, predictions)\n', (7679, 7700), False, 'from sklearn.metrics import f1_score, precision_recall_curve\n'), ((7739, 7756), 'numpy.argmax', 'np.argmax', (['fscore'], {}), '(fscore)\n', (7748, 7756), True, 'import numpy as np\n'), ((8491, 8531), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onsets'], {'sr': '(44100)'}), '(onsets, sr=44100)\n', (8513, 8531), False, 'import librosa\n'), ((8725, 8777), 'utilities.feature_extractor.convert_time', 'convert_time', (['(onset - prev)', 'tempo[curr_tempo - 1][1]'], {}), '(onset - prev, tempo[curr_tempo - 1][1])\n', (8737, 8777), False, 'from utilities.feature_extractor import FeatureExtractor, convert_time\n'), ((9033, 9072), 'os.path.join', 'os.path.join', (['model', '"""onset_thresh.npy"""'], {}), "(model, 'onset_thresh.npy')\n", (9045, 9072), False, 'import os\n'), ((9100, 9115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9113, 9115), False, 'import torch\n'), ((9202, 9252), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'BATCH_SIZE'}), '(dataset=dataset, batch_size=BATCH_SIZE)\n', (9212, 9252), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((9270, 9286), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (9274, 9286), False, 'from tqdm import tqdm\n'), ((9484, 9497), 'numpy.hamming', 'np.hamming', (['(5)'], {}), '(5)\n', (9494, 9497), True, 'import numpy as np\n'), ((9518, 9578), 'scipy.signal.argrelextrema', 'argrelextrema', (['predictions_smooth', 'np.greater_equal'], {'order': '(1)'}), '(predictions_smooth, np.greater_equal, order=1)\n', (9531, 9578), False, 'from scipy.signal import argrelextrema\n'), ((9875, 9913), 'librosa.frames_to_time', 'librosa.frames_to_time', (['hits'], {'sr': '(44100)'}), '(hits, sr=44100)\n', (9897, 9913), False, 'import librosa\n'), ((2292, 2337), 'torch.repeat_interleave', 'torch.repeat_interleave', (['y', 'x.shape[1]'], {'dim': '(1)'}), '(y, x.shape[1], dim=1)\n', (2315, 2337), False, 'import 
torch\n'), ((5995, 6056), 'numpy.count_nonzero', 'np.count_nonzero', (['onsets[i - window_size:i + window_size + 1]'], {}), '(onsets[i - window_size:i + window_size + 1])\n', (6011, 6056), True, 'import numpy as np\n'), ((6135, 6196), 'numpy.count_nonzero', 'np.count_nonzero', (['ground[i - window_size:i + window_size + 1]'], {}), '(ground[i - window_size:i + window_size + 1])\n', (6151, 6196), True, 'import numpy as np\n'), ((7612, 7625), 'numpy.hamming', 'np.hamming', (['(5)'], {}), '(5)\n', (7622, 7625), True, 'import numpy as np\n'), ((7972, 8032), 'scipy.signal.argrelextrema', 'argrelextrema', (['predictions_smooth', 'np.greater_equal'], {'order': '(1)'}), '(predictions_smooth, np.greater_equal, order=1)\n', (7985, 8032), False, 'from scipy.signal import argrelextrema\n'), ((8952, 8990), 'os.path.join', 'os.path.join', (['model', '"""onset_model.pth"""'], {}), "(model, 'onset_model.pth')\n", (8964, 8990), False, 'import os\n'), ((2791, 2820), 'os.path.join', 'os.path.join', (['dir', '"""**/*.osu"""'], {}), "(dir, '**/*.osu')\n", (2803, 2820), False, 'import os\n'), ((3923, 3979), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': 'BATCH_SIZE'}), '(dataset, shuffle=True, batch_size=BATCH_SIZE)\n', (3933, 3979), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4153, 4165), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (4157, 4165), False, 'from tqdm import tqdm\n'), ((5303, 5354), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_thresh_best_f1.npy"""'], {}), "(outputdir, 'onset_thresh_best_f1.npy')\n", (5315, 5354), False, 'import os\n'), ((5356, 5372), 'numpy.array', 'np.array', (['thresh'], {}), '(thresh)\n', (5364, 5372), True, 'import numpy as np\n'), ((5408, 5458), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_model_best_f1.pth"""'], {}), "(outputdir, 'onset_model_best_f1.pth')\n", (5420, 5458), False, 'import os\n'), ((6431, 6469), 'os.path.join', 'os.path.join', 
(['model', '"""onset_model.pth"""'], {}), "(model, 'onset_model.pth')\n", (6443, 6469), False, 'import os\n'), ((6928, 6985), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'BATCH_SIZE'}), '(dataset, shuffle=False, batch_size=BATCH_SIZE)\n', (6938, 6985), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7058, 7070), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (7062, 7070), False, 'from tqdm import tqdm\n'), ((4935, 4983), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_model_check.pth"""'], {}), "(outputdir, 'onset_model_check.pth')\n", (4947, 4983), False, 'import os\n'), ((4998, 5047), 'os.path.join', 'os.path.join', (['outputdir', '"""onset_check_thresh.npy"""'], {}), "(outputdir, 'onset_check_thresh.npy')\n", (5010, 5047), False, 'import os\n'), ((5049, 5071), 'numpy.array', 'np.array', (['model_thresh'], {}), '(model_thresh)\n', (5057, 5071), True, 'import numpy as np\n'), ((6548, 6577), 'os.path.join', 'os.path.join', (['dir', '"""**/*.osu"""'], {}), "(dir, '**/*.osu')\n", (6560, 6577), False, 'import os\n'), ((3081, 3114), 'os.path.join', 'os.path.join', (['folders[i]', '"""*.osu"""'], {}), "(folders[i], '*.osu')\n", (3093, 3114), False, 'import os\n'), ((7332, 7360), 'torch.argmax', 'torch.argmax', (['batch_Y'], {'dim': '(1)'}), '(batch_Y, dim=1)\n', (7344, 7360), False, 'import torch\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
import torch
import pickle
import numpy as np
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.triplet_dataset import (
TripletDataConfig,
TripletDataset,
TripletDatasetCreator,
get_features_or_waveform,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset
logger = logging.getLogger(__name__)
@register_task("triplet")
class TripletTask(LegacyFairseqTask):
    """Fairseq task pairing speech, source text and target text ("triplet").

    Wires TripletDataset loading, source/target dictionaries and tokenizers
    into the fairseq training loop, and can optionally dump encoder-internal
    audio/text features to a pickle file during validation.
    """
    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments on *parser*."""
        parser.add_argument("data", help="manifest root path")
        parser.add_argument(
            "--normalize",
            action="store_true",
            help="if set, normalizes input to have 0 mean and unit variance",
        )
        parser.add_argument(
            "--config-yaml",
            type=str,
            default="config.yaml",
            help="Configuration YAML filename (under manifest root)",
        )
        parser.add_argument(
            "--max-source-positions",
            default=6000,
            type=int,
            metavar="N",
            help="max number of tokens in the source sequence",
        )
        parser.add_argument(
            "--max-target-positions",
            default=1024,
            type=int,
            metavar="N",
            help="max number of tokens in the target sequence",
        )
        parser.add_argument(
            "--dump-feature-to-file",
            type=str, default=None,
        )
        parser.add_argument(
            "--sample-rate", type=int, default=16000
        )
    def __init__(self, args, tgt_dict, src_dict, data_cfg):
        super().__init__(args)
        self.tgt_dict = tgt_dict
        self.src_dict = src_dict
        self.data_cfg = data_cfg
        self.dump_feature_to_file = args.dump_feature_to_file
        # Feature dumping is opt-in: only allocate the accumulators when a
        # target file was requested on the command line.
        if self.dump_feature_to_file is not None:
            self.cached_features = {
                _name: [] for _name in
                ('src_text', 'audio_features', 'text_features')
            }
        else:
            self.cached_features = None
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Factory: load the data config and both dictionaries, then build the task."""
        data_cfg = TripletDataConfig(op.join(args.data, args.config_yaml))
        def load_dict(vocab_filename):
            # Helper: load a fairseq Dictionary from the manifest root,
            # failing loudly if the vocab file is missing.
            _dict_path = op.join(args.data, vocab_filename)
            if not op.isfile(_dict_path):
                raise FileNotFoundError(f"Dict not found: {_dict_path}")
            _dict = Dictionary.load(_dict_path)
            return _dict
        tgt_dict = load_dict(data_cfg.vocab_filename)
        src_dict = load_dict(data_cfg.src_vocab_filename)
        logger.info(
            f"target dictionary size ({data_cfg.vocab_filename}): "
            f"{len(tgt_dict):,}"
        )
        logger.info(
            f"source dictionary size ({data_cfg.src_vocab_filename}): "
            f"{len(src_dict):,}"
        )
        return cls(args, tgt_dict, src_dict, data_cfg)
    def build_criterion(self, args):
        """Build the training criterion; guard the lang-tag prefix setting."""
        from fairseq import criterions
        # When a target-language tag is prepended as BOS, the criterion must
        # ignore exactly that one prefix token.
        if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
            raise ValueError(
                'Please set "--ignore-prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        return criterions.build_criterion(args, self)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load *split* from the TSV manifests into self.datasets[split]."""
        is_train_split = split.startswith("train")
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        src_bpe_tokenizer = self.build_src_bpe()
        self.datasets[split] = TripletDatasetCreator.from_tsv(
            self.args.data,
            self.data_cfg,
            split,
            self.tgt_dict,
            self.src_dict,
            pre_tokenizer,
            bpe_tokenizer,
            src_bpe_tokenizer,
            is_train_split=is_train_split,
            epoch=epoch,
            seed=self.args.seed,
            normalize=self.args.normalize,
            sample_rate=self.args.sample_rate,
        )
    @property
    def target_dictionary(self):
        return self.tgt_dict
    @property
    def source_dictionary(self):
        return self.src_dict
    def max_positions(self):
        """Return (max_source_positions, max_target_positions)."""
        return self.args.max_source_positions, self.args.max_target_positions
    def build_model(self, args):
        # Propagate the input feature geometry from the data config onto the
        # model args before construction.
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        return super().build_model(args)
    def build_generator(
        self,
        models,
        args,
        seq_gen_cls=None,
        extra_gen_cls_kwargs=None,
    ):
        """Build a sequence generator that strips language-tag symbols from output."""
        if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
            raise ValueError(
                'Please set "--prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        # Collect indices of all language-tag symbols so the generator can
        # drop them from decoded output.
        lang_token_ids = {
            i
            for s, i in self.tgt_dict.indices.items()
            if SpeechToTextDataset.is_lang_tag(s)
        }
        # NOTE(review): any caller-supplied extra_gen_cls_kwargs is discarded
        # here — confirm that overriding is intended.
        extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
        return super().build_generator(
            models, args, seq_gen_cls=None,
            extra_gen_cls_kwargs=extra_gen_cls_kwargs
        )
    def build_tokenizer(self, args):
        """Build (and cache on self) the pre-tokenizer from the data config."""
        logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
        self.tokenizer = encoders.build_tokenizer(
            Namespace(**self.data_cfg.pre_tokenizer))
        return self.tokenizer
    def build_bpe(self, args):
        """Build (and cache on self) the target-side BPE model."""
        logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
        self.bpe = encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
        return self.bpe
    def build_src_bpe(self):
        """Build (and cache on self) the source-side BPE model."""
        logger.info(f"source tokenizer: {self.data_cfg.src_bpe_tokenizer}")
        self.src_bpe = encoders.build_bpe(
            Namespace(**self.data_cfg.src_bpe_tokenizer))
        return self.src_bpe
    '''
    @classmethod
    def build_dataset_for_inference(cls, audio_paths, n_frames, **kwargs):
        return TripletDataset("interactive", False, {}, audio_paths, n_frames)
    '''
    def valid_step(self, sample, model, criterion):
        """Run a validation step; optionally harvest encoder-internal features.

        When feature dumping is enabled, the model is run once on the speech
        input and once on the text input, and the internal representations
        (transposed to batch-first) are accumulated in self.cached_features.
        """
        if self.dump_feature_to_file is not None:
            model.eval()
            with torch.no_grad():
                st_input = sample['net_input']
                mt_input = {
                    "src_tokens": sample["src_text"],
                    "src_lengths": sample["src_text_lengths"],
                    "prev_output_tokens":
                        sample["net_input"]["prev_output_tokens"],
                    "mask": sample["net_input"]["mask"],
                }
                _, audio_internal = model.forward_with_internal(**st_input)
                _, text_internal = model.forward_with_internal(**mt_input)
                self.cached_features['audio_features'].append(
                    audio_internal.detach().cpu().numpy().transpose(1, 0, 2),
                )
                self.cached_features['text_features'].append(
                    text_internal.detach().cpu().numpy().transpose(1, 0, 2),
                )
                # Look up the raw source texts via the sample ids.
                # NOTE(review): hard-codes the 'dev_wave' split — this will
                # KeyError when validating any other split; confirm.
                self.cached_features['src_text'].extend([
                    self.datasets['dev_wave'].datasets[0].src_texts[i]
                    for i in sample['id']
                ])
        return super().valid_step(sample, model, criterion)
    def dump_features(self):
        """Concatenate the cached features and pickle them to the dump file."""
        if self.cached_features is None:
            return
        with open(self.dump_feature_to_file, 'wb') as f:
            self.cached_features['audio_features'] = np.concatenate(
                self.cached_features['audio_features']
            )
            self.cached_features['text_features'] = np.concatenate(
                self.cached_features['text_features']
            )
            pickle.dump(self.cached_features, f)
    def get_interactive_tokens_and_lengths(self, lines, encode_fn):
        """For interactive mode, treat *lines* as audio paths and return frame counts."""
        n_frames = [get_features_or_waveform(p, True).shape[0] for p in lines]
        return lines, n_frames
    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """Wrap audio paths and lengths in an interactive TripletDataset."""
        return TripletDataset(
            "interactive", False, self.data_cfg, src_tokens, src_lengths
        )
| [
"logging.getLogger",
"fairseq.tasks.register_task",
"fairseq.data.audio.triplet_dataset.TripletDatasetCreator.from_tsv",
"fairseq.data.audio.triplet_dataset.TripletDataset",
"pickle.dump",
"fairseq.criterions.build_criterion",
"fairseq.data.audio.speech_to_text_dataset.SpeechToTextDataset.is_lang_tag",
... | [((633, 660), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (650, 660), False, 'import logging\n'), ((664, 688), 'fairseq.tasks.register_task', 'register_task', (['"""triplet"""'], {}), "('triplet')\n", (677, 688), False, 'from fairseq.tasks import LegacyFairseqTask, register_task\n'), ((3551, 3589), 'fairseq.criterions.build_criterion', 'criterions.build_criterion', (['args', 'self'], {}), '(args, self)\n', (3577, 3589), False, 'from fairseq import criterions\n'), ((3897, 4194), 'fairseq.data.audio.triplet_dataset.TripletDatasetCreator.from_tsv', 'TripletDatasetCreator.from_tsv', (['self.args.data', 'self.data_cfg', 'split', 'self.tgt_dict', 'self.src_dict', 'pre_tokenizer', 'bpe_tokenizer', 'src_bpe_tokenizer'], {'is_train_split': 'is_train_split', 'epoch': 'epoch', 'seed': 'self.args.seed', 'normalize': 'self.args.normalize', 'sample_rate': 'self.args.sample_rate'}), '(self.args.data, self.data_cfg, split, self.\n tgt_dict, self.src_dict, pre_tokenizer, bpe_tokenizer,\n src_bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=\n self.args.seed, normalize=self.args.normalize, sample_rate=self.args.\n sample_rate)\n', (3927, 4194), False, 'from fairseq.data.audio.triplet_dataset import TripletDataConfig, TripletDataset, TripletDatasetCreator, get_features_or_waveform\n'), ((8431, 8507), 'fairseq.data.audio.triplet_dataset.TripletDataset', 'TripletDataset', (['"""interactive"""', '(False)', 'self.data_cfg', 'src_tokens', 'src_lengths'], {}), "('interactive', False, self.data_cfg, src_tokens, src_lengths)\n", (8445, 8507), False, 'from fairseq.data.audio.triplet_dataset import TripletDataConfig, TripletDataset, TripletDatasetCreator, get_features_or_waveform\n'), ((2446, 2482), 'os.path.join', 'op.join', (['args.data', 'args.config_yaml'], {}), '(args.data, args.config_yaml)\n', (2453, 2482), True, 'import os.path as op\n'), ((2549, 2583), 'os.path.join', 'op.join', (['args.data', 'vocab_filename'], {}), '(args.data, 
vocab_filename)\n', (2556, 2583), True, 'import os.path as op\n'), ((2719, 2746), 'fairseq.data.Dictionary.load', 'Dictionary.load', (['_dict_path'], {}), '(_dict_path)\n', (2734, 2746), False, 'from fairseq.data import Dictionary, encoders\n'), ((5740, 5780), 'argparse.Namespace', 'Namespace', ([], {}), '(**self.data_cfg.pre_tokenizer)\n', (5749, 5780), False, 'from argparse import Namespace\n'), ((5947, 5987), 'argparse.Namespace', 'Namespace', ([], {}), '(**self.data_cfg.bpe_tokenizer)\n', (5956, 5987), False, 'from argparse import Namespace\n'), ((6174, 6218), 'argparse.Namespace', 'Namespace', ([], {}), '(**self.data_cfg.src_bpe_tokenizer)\n', (6183, 6218), False, 'from argparse import Namespace\n'), ((7888, 7942), 'numpy.concatenate', 'np.concatenate', (["self.cached_features['audio_features']"], {}), "(self.cached_features['audio_features'])\n", (7902, 7942), True, 'import numpy as np\n'), ((8025, 8078), 'numpy.concatenate', 'np.concatenate', (["self.cached_features['text_features']"], {}), "(self.cached_features['text_features'])\n", (8039, 8078), True, 'import numpy as np\n'), ((8121, 8157), 'pickle.dump', 'pickle.dump', (['self.cached_features', 'f'], {}), '(self.cached_features, f)\n', (8132, 8157), False, 'import pickle\n'), ((2603, 2624), 'os.path.isfile', 'op.isfile', (['_dict_path'], {}), '(_dict_path)\n', (2612, 2624), True, 'import os.path as op\n'), ((5297, 5331), 'fairseq.data.audio.speech_to_text_dataset.SpeechToTextDataset.is_lang_tag', 'SpeechToTextDataset.is_lang_tag', (['s'], {}), '(s)\n', (5328, 5331), False, 'from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset\n'), ((6581, 6596), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6594, 6596), False, 'import torch\n'), ((8247, 8280), 'fairseq.data.audio.triplet_dataset.get_features_or_waveform', 'get_features_or_waveform', (['p', '(True)'], {}), '(p, True)\n', (8271, 8280), False, 'from fairseq.data.audio.triplet_dataset import TripletDataConfig, TripletDataset, 
TripletDatasetCreator, get_features_or_waveform\n')] |
from typing import Union, Tuple, Iterable
import os
import torch
import numpy as np
import logging
from .models.models import GANModel, ReturnsModel
from .models.losses import LossCompose, WeightedLSLoss
from .utils import get_scheduler_from_config, get_optimizer_from_config, to_numpy
from .data_loader import FinanceDataset
from .portfolio_utils import sharpe, construct_long_short_portfolio
from .callbacks import EarlyStopping
from .config_reader import Config
# RNN hidden state: either a single tensor or a pair of tensors
# (presumably the (h, c) pair of an LSTM -- TODO confirm against the models).
HIDDEN_STATE_TYPE = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
# The two model flavours supported below and their matching loss objects.
MODEL_TYPE = Union[GANModel, ReturnsModel]
LOSS_TYPE = Union[LossCompose, WeightedLSLoss]
def forward_with_dataset(model: MODEL_TYPE, dataset: FinanceDataset,
                         hidden_state: torch.Tensor = None) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
    """Run one forward pass of *model* fed by every tensor held in *dataset*.

    :param model: a GAN or returns model; called with keyword arguments only
    :param dataset: supplies the macro/individual features, masks and returns
    :param hidden_state: optional initial RNN hidden state (None by default)
    :returns: whatever the model's forward returns (a tensor or a tuple of
        tensors, depending on the model type)
    """
    model_inputs = {
        'macro_features': dataset.macro_feat_tensor,
        'individual_features': dataset.ind_feat_tensor,
        'masks': dataset.masks_tensor,
        'returns_tensor': dataset.returns_tensor,
        'hidden_state': hidden_state,
    }
    return model(**model_inputs)
def evaluate(model: MODEL_TYPE, dataset: FinanceDataset, loss: LOSS_TYPE,
             hidden_state: HIDDEN_STATE_TYPE, normalize_sdf: bool = False) -> Iterable[float]:
    """Evaluate *model* on *dataset* and report losses plus a Sharpe ratio.

    Puts the model into eval mode, runs one forward pass and returns the
    tuple ``(main_loss_value, residual_loss_value, sharpe_val)`` converted
    via ``to_numpy``.  ``residual_loss_value`` is only computed on the GAN
    path and stays ``None`` for a ``ReturnsModel``.

    :param model: a ``GANModel`` or ``ReturnsModel``; anything else raises
        ``ValueError``
    :param dataset: dataset whose tensors feed the forward pass
    :param loss: loss object matching the model type (``LossCompose`` for the
        GAN, ``WeightedLSLoss`` for the returns model)
    :param hidden_state: initial RNN hidden state handed to the forward pass
    :param normalize_sdf: if True, the Sharpe computation rebuilds the SDF
        from L1-normalized per-period weights (see ``evaluate_sharpe_from_sdf``)
    """
    model.eval()
    residual_loss_value = None
    if isinstance(model, GANModel):
        # GAN path: the forward pass yields the SDF, its weights and moments.
        sdf, sdf_weights, moments = forward_with_dataset(model, dataset, hidden_state)
        main_loss_value = loss.main_loss(
            returns_tensor=dataset.returns_tensor,
            masks=dataset.masks_tensor,
            sdf=sdf,
            moments=moments)
        residual_loss_value = loss.residual_loss(
            returns_tensor=dataset.returns_tensor,
            masks=dataset.masks_tensor,
            sdf_weights=sdf_weights
        )
        sharpe_val = evaluate_sharpe_from_sdf(
            sdf, sdf_weights,
            dataset.returns_tensor,
            dataset.masks_tensor,
            normalize=normalize_sdf)
    elif isinstance(model, ReturnsModel):
        # Returns path: the forward pass yields residual returns only.
        residual_returns = forward_with_dataset(model, dataset)
        main_loss_value = loss(returns_tensor=residual_returns, masks=dataset.masks_tensor)
        sharpe_val = evaluate_sharpe_from_residual_returns(
            dataset.returns_tensor,
            dataset.masks_tensor,
            residual_returns)
    else:
        raise ValueError(f"Model of type {type(model)} is not supported.")
    main_loss_value, residual_loss_value, sharpe_val = to_numpy(main_loss_value, residual_loss_value, sharpe_val)
    return main_loss_value, residual_loss_value, sharpe_val
def _get_normalized_sdf_from_weights(sdf_weights: np.ndarray, returns: np.ndarray, mask: np.ndarray) -> np.ndarray:
splits = np.sum(mask, axis=1).cumsum()[:-1]
sdf_weights_list = np.split(sdf_weights, splits)
sdf_weights_array = np.concatenate([item / np.absolute(item).sum() for item in sdf_weights_list])
if len(returns.shape) > 3:
returns = returns[0, :]
weighted_returns_list = np.split(returns[mask] * sdf_weights_array.flatten(), splits)
return np.array([[item.sum()] for item in weighted_returns_list]) + 1
def _get_predicted_returns(residual_returns: np.ndarray, returns: np.ndarray, mask: np.ndarray) -> np.ndarray:
if len(returns.shape) > 3:
returns = returns[0, :]
returns = returns[mask]
return returns - residual_returns.flatten()
def predict_returns(model: ReturnsModel, dataset: FinanceDataset) -> np.ndarray:
    """Predict asset returns with a trained returns model.

    Runs the model in eval mode on *dataset* and converts the residuals back
    into predicted returns (observed returns minus residuals).
    """
    model.eval()
    residuals = to_numpy(forward_with_dataset(model, dataset))
    individual = dataset.individual_data
    # Predicted returns = observed returns minus the model's residuals.
    return _get_predicted_returns(residuals,
                                  returns=individual.return_array,
                                  mask=individual.mask)
def predict_normalized_sdf(model: GANModel, dataset: FinanceDataset, hidden_state: HIDDEN_STATE_TYPE = None,
                           as_factor: bool = False) -> Tuple[np.ndarray, HIDDEN_STATE_TYPE]:
    """Predict the (L1-normalized) SDF of a trained GAN model.

    :param model: trained GAN model, switched to eval mode here
    :param dataset: dataset whose tensors feed the forward pass
    :param hidden_state: initial RNN state; a fresh one is created when None
    :param as_factor: if True, return ``1 - sdf`` instead of the SDF itself
    :returns: ``(sdf, last_hidden_state)``
    """
    model.eval()
    state = model.initialize_sdf_hidden_state() if hidden_state is None else hidden_state
    _, sdf_weights, _ = to_numpy(forward_with_dataset(model, dataset, state))
    individual = dataset.individual_data
    sdf = _get_normalized_sdf_from_weights(
        sdf_weights=sdf_weights,
        returns=individual.return_array,
        mask=individual.mask,
    )
    if as_factor:
        sdf = 1 - sdf
    return sdf, model.get_last_hidden_state()
def evaluate_sharpe_from_sdf(sdf: torch.Tensor, sdf_weights: torch.Tensor, returns: torch.Tensor,
                             mask: torch.Tensor, normalize: bool) -> float:
    """Sharpe ratio of ``1 - sdf``, optionally after re-normalizing the SDF.

    :param normalize: if True, discard the raw *sdf* and rebuild it from the
        L1-normalized per-period weights instead
    """
    sdf, sdf_weights, returns, mask = to_numpy(sdf, sdf_weights, returns, mask)
    if normalize:
        sdf = _get_normalized_sdf_from_weights(sdf_weights, returns, mask)
    factor = 1 - sdf
    return sharpe(factor)
def evaluate_sharpe_from_residual_returns(returns: torch.Tensor, mask: torch.Tensor,
                                          residual_returns: torch.Tensor) -> float:
    """Sharpe ratio of a long-short portfolio built from predicted returns.

    Predicted returns are recovered from the residuals, a long-short
    portfolio is constructed against the realized returns, and its Sharpe
    ratio is returned.
    """
    returns, mask, residual_returns = to_numpy(returns, mask, residual_returns)
    predicted = _get_predicted_returns(residual_returns, returns, mask)
    # Drop a leading batch axis if present, then keep the valid entries only.
    if len(returns.shape) > 3:
        returns = returns[0, :]
    realized = returns[mask]
    portfolio = construct_long_short_portfolio(predicted, realized, mask[:, :, 0])
    return sharpe(portfolio)
def train_model(config: Config, epochs: int, model: MODEL_TYPE,
                loss: LOSS_TYPE, dataset_train: FinanceDataset,
                dataset_valid: FinanceDataset, dataset_test: FinanceDataset,
                path_to_save: str, print_freq: int = 10):
    """Shared training loop for both the GAN and the plain returns model.

    Each of the ``epochs`` outer epochs performs ``config['sub_epoch']``
    gradient steps on the training set, then evaluates on train/valid/test,
    threading the RNN hidden state train -> valid -> test.  Progress is
    logged every ``print_freq`` epochs.  Training stops early when
    ``EarlyStopping`` triggers on the validation main loss; otherwise the
    model is saved to ``path_to_save`` every time the validation main loss
    improves (direction given by ``loss.minimize``).
    """
    # Initialize early stopper
    early_stopping = EarlyStopping(minimize=loss.minimize)
    # Get optimizer and scheduler if applicable
    optimizer = get_optimizer_from_config(config, model.trainable_params())
    scheduler = get_scheduler_from_config(config, optimizer)
    # If RNN should be used - initialize random hidden state
    train_initial_hidden_state = model.initialize_sdf_hidden_state() if config['use_rnn'] else None
    sub_epochs = config['sub_epoch']
    # Best validation metric seen so far; the sentinel's sign depends on
    # whether the loss is minimized or maximized.
    if loss.minimize:
        last_metric_value = 1e10
    else:
        last_metric_value = -1e10
    for epoch in range(epochs):
        model.train()
        for sub_epoch in range(sub_epochs):
            if isinstance(model, GANModel):
                # GAN forward pass yields (sdf, sdf_weights, moments).
                sdf, sdf_weights, moments = forward_with_dataset(model, dataset_train, train_initial_hidden_state)
                loss_tensor = loss(
                    returns_tensor=dataset_train.returns_tensor,
                    masks=dataset_train.masks_tensor,
                    sdf=sdf,
                    sdf_weights=sdf_weights,
                    moments=moments)
            elif isinstance(model, ReturnsModel):
                residual_returns = forward_with_dataset(model, dataset_train)
                loss_tensor = loss(returns_tensor=residual_returns, masks=dataset_train.masks_tensor)
            else:
                raise NotImplementedError
            optimizer.zero_grad()
            loss_tensor.backward()
            optimizer.step()
        if scheduler is not None:
            scheduler.step()
        # Evaluate all three splits, carrying the hidden state forward
        # chronologically: train -> valid -> test.
        train_main_loss, train_residual_loss, train_sharpe = evaluate(
            model=model,
            dataset=dataset_train,
            loss=loss,
            hidden_state=train_initial_hidden_state)
        train_hidden_state = model.get_last_hidden_state()
        valid_main_loss, valid_residual_loss, valid_sharpe = evaluate(
            model=model,
            dataset=dataset_valid,
            loss=loss,
            hidden_state=train_hidden_state)
        valid_hidden_state = model.get_last_hidden_state()
        test_main_loss, test_residual_loss, test_sharpe = evaluate(
            model=model,
            dataset=dataset_test,
            loss=loss,
            hidden_state=valid_hidden_state)
        if epoch % print_freq == 0:
            logging.info(f"Epoch: {epoch}")
            logging.info(f"Train main loss: {train_main_loss} "
                         f"and residual loss: {train_residual_loss}"
                         f" and sharpe {train_sharpe}")
            logging.info(f"Valid main loss: {valid_main_loss} "
                         f"and residual loss: {valid_residual_loss}"
                         f" and sharpe {valid_sharpe}")
            logging.info(f"Test main loss: {test_main_loss} "
                         f"and residual loss: {test_residual_loss}"
                         f" and sharpe {test_sharpe}")
        if early_stopping(valid_main_loss):
            return
        # Checkpoint whenever the validation main loss improves in the
        # direction the loss cares about.
        is_min_cond = loss.minimize and last_metric_value > valid_main_loss
        is_not_min_cond = not loss.minimize and last_metric_value < valid_main_loss
        if is_min_cond or is_not_min_cond:
            last_metric_value = valid_main_loss
            torch.save(model.state_dict(), path_to_save)
def train_gan(config: Config, path_to_dump: str,
              dataset_train: FinanceDataset, dataset_valid: FinanceDataset,
              dataset_test: FinanceDataset = None, norm_std: float = 0.05) -> GANModel:
    """Train the GAN model in three stages and cache the result on disk.

    If ``{path_to_dump}/model_dump.pth`` already exists, the cached weights
    are loaded (on CPU) and returned without retraining.  Otherwise every
    parameter is initialized from N(0, norm_std**2) and optimized in three
    phases:

    1. unconditional loss (minimized) with the moment network frozen,
    2. moment conditional loss (maximized) with the SDF network frozen,
    3. conditional loss (minimized) with the moment network frozen again.

    The final weights are dumped to ``model_dump.pth``.

    :param config: experiment configuration (loss weighting, epoch counts, ...)
    :param path_to_dump: directory for intermediate and final checkpoints
    :param norm_std: std of the zero-mean normal weight initialization
    :returns: the trained (or loaded) ``GANModel``
    """
    # Initialize GAN Model
    gan_model = GANModel(config=config)
    if os.path.exists(f"{path_to_dump}/model_dump.pth"):
        gan_model.load_state_dict(torch.load(f"{path_to_dump}/model_dump.pth", map_location=torch.device('cpu')))
        return gan_model
    logging.info(f"Initialize weights with zero mean and {norm_std} std")
    for x in gan_model.parameters():
        torch.nn.init.normal_(x, std=norm_std)
    # Prepare inputs for training
    train_inputs = {
        'config': config,
        'dataset_train': dataset_train,
        'dataset_valid': dataset_valid,
        'dataset_test': dataset_test
    }
    # Firstly, Initialize and optimize unconditional loss
    unconditional_loss = LossCompose(minimize=True,
                                     to_weight=config['weighted_loss'],
                                     main_loss_conditional=False,
                                     residual_loss_factor=config['residual_loss_factor']
                                     )
    logging.info('Train unconditional Loss')
    gan_model.froze_moment_net()
    train_model(epochs=config['num_epochs_unc'],
                model=gan_model,
                loss=unconditional_loss,
                path_to_save=f'{path_to_dump}/unconditional_model.pth',
                **train_inputs)
    # Secondly, Initialize and optimize moment conditional loss
    moment_conditional_loss = LossCompose(minimize=False,
                                          to_weight=config['weighted_loss'],
                                          main_loss_conditional=True
                                          )
    logging.info('Train moment loss')
    gan_model.froze_sdf_net()
    train_model(epochs=config['num_epochs_moment'],
                model=gan_model,
                loss=moment_conditional_loss,
                path_to_save=f'{path_to_dump}/moment_model.pth',
                **train_inputs)
    # Lastly, initialize and optimize conditional loss
    logging.info('Train conditional loss')
    conditional_loss = LossCompose(minimize=True,
                                   to_weight=config['weighted_loss'],
                                   main_loss_conditional=True,
                                   residual_loss_factor=config['residual_loss_factor'])
    gan_model.froze_moment_net()
    train_model(epochs=config['num_epochs'],
                model=gan_model,
                loss=conditional_loss,
                path_to_save=f'{path_to_dump}/condition_model.pth',
                **train_inputs)
    # Dump trained model
    torch.save(gan_model.state_dict(), f"{path_to_dump}/model_dump.pth")
    return gan_model
def train_returns_model(config: Config, path_to_dump: str,
                        dataset_train: FinanceDataset, dataset_valid: FinanceDataset,
                        dataset_test: FinanceDataset = None, norm_std: float = 0.05) -> ReturnsModel:
    """Train (or load a cached) ``ReturnsModel`` with a weighted LS loss.

    If ``{path_to_dump}/returns_model.pth`` already exists, the cached
    weights are loaded (on CPU) and returned without retraining.  Otherwise
    every parameter is initialized from N(0, norm_std**2) and the model is
    trained with ``train_model``.

    :param config: experiment configuration (supplies ``weighted_loss`` and
        ``num_epochs``)
    :param path_to_dump: directory used both to look up a cached checkpoint
        and to store the newly trained weights
    :param norm_std: std of the zero-mean normal weight initialization
    :returns: the trained (or loaded) ``ReturnsModel``

    .. note::
        Bug fix: ``norm_std`` previously defaulted to ``None``, which made
        ``torch.nn.init.normal_(x, std=None)`` raise a ``TypeError`` whenever
        no cached model existed.  The default now matches ``train_gan``
        (0.05); callers that passed an explicit value are unaffected.
    """
    # Initialize weighted loss
    least_squares_loss = WeightedLSLoss(to_weight=config['weighted_loss'])
    # Initialize returns model
    returns_model = ReturnsModel(config=config)
    # Reuse a previously trained model if one was dumped to disk.
    if os.path.exists(f"{path_to_dump}/returns_model.pth"):
        returns_model.load_state_dict(torch.load(f"{path_to_dump}/returns_model.pth", map_location=torch.device('cpu')))
        return returns_model
    logging.info(f"Initialize weights with zero mean and {norm_std} std")
    for x in returns_model.parameters():
        torch.nn.init.normal_(x, std=norm_std)
    logging.info('Train returns model')
    train_model(config,
                epochs=config['num_epochs'],
                model=returns_model,
                loss=least_squares_loss,
                dataset_train=dataset_train,
                dataset_valid=dataset_valid,
                dataset_test=dataset_test,
                path_to_save=f'{path_to_dump}/returns_model.pth')
    return returns_model
| [
"os.path.exists",
"numpy.absolute",
"numpy.sum",
"numpy.split",
"logging.info",
"torch.nn.init.normal_",
"torch.device"
] | [((2792, 2821), 'numpy.split', 'np.split', (['sdf_weights', 'splits'], {}), '(sdf_weights, splits)\n', (2800, 2821), True, 'import numpy as np\n'), ((9310, 9358), 'os.path.exists', 'os.path.exists', (['f"""{path_to_dump}/model_dump.pth"""'], {}), "(f'{path_to_dump}/model_dump.pth')\n", (9324, 9358), False, 'import os\n'), ((9504, 9573), 'logging.info', 'logging.info', (['f"""Initialize weights with zero mean and {norm_std} std"""'], {}), "(f'Initialize weights with zero mean and {norm_std} std')\n", (9516, 9573), False, 'import logging\n'), ((10243, 10283), 'logging.info', 'logging.info', (['"""Train unconditional Loss"""'], {}), "('Train unconditional Loss')\n", (10255, 10283), False, 'import logging\n'), ((10861, 10894), 'logging.info', 'logging.info', (['"""Train moment loss"""'], {}), "('Train moment loss')\n", (10873, 10894), False, 'import logging\n'), ((11213, 11251), 'logging.info', 'logging.info', (['"""Train conditional loss"""'], {}), "('Train conditional loss')\n", (11225, 11251), False, 'import logging\n'), ((12335, 12386), 'os.path.exists', 'os.path.exists', (['f"""{path_to_dump}/returns_model.pth"""'], {}), "(f'{path_to_dump}/returns_model.pth')\n", (12349, 12386), False, 'import os\n'), ((12542, 12611), 'logging.info', 'logging.info', (['f"""Initialize weights with zero mean and {norm_std} std"""'], {}), "(f'Initialize weights with zero mean and {norm_std} std')\n", (12554, 12611), False, 'import logging\n'), ((12704, 12739), 'logging.info', 'logging.info', (['"""Train returns model"""'], {}), "('Train returns model')\n", (12716, 12739), False, 'import logging\n'), ((9619, 9657), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['x'], {'std': 'norm_std'}), '(x, std=norm_std)\n', (9640, 9657), False, 'import torch\n'), ((12661, 12699), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['x'], {'std': 'norm_std'}), '(x, std=norm_std)\n', (12682, 12699), False, 'import torch\n'), ((8052, 8083), 'logging.info', 'logging.info', (['f"""Epoch: 
{epoch}"""'], {}), "(f'Epoch: {epoch}')\n", (8064, 8083), False, 'import logging\n'), ((8096, 8224), 'logging.info', 'logging.info', (['f"""Train main loss: {train_main_loss} and residual loss: {train_residual_loss} and sharpe {train_sharpe}"""'], {}), "(\n f'Train main loss: {train_main_loss} and residual loss: {train_residual_loss} and sharpe {train_sharpe}'\n )\n", (8108, 8224), False, 'import logging\n'), ((8285, 8413), 'logging.info', 'logging.info', (['f"""Valid main loss: {valid_main_loss} and residual loss: {valid_residual_loss} and sharpe {valid_sharpe}"""'], {}), "(\n f'Valid main loss: {valid_main_loss} and residual loss: {valid_residual_loss} and sharpe {valid_sharpe}'\n )\n", (8297, 8413), False, 'import logging\n'), ((8474, 8598), 'logging.info', 'logging.info', (['f"""Test main loss: {test_main_loss} and residual loss: {test_residual_loss} and sharpe {test_sharpe}"""'], {}), "(\n f'Test main loss: {test_main_loss} and residual loss: {test_residual_loss} and sharpe {test_sharpe}'\n )\n", (8486, 8598), False, 'import logging\n'), ((2734, 2754), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (2740, 2754), True, 'import numpy as np\n'), ((9452, 9471), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9464, 9471), False, 'import torch\n'), ((12487, 12506), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12499, 12506), False, 'import torch\n'), ((2869, 2886), 'numpy.absolute', 'np.absolute', (['item'], {}), '(item)\n', (2880, 2886), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import matplotlib
# Drop the cached 'roman' weight entry and rebuild matplotlib's font cache so
# the 'Times New Roman' labels below resolve to the expected font.
# NOTE(review): font_manager._rebuild() is a private API that was removed in
# newer matplotlib releases -- confirm the pinned matplotlib version still
# provides it.
del matplotlib.font_manager.weight_dict['roman']
matplotlib.font_manager._rebuild()
def plot_embedding(data, label, title):
    """Scatter-plot a 2-D embedding colored by class label, save and show it.

    :param data: (n_samples, 2) array of embedded coordinates
    :param label: per-sample integer class labels, used as the scatter colors
    :param title: figure title

    The figure is written to 'T-SNE.png' at 300 dpi and then shown.
    """
    plt.figure()
    # The original per-point loop hard-coded colors for labels 0-3 (and raised
    # NameError for any other label value), but was then fully over-plotted by
    # this bulk scatter at the same coordinates and size -- so only the bulk
    # scatter, which determines the visible output, is kept.
    plt.scatter(data[:, 0], data[:, 1], s=4, c=label)
    plt.title(title)
    fonts = {'family': 'Times New Roman', 'style': 'italic', 'size': 15}
    plt.xlabel("X", fonts)
    plt.ylabel("Y", fonts)
    plt.savefig('T-SNE.png', dpi=300)
    plt.show()
def preprocess_for_tsne(embedding, label):
    """Project *embedding* to 2-D with t-SNE and plot it colored by *label*."""
    # Round to 7 decimals so tiny float noise does not perturb the projection.
    features = np.round(embedding.astype(float), 7)
    classes = np.array(label)
    print('Computing T-SNE......')
    projector = TSNE(n_components=2, init='pca', random_state=0)
    projection = projector.fit_transform(features)
    plot_embedding(projection, classes, 'T-SNE task')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.manifold.TSNE",
"matplotlib.font_manager._rebuild",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((153, 187), 'matplotlib.font_manager._rebuild', 'matplotlib.font_manager._rebuild', ([], {}), '()\n', (185, 187), False, 'import matplotlib\n'), ((233, 245), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (243, 245), True, 'import matplotlib.pyplot as plt\n'), ((566, 615), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[:, 0]', 'data[:, 1]'], {'s': '(4)', 'c': 'label'}), '(data[:, 0], data[:, 1], s=4, c=label)\n', (577, 615), True, 'import matplotlib.pyplot as plt\n'), ((620, 636), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (629, 636), True, 'import matplotlib.pyplot as plt\n'), ((714, 736), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""', 'fonts'], {}), "('X', fonts)\n", (724, 736), True, 'import matplotlib.pyplot as plt\n'), ((741, 763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""', 'fonts'], {}), "('Y', fonts)\n", (751, 763), True, 'import matplotlib.pyplot as plt\n'), ((768, 801), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""T-SNE.png"""'], {'dpi': '(300)'}), "('T-SNE.png', dpi=300)\n", (779, 801), True, 'import matplotlib.pyplot as plt\n'), ((805, 815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (813, 815), True, 'import matplotlib.pyplot as plt\n'), ((921, 936), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (929, 936), True, 'import numpy as np\n'), ((983, 1031), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'init': '"""pca"""', 'random_state': '(0)'}), "(n_components=2, init='pca', random_state=0)\n", (987, 1031), False, 'from sklearn.manifold import TSNE\n'), ((513, 562), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[i, 0]', 'data[i, 1]'], {'s': '(4)', 'c': 'color'}), '(data[i, 0], data[i, 1], s=4, c=color)\n', (524, 562), True, 'import matplotlib.pyplot as plt\n')] |
"""
defines:
- clear_out_solids(bdf_filename, bdf_filename_out=None,
equivalence=True, renumber=True, equivalence_tol=0.01)
- nastran_to_surf(bdf_filename, pid_to_element_flags, surf_filename,
renumber_pids=None, line_map=None,
scale=1.0, tol=1e-10,
xref=True)
"""
from collections import defaultdict
from numpy import array, allclose, unique, zeros
from pyNastran.bdf.bdf import read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber
from pyNastran.bdf.mesh_utils.remove_unused import remove_unused
def remove_unassociated_nodes(bdf_filename, unused_bdf_filename_out, renumber=False):
    """Compatibility shim: strip GRIDs that no element references.

    Delegates to ``remove_unused`` and removes only nodes; coordinate
    systems, properties and materials are kept.  Renumbering is not
    supported here (``renumber`` must stay False).
    """
    assert renumber is False, renumber
    remove_unused(bdf_filename,
                  remove_nids=True,
                  remove_cids=False,
                  remove_pids=False,
                  remove_mids=False)
def clear_out_solids(bdf_filename, bdf_filename_out=None,
                     equivalence=True, renumber=True, equivalence_tol=0.01):
    """Strip all non-shell elements from a model, keeping only CTRIA3/CQUAD4.

    Unreferenced nodes and all non-PSHELL properties are dropped as well.
    Optionally equivalences nearby nodes and renumbers all ids from 1.

    :param bdf_filename: path to a BDF file, or an already loaded BDF object
    :param bdf_filename_out: output path; required when ``renumber`` or
        ``equivalence`` is True
    :param equivalence: merge coincident nodes within ``equivalence_tol``
    :param renumber: renumber cids/nids/eids/pids/mids starting at 1
    :param equivalence_tol: node merge tolerance
    :returns: the modified BDF model
    """
    if bdf_filename_out is None:
        if renumber or equivalence:
            msg = ('bdf_filename_out=%s must be specified if renumber=%s '
                   'or equivalence=%s are True' % (
                       bdf_filename_out, renumber, equivalence))
            raise RuntimeError(msg)
    if isinstance(bdf_filename, str):
        print('clearing out solids from %s' % bdf_filename)
        model = read_bdf(bdf_filename, xref=False)
    else:
        model = bdf_filename
    #nodes2 = {nid, node for nid, node in model.nodes.items()}
    #elements2 = {eid, element for eid, element in model.elements.items()
                 #if element.type in ['CTRIA3', 'CQUAD4']}
    # Keep only the shell elements; everything else is deleted below.
    out_dict = model.get_card_ids_by_card_types(card_types=['CTRIA3', 'CQUAD4'])
    save_eids = set(out_dict['CTRIA3'] + out_dict['CQUAD4'])
    all_eids = set(model.element_ids)
    #print('all_eids =', all_eids)
    #print('save_eids =', save_eids)
    remove_eids = all_eids - save_eids
    #print('remove_eids =', remove_eids)
    for eid in remove_eids:
        #print('eid =', eid)
        del model.elements[eid]
    # TODO: seems like we could be more efficient...
    #nids = unique(hstack([model.elements[eid].node_ids for eid in save_eids]))
    # get nodes that are remaining in the model
    nids = set()
    unused_elements2 = {}
    #print(model.elements)
    for eid, element in model.elements.items():
        #if element.type not in ['CTRIA3', 'CQUAD4']:
            #continue
        #elements2[eid] = element
        nids.update(element.node_ids)
    nids = list(nids)
    nids.sort()
    # filter out old nodes & properties
    # NOTE(review): `nid in nids` is an O(n) list lookup per node; keeping
    # nids as a set would be faster -- left as-is here (comment-only change).
    nodes2 = {nid : node for nid, node in model.nodes.items() if nid in nids}
    properties2 = {pid : prop for pid, prop in model.properties.items() if prop.type == 'PSHELL'}
    model.nodes = nodes2
    #model.elements = elements2
    model.properties = properties2
    # already equivalenced?
    #remove_unassociated_nodes(bdf_filename, bdf_filename_out, renumber=False)
    #bdf_filename_out = 'equivalence.bdf'
    starting_id_dict = {
        'cid' : 1,
        'nid' : 1,
        'eid' : 1,
        'pid' : 1,
        'mid' : 1,
    }
    if equivalence:
        # When also renumbering, equivalence into an intermediate file first.
        if renumber:
            bdf_equivalenced_filename = 'equivalence.bdf'
        else:
            bdf_equivalenced_filename = bdf_filename_out
        model.write_bdf('remove_unused_nodes.bdf')
        bdf_equivalence_nodes(model, bdf_equivalenced_filename, equivalence_tol,
                              renumber_nodes=False, neq_max=4, xref=True)
        if renumber:
            bdf_renumber(bdf_equivalenced_filename, bdf_filename_out, size=8, is_double=False,
                         starting_id_dict=starting_id_dict)
    elif renumber:
        model.cross_reference()
        bdf_renumber(model, bdf_filename_out, size=8, is_double=False,
                     starting_id_dict=starting_id_dict)
    return model
def nastran_to_surf(bdf_filename, pid_to_element_flags, surf_filename,
                    renumber_pids=None, line_map=None,
                    scale=1.0, tol=1e-10,
                    xref=True):
    """
    Converts a BDF to an AFLR3 surf file

    Parameters
    ----------
    bdf_filename : str/BDF
        str : the input BDF filename
        BDF : a BDF model that has been cross-referenced
    surf_filename : str
        the output SURF filename
    pid_to_element_flags : dict[key] = value
        key=PSHELL value=[layer0 thickness, BL_thickness, grid_bc]
    renumber_pids : dict; default=None
        a mapping of pid to surface ID
        None = no remapping
    line_map : dict[key] = value
        same as pid_to_element_flags, but for the specific intersections
        where there are BC differences
        NOTE: we only check [thickness, BL_thickness] because we're
        averaging this data for the nodes
    scale : float; default=1.0
        scales the mesh by scale for unit conversion
    tol : float; default=1e-10
        coordinate components smaller than tol are snapped to 0.0
    xref : bool; default=True
        does the model need to be cross-referenced to calculate the
        node positions?

    # these pids correspond to the BDF
    pid_to_element_flags = {
        1 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # top_wall
        2 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # right_wall
        3 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # left_wall
        4 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # outlet
        5 : [far_field_initial_normal_spacing, far_field_bl_thickness, grid_bc], # bottom_wall
        6 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # bay
        11 : [far_field_initial_normal_spacing, far_field_bl_thickness, grid_bc], # inlet_btm
        12 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_front
        13 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_left
        14 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_right
        15 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # inlet_visc
    }

    # these pids correspond to the BDF
    # the pid_to_element_flag at the intersection between pids
    line_map = {
        (1, 2) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
        (1, 3) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
        (1, 4) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
    }

    # these are done at the last step to make the output "nice"
    renumber_pids = {
        11 : 7,
        12 : 8,
        13 : 9,
        14 : 10,
        15 : 11,
    }
    scale = 0.0254 # inches to meters;
    """
    if renumber_pids is None:
        renumber_pids = {}
    if line_map is None:
        line_map = {}
    if isinstance(bdf_filename, str):
        model = read_bdf(bdf_filename, xref=xref)
    else:
        model = bdf_filename
    unused_nnodes = len(model.nodes)
    nodes = []
    quads = []
    tris = []
    # Scaled coordinates plus (so far empty) per-node flag containers.
    maxnode, nodes, node_flags, node_flags_temp = _get_nodes(model, scale, xref)
    node_remaps = {}
    #if 0:
        #xyz_array = array(nodes, dtype='float64')
        #for nid, xyz in enumerate(xyz_array):
            #for nidi, xyz2 in enumerate(xyz_array[nid+1:, :]):
                #nid2 = nid + nidi + 1
                #if not allclose(nid + 1, nid2 + 1):
                    #msg = 'nid=%s nid2=%s xyz=%s' % (nid+1, nid2+1, xyz)
                    #raise RuntimeError(msg)
                #if allclose(xyz, xyz2):
                    ##print(nid, nid2, nidi)
                    ##if nid + 1 in node_remaps:
                    #node_remaps[nid2 + 1] = nid + 1
                    #print('nid=%s nid2=%s xyz=%s xyz2=%s' % (nid+1, nid2+1, xyz, xyz2))
            #assert not(allclose(xyz, xyz2)), 'nid=%s nid2=%s xyz=%s' % (nid+1, nid2+1, xyz)
    #del xyz_array
    # Validate the property numbering: pids must be contiguous from 1.
    # NOTE(review): a PSOLID hits `continue` without bumping pid0, so any
    # property after a PSOLID would fail the gap check -- presumably solids
    # are expected to be cleared beforehand (see clear_out_solids); confirm.
    pid0 = 1
    for pid, prop in sorted(model.properties.items()):
        if pid != pid0:
            msg = 'properties must go from 1 to N, no gaps; pid=%s expected=%s' % (
                pid, pid0)
            raise RuntimeError(msg)
        #assert pid in pid_to_element_flags, pid
        if prop.type in ['PSOLID']:
            continue
        if prop.type not in ['PSHELL', 'PCOMP', 'PCOMPG']:
            raise NotImplementedError(prop)
        pid0 += 1
    # Fills tris/quads/node_flags_temp as a side effect.
    nid_to_eid_map = get_nid_to_eid_map(
        model,
        node_flags_temp, pid_to_element_flags, node_remaps,
        tris, quads)
    initial_normal_spacing0 = 0
    bl_thickness0 = 0
    # Average the per-element [spacing, BL thickness] flags at every node;
    # disagreements across pids must be resolved through line_map.
    for nid, node_flagsi in node_flags_temp.items():
        nodes_flags_array = array(node_flagsi)  # (N, 2)
        nflags = nodes_flags_array.shape[0]
        if nflags == 0:
            #node_flags[nid] = [initial_normal_spacing0, bl_thickness0]
            continue
        try:
            avg_node_flagsi = nodes_flags_array.mean(axis=0)
            max_node_flagsi = nodes_flags_array.max(axis=0)
        except ValueError:
            print('nid=%s node_flagsi=%s' % (nid, node_flagsi))
            raise RuntimeError('node %i is duplicated (equivalence your nodes)'
                               ' or you have unused nodes' % nid)
        if not allclose(avg_node_flagsi, max_node_flagsi):
            # The node sits on a boundary between pids with different flags.
            eidsi = unique(nid_to_eid_map[nid])
            pidsi = unique([model.elements[eid].Pid() for eid in eidsi])
            pidsi.sort()
            pidsi = tuple(pidsi)
            if pidsi in line_map:
                element_flag = line_map[pidsi]
                unused_name, spacing, thickness, unused_grid_bc = element_flag
                avg_node_flagsi = [spacing, thickness]
            else:
                # No line_map entry: warn loudly and fall back to the max.
                msg = ('\nERROR BL THICKNESS MISMATCH:\n define a line_map to resolve for nid=%s;'
                       ' map=%s; pids=%s\n' % (
                           nid, nid_to_eid_map[nid], pidsi))
                for pid in pidsi:
                    msg += ' pid=%s name=%s\n' % (pid, pid_to_element_flags[pid][0])
                msg += " nodes_flags_array = \n%s\n" % nodes_flags_array
                avg_node_flagsi = nodes_flags_array.max(axis=0)
                msg += ' FOUND: avg_node_flag =\n%s\n' % (avg_node_flagsi)
                msg += ' ASSUMING: max_node_flags =\n%s' % (max_node_flagsi)
                #raise NotImplementedError(msg)
                print(msg)
                avg_node_flagsi = max_node_flagsi
        #avg_node_flagsi = avg_node_flagsi.max()
        if len(avg_node_flagsi) != 2:
            msg = 'len([normal_spacing, bl_thickness])=2; actual=%s' % len(avg_node_flagsi)
            raise RuntimeError(msg)
        assert nid > 0, nid
        node_flags[nid] = avg_node_flagsi
    _write_surf(surf_filename, maxnode,
                nodes, tris, quads,
                node_flags, initial_normal_spacing0, pid_to_element_flags, bl_thickness0,
                renumber_pids, tol)
def _get_nodes(model, scale, xref):
"""helper method for ``nastran_to_surf``"""
# assume nodes go from 1:#
#nid0 = 1
node_flags = {}
node_flags_temp = {}
maxnode = max(model.nodes.keys())
nodes = zeros((maxnode, 3), dtype='float64')
if xref:
for nid, node in sorted(model.nodes.items()):
#if nid != nid0:
#msg = 'nodes must go from 1 to N, no gaps; nid=%s expected=%s' % (nid, nid0)
#raise RuntimeError(msg)
xyz = node.get_position()
nodes[nid-1] = xyz * scale
node_flags[nid] = []
node_flags_temp[nid] = []
#nid0 += 1
else:
for nid, node in sorted(model.nodes.items()):
#if nid != nid0:
#msg = 'nodes must go from 1 to N, no gaps; nid=%s expected=%s' % (nid, nid0)
#raise RuntimeError(msg)
xyz = node.xyz
nodes[nid-1] = xyz * scale
node_flags[nid] = []
node_flags_temp[nid] = []
#nid0 += 1
return maxnode, nodes, node_flags, node_flags_temp
def get_nid_to_eid_map(model,
                       node_flags_temp, pid_to_element_flags, node_remaps,
                       tris, quads):
    """Build the nid -> [eid, ...] map while filling the output containers.

    Side effects (mutates the passed-in containers):
      * appends ``[remapped_node_ids, pid]`` to *tris* (CTRIA3) or *quads*
        (CQUAD4); any other element type raises NotImplementedError,
      * appends the element's ``[spacing, thickness]`` pair to each of its
        nodes' entries in *node_flags_temp*.

    :returns: ``defaultdict(list)`` mapping each nid to the (original,
        pre-remap) element ids that reference it
    """
    nid_to_eid_map = defaultdict(list)
    for eid, element in sorted(model.elements.items()):
        node_ids = element.node_ids
        remapped_nids = []
        for nid in node_ids:
            nid_to_eid_map[nid].append(eid)
            # Apply the node remap (equivalenced duplicates) if one exists.
            remapped_nids.append(node_remaps.get(nid, nid))
        pid = element.Pid()
        unused_name, spacing, thickness, unused_grid_bc = pid_to_element_flags[pid]
        flag_pair = [spacing, thickness]
        for nid in node_ids:
            try:
                node_flags_temp[nid].append(flag_pair)
            except KeyError:
                print('max_nid=%s max_temp_nid=%s' % (max(node_ids), max(node_flags_temp)))
                print("nids = %s" % node_ids)
                print("node_flags_temp = %s" % node_flags_temp.keys())
                raise
            assert nid > 0, element
        if element.type == 'CTRIA3':
            tris.append([remapped_nids, pid])
        elif element.type == 'CQUAD4':
            quads.append([remapped_nids, pid])
        else:
            raise NotImplementedError(element)
    return nid_to_eid_map
def _write_surf(surf_filename, maxnode,
                nodes, tris, quads,
                node_flags, initial_normal_spacing0, pid_to_element_flags, bl_thickness0,
                renumber_pids, tol):
    """Write the AFLR3 surf file: a counts header, then nodes, tris, quads.

    Node lines carry xyz plus [initial_normal_spacing, BL_thickness] from
    ``node_flags`` (falling back to the ``*0`` defaults when a node has no
    usable entry).  Element lines carry the (optionally renumbered) pid, a
    recon flag of 0 and the grid BC from ``pid_to_element_flags``.
    """
    ntris = len(tris)
    nquads = len(quads)
    assert ntris + nquads > 0, 'nelements=%s' % (ntris + nquads)
    with open(surf_filename, 'w', encoding='ascii') as surf_file:
        #surf_file.write('ntris nquads nnodes\n')
        surf_file.write('%i %i %i\n' % (ntris, nquads, maxnode))
        # writing nodes
        for nid, node in enumerate(nodes):
            x, y, z = node
            # Snap near-zero coordinates to exactly 0.0 to keep output clean.
            if abs(x) < tol:
                x = 0.0
            if abs(y) < tol:
                y = 0.0
            if abs(z) < tol:
                z = 0.0
            try:
                initial_normal_spacing, bl_thickness = node_flags[nid + 1]
            except KeyError:
                initial_normal_spacing = initial_normal_spacing0
                bl_thickness = bl_thickness0
            except ValueError:
                # Entry exists but does not unpack to two values; fall back.
                initial_normal_spacing = initial_normal_spacing0
                bl_thickness = bl_thickness0
            surf_file.write('%.10e %.10e %.10e %g %g\n' % (
                x, y, z, initial_normal_spacing, bl_thickness))
        # writing triangles
        rf = 0  # recon flag is used for tris and is super confusing; quads are better
        for unused_eid, (element, pid) in enumerate(tris):
            unused_name, initial_normal_spacing, bl_thickness, grid_bc = pid_to_element_flags[pid]
            if pid in renumber_pids:
                pid = renumber_pids[pid]
            n1, n2, n3 = element
            surf_file.write('%i %i %i %i %s %s\n' % (n1, n2, n3, pid, rf, grid_bc))
        # writing quads
        rf = 0
        for unused_eid, (element, pid) in enumerate(quads):
            unused_name, initial_normal_spacing, bl_thickness, grid_bc = pid_to_element_flags[pid]
            if pid in renumber_pids:
                pid = renumber_pids[pid]
            n1, n2, n3, n4 = element
            surf_file.write('%i %i %i %i %i %s %s\n' % (n1, n2, n3, n4, pid, rf, grid_bc))
| [
"numpy.allclose",
"pyNastran.bdf.bdf.read_bdf",
"numpy.unique",
"pyNastran.bdf.mesh_utils.remove_unused.remove_unused",
"pyNastran.bdf.mesh_utils.bdf_renumber.bdf_renumber",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"pyNastran.bdf.mesh_utils.bdf_equivalence.bdf_equivalence_nodes"
] | [((828, 934), 'pyNastran.bdf.mesh_utils.remove_unused.remove_unused', 'remove_unused', (['bdf_filename'], {'remove_nids': '(True)', 'remove_cids': '(False)', 'remove_pids': '(False)', 'remove_mids': '(False)'}), '(bdf_filename, remove_nids=True, remove_cids=False,\n remove_pids=False, remove_mids=False)\n', (841, 934), False, 'from pyNastran.bdf.mesh_utils.remove_unused import remove_unused\n'), ((11396, 11432), 'numpy.zeros', 'zeros', (['(maxnode, 3)'], {'dtype': '"""float64"""'}), "((maxnode, 3), dtype='float64')\n", (11401, 11432), False, 'from numpy import array, allclose, unique, zeros\n'), ((12490, 12507), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12501, 12507), False, 'from collections import defaultdict\n'), ((1530, 1564), 'pyNastran.bdf.bdf.read_bdf', 'read_bdf', (['bdf_filename'], {'xref': '(False)'}), '(bdf_filename, xref=False)\n', (1538, 1564), False, 'from pyNastran.bdf.bdf import read_bdf\n'), ((3530, 3650), 'pyNastran.bdf.mesh_utils.bdf_equivalence.bdf_equivalence_nodes', 'bdf_equivalence_nodes', (['model', 'bdf_equivalenced_filename', 'equivalence_tol'], {'renumber_nodes': '(False)', 'neq_max': '(4)', 'xref': '(True)'}), '(model, bdf_equivalenced_filename, equivalence_tol,\n renumber_nodes=False, neq_max=4, xref=True)\n', (3551, 3650), False, 'from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes\n'), ((7104, 7137), 'pyNastran.bdf.bdf.read_bdf', 'read_bdf', (['bdf_filename'], {'xref': 'xref'}), '(bdf_filename, xref=xref)\n', (7112, 7137), False, 'from pyNastran.bdf.bdf import read_bdf\n'), ((8886, 8904), 'numpy.array', 'array', (['node_flagsi'], {}), '(node_flagsi)\n', (8891, 8904), False, 'from numpy import array, allclose, unique, zeros\n'), ((3710, 3832), 'pyNastran.bdf.mesh_utils.bdf_renumber.bdf_renumber', 'bdf_renumber', (['bdf_equivalenced_filename', 'bdf_filename_out'], {'size': '(8)', 'is_double': '(False)', 'starting_id_dict': 'starting_id_dict'}), '(bdf_equivalenced_filename, 
bdf_filename_out, size=8, is_double\n =False, starting_id_dict=starting_id_dict)\n', (3722, 3832), False, 'from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber\n'), ((3912, 4013), 'pyNastran.bdf.mesh_utils.bdf_renumber.bdf_renumber', 'bdf_renumber', (['model', 'bdf_filename_out'], {'size': '(8)', 'is_double': '(False)', 'starting_id_dict': 'starting_id_dict'}), '(model, bdf_filename_out, size=8, is_double=False,\n starting_id_dict=starting_id_dict)\n', (3924, 4013), False, 'from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber\n'), ((9463, 9505), 'numpy.allclose', 'allclose', (['avg_node_flagsi', 'max_node_flagsi'], {}), '(avg_node_flagsi, max_node_flagsi)\n', (9471, 9505), False, 'from numpy import array, allclose, unique, zeros\n'), ((9527, 9554), 'numpy.unique', 'unique', (['nid_to_eid_map[nid]'], {}), '(nid_to_eid_map[nid])\n', (9533, 9554), False, 'from numpy import array, allclose, unique, zeros\n')] |
import os
import sys
import argparse
import torch
import numpy as np
from random import shuffle
from collections import OrderedDict
from dataloaders.datasetGen import SplitGen, PermutedGen
from utils.utils import factory
import random
def run(args):
    """Train the agent sequentially on each task split and evaluate it.

    After every task, accuracy is measured on the validation (and training)
    sets of all tasks seen so far.

    Returns:
        (acc_table, task_names): acc_table[val_task][train_task] is the
        validation accuracy on val_task after training through train_task.
    """
    if not os.path.exists('outputs'):
        os.mkdir('outputs')
    # Prepare dataloaders
    # train_dataset, val_dataset = dataloaders.base.__dict__[args.dataset](args.dataroot, args.train_aug)
    train_dataset, val_dataset = factory(
        'dataloaders', 'base', args.dataset)(args.dataroot, args.train_aug)
    # Build the continual-learning task sequence: permuted variants of the
    # dataset, or class-based splits sized by first/other_split_size.
    if args.n_permutation > 0:
        train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,
                                                                                  args.n_permutation,
                                                                                  remap_class=not args.no_class_remap)
    else:
        train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
                                                                               first_split_sz=args.first_split_size,
                                                                               other_split_sz=args.other_split_size,
                                                                               rand_split=args.rand_split,
                                                                               remap_class=not args.no_class_remap)
    # Prepare the Agent (model)
    dataset_name = args.dataset + \
        '_{}'.format(args.first_split_size) + \
        '_{}'.format(args.other_split_size)
    # Configuration dict consumed by the agent's constructor.
    agent_config = {'model_lr': args.model_lr, 'momentum': args.momentum, 'model_weight_decay': args.model_weight_decay,
                    'schedule': args.schedule,
                    'model_type': args.model_type, 'model_name': args.model_name, 'model_weights': args.model_weights,
                    'out_dim': {'All': args.force_out_dim} if args.force_out_dim > 0 else task_output_space,
                    'model_optimizer': args.model_optimizer,
                    'print_freq': args.print_freq,
                    'gpu': True if args.gpuid[0] >= 0 else False,
                    'with_head': args.with_head,
                    'reset_model_opt': args.reset_model_opt,
                    'reg_coef': args.reg_coef,
                    'head_lr': args.head_lr,
                    'svd_lr': args.svd_lr,
                    'bn_lr': args.bn_lr,
                    'svd_thres': args.svd_thres,
                    'gamma': args.gamma,
                    'dataset_name': dataset_name
                    }
    # agent = agents.__dict__[args.agent_type].__dict__[args.agent_name](agent_config)
    agent = factory('svd_agent', args.agent_type,
                    args.agent_name)(agent_config)
    # Decide split ordering; task names are numeric strings, so sort by int.
    task_names = sorted(list(task_output_space.keys()), key=int)
    print('Task order:', task_names)
    acc_table = OrderedDict()
    acc_table_train = OrderedDict()
    for i in range(len(task_names)):
        train_name = task_names[i]
        print('======================', train_name,
              '=======================')
        train_loader = torch.utils.data.DataLoader(train_dataset_splits[train_name],
                                                   batch_size=args.batch_size, shuffle=True,
                                                   num_workers=args.workers)
        val_loader = torch.utils.data.DataLoader(val_dataset_splits[train_name],
                                                 batch_size=args.batch_size, shuffle=False,
                                                 num_workers=args.workers)
        # In class-incremental mode the head grows with each new task.
        if args.incremental_class:
            agent.add_valid_output_dim(task_output_space[train_name])
        # Learn
        agent.train_task(train_loader, val_loader)
        torch.cuda.empty_cache()
        # Evaluate on every task trained so far (tasks 0..i).
        acc_table[train_name] = OrderedDict()
        acc_table_train[train_name] = OrderedDict()
        for j in range(i + 1):
            val_name = task_names[j]
            print('validation split name:', val_name)
            val_data = val_dataset_splits[val_name] if not args.eval_on_train_set else train_dataset_splits[
                val_name]
            val_loader = torch.utils.data.DataLoader(val_data,
                                                     batch_size=args.batch_size, shuffle=False,
                                                     num_workers=args.workers)
            acc_table[val_name][train_name] = agent.validation(val_loader)
            print("**************************************************")
            print('training split name:', val_name)
            train_data = train_dataset_splits[val_name] if not args.eval_on_train_set else train_dataset_splits[
                val_name]
            train_loader = torch.utils.data.DataLoader(train_data,
                                                       batch_size=args.batch_size, shuffle=False,
                                                       num_workers=args.workers)
            acc_table_train[val_name][train_name] = agent.validation(
                train_loader)
            print("**************************************************")
    return acc_table, task_names
def get_args(argv):
    """Build the shared command-line interface and parse *argv*.

    Args:
        argv: list of argument strings (typically sys.argv[1:]).

    Returns:
        argparse.Namespace with all experiment options.
    """
    p = argparse.ArgumentParser()
    # --- hardware / backbone ---
    p.add_argument('--gpuid', nargs="+", type=int, default=[1],
                   help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only")
    p.add_argument('--model_type', type=str, default='resnet',
                   help="The type (mlp|lenet|vgg|resnet) of backbone network")
    p.add_argument('--model_name', type=str, default='resnet18',
                   help="The name of actual model for the backbone")
    p.add_argument('--force_out_dim', type=int, default=0,
                   help="Set 0 to let the task decide the required output dimension")
    # --- agent ---
    p.add_argument('--agent_type', type=str, default='svd_based',
                   help="The type (filename) of agent")
    p.add_argument('--agent_name', type=str, default='svd_based',
                   help="The class name of agent")
    p.add_argument('--model_optimizer', type=str, default='Adam',
                   help="SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...")
    # --- dataset / task splits ---
    p.add_argument('--dataroot', type=str, default='../data',
                   help="The root folder of dataset or downloaded data")
    p.add_argument('--dataset', type=str, default='CIFAR100',
                   help="MNIST(default)|CIFAR10|CIFAR100")
    p.add_argument('--n_permutation', type=int, default=0,
                   help="Enable permuted tests when >0")
    p.add_argument('--first_split_size', type=int, default=10)
    p.add_argument('--other_split_size', type=int, default=10)
    # With remapping, split labels become [0, 1, 2, ...]; without it, the
    # original class ids are kept.
    p.add_argument('--no_class_remap', dest='no_class_remap', default=False, action='store_true',
                   help="Avoid the dataset with a subset of classes doing the remapping. Ex: [2,5,6 ...] -> [0,1,2 ...]")
    p.add_argument('--train_aug', dest='train_aug', default=True, action='store_false',
                   help="Allow data augmentation during training")
    p.add_argument('--rand_split', dest='rand_split', default=False, action='store_true',
                   help="Randomize the classes in splits")
    # --- training hyperparameters ---
    p.add_argument('--workers', type=int, default=0,
                   help="#Thread for dataloader")
    p.add_argument('--batch_size', type=int, default=128)
    p.add_argument('--model_lr', type=float, default=0.0005,
                   help="Classifier Learning rate")
    p.add_argument('--head_lr', type=float, default=0.0005,
                   help="Classifier Learning rate")
    p.add_argument('--svd_lr', type=float, default=0.0005,
                   help="Classifier Learning rate")
    p.add_argument('--bn_lr', type=float, default=0.0005,
                   help="Classifier Learning rate")
    p.add_argument('--gamma', type=float, default=0.5,
                   help="Learning rate decay")
    p.add_argument('--svd_thres', type=float, default=1.0,
                   help='reserve eigenvector')
    p.add_argument('--momentum', type=float, default=0)
    p.add_argument('--model_weight_decay', type=float, default=1e-5)
    p.add_argument('--schedule', nargs="+", type=int, default=[1],
                   help="epoch ")
    p.add_argument('--print_freq', type=float, default=10,
                   help="Print the log at every x iteration")
    p.add_argument('--model_weights', type=str, default=None,
                   help="The path to the file for the model weights (*.pth).")
    # --- experiment control flags ---
    p.add_argument('--eval_on_train_set', dest='eval_on_train_set', default=False, action='store_true',
                   help="Force the evaluation on train set")
    p.add_argument('--offline_training', dest='offline_training', default=False, action='store_true',
                   help="Non-incremental learning by make all data available in one batch. For measuring the upperbound performance.")
    p.add_argument('--repeat', type=int, default=1,
                   help="Repeat the experiment N times")
    p.add_argument('--incremental_class', dest='incremental_class', default=False, action='store_true',
                   help="The number of output node in the single-headed model increases along with new categories.")
    p.add_argument('--with_head', dest='with_head', default=False, action='store_true',
                   help="whether constraining head")
    p.add_argument('--reset_model_opt', dest='reset_model_opt', default=True, action='store_true',
                   help="whether reset optimizer for model at the start of training each tasks")
    p.add_argument('--reg_coef', type=float, default=100,
                   help="The coefficient for ewc reg")
    return p.parse_args(argv)
if __name__ == '__main__':
    args = get_args(sys.argv[1:])
    # Per-repeat final metrics, aggregated at the end.
    avg_final_acc = np.zeros(args.repeat)
    final_bwt = np.zeros(args.repeat)
    torch.cuda.set_device(args.gpuid[0])
    # Seed all RNG sources for reproducibility (numpy, torch CPU/GPU, python).
    SEED = 0
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    random.seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if torch.cuda.is_available():
        torch.cuda.manual_seed(SEED)
    for r in range(args.repeat):
        # Run the experiment
        acc_table, task_names = run(args)
        print(acc_table)
        # Calculate average performance across tasks
        # Customize this part for a different performance metric
        avg_acc_history = [0] * len(task_names)
        bwt_history = [0] * len(task_names)
        for i in range(len(task_names)):
            train_name = task_names[i]
            cls_acc_sum = 0
            backward_transfer = 0
            # Average accuracy over tasks 0..i after training task i;
            # backward transfer measures accuracy change on earlier tasks.
            for j in range(i + 1):
                val_name = task_names[j]
                cls_acc_sum += acc_table[val_name][train_name]
                backward_transfer += acc_table[val_name][train_name] - \
                    acc_table[val_name][val_name]
            avg_acc_history[i] = cls_acc_sum / (i + 1)
            bwt_history[i] = backward_transfer / i if i > 0 else 0
            print('Task', train_name, 'average acc:', avg_acc_history[i])
            print('Task', train_name, 'backward transfer:', bwt_history[i])
        # Gather the final avg accuracy
        avg_final_acc[r] = avg_acc_history[-1]
        final_bwt[r] = bwt_history[-1]
        # Print the summary so far
        print('===Summary of experiment repeats:',
              r + 1, '/', args.repeat, '===')
        print('The last avg acc of all repeats:', avg_final_acc)
        print('The last bwt of all repeats:', final_bwt)
    print('acc mean:', avg_final_acc.mean(),
          'acc std:', avg_final_acc.std())
    print('bwt mean:', final_bwt.mean(), 'bwt std:', final_bwt.std())
| [
"torch.manual_seed",
"collections.OrderedDict",
"os.path.exists",
"argparse.ArgumentParser",
"random.seed",
"utils.utils.factory",
"dataloaders.datasetGen.PermutedGen",
"numpy.zeros",
"torch.cuda.is_available",
"dataloaders.datasetGen.SplitGen",
"numpy.random.seed",
"os.mkdir",
"torch.utils.... | [((3024, 3037), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3035, 3037), False, 'from collections import OrderedDict\n'), ((3060, 3073), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3071, 3073), False, 'from collections import OrderedDict\n'), ((5453, 5478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5476, 5478), False, 'import argparse\n'), ((10526, 10547), 'numpy.zeros', 'np.zeros', (['args.repeat'], {}), '(args.repeat)\n', (10534, 10547), True, 'import numpy as np\n'), ((10564, 10585), 'numpy.zeros', 'np.zeros', (['args.repeat'], {}), '(args.repeat)\n', (10572, 10585), True, 'import numpy as np\n'), ((10590, 10626), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpuid[0]'], {}), '(args.gpuid[0])\n', (10611, 10626), False, 'import torch\n'), ((10655, 10675), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (10669, 10675), True, 'import numpy as np\n'), ((10680, 10703), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (10697, 10703), False, 'import torch\n'), ((10708, 10725), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (10719, 10725), False, 'import random\n'), ((10730, 10758), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (10752, 10758), False, 'import torch\n'), ((10857, 10882), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10880, 10882), False, 'import torch\n'), ((263, 288), 'os.path.exists', 'os.path.exists', (['"""outputs"""'], {}), "('outputs')\n", (277, 288), False, 'import os\n'), ((298, 317), 'os.mkdir', 'os.mkdir', (['"""outputs"""'], {}), "('outputs')\n", (306, 317), False, 'import os\n'), ((484, 528), 'utils.utils.factory', 'factory', (['"""dataloaders"""', '"""base"""', 'args.dataset'], {}), "('dataloaders', 'base', args.dataset)\n", (491, 528), False, 'from utils.utils import factory\n'), ((670, 770), 'dataloaders.datasetGen.PermutedGen', 
'PermutedGen', (['train_dataset', 'val_dataset', 'args.n_permutation'], {'remap_class': '(not args.no_class_remap)'}), '(train_dataset, val_dataset, args.n_permutation, remap_class=not\n args.no_class_remap)\n', (681, 770), False, 'from dataloaders.datasetGen import SplitGen, PermutedGen\n'), ((1011, 1196), 'dataloaders.datasetGen.SplitGen', 'SplitGen', (['train_dataset', 'val_dataset'], {'first_split_sz': 'args.first_split_size', 'other_split_sz': 'args.other_split_size', 'rand_split': 'args.rand_split', 'remap_class': '(not args.no_class_remap)'}), '(train_dataset, val_dataset, first_split_sz=args.first_split_size,\n other_split_sz=args.other_split_size, rand_split=args.rand_split,\n remap_class=not args.no_class_remap)\n', (1019, 1196), False, 'from dataloaders.datasetGen import SplitGen, PermutedGen\n'), ((2787, 2841), 'utils.utils.factory', 'factory', (['"""svd_agent"""', 'args.agent_type', 'args.agent_name'], {}), "('svd_agent', args.agent_type, args.agent_name)\n", (2794, 2841), False, 'from utils.utils import factory\n'), ((3262, 3396), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset_splits[train_name]'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(train_dataset_splits[train_name], batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers)\n', (3289, 3396), False, 'import torch\n'), ((3515, 3648), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset_splits[train_name]'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(val_dataset_splits[train_name], batch_size=args\n .batch_size, shuffle=False, num_workers=args.workers)\n', (3542, 3648), False, 'import torch\n'), ((3924, 3948), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3946, 3948), False, 'import torch\n'), ((4000, 4013), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4011, 4013), False, 'from collections import 
OrderedDict\n'), ((4052, 4065), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4063, 4065), False, 'from collections import OrderedDict\n'), ((10892, 10920), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (10914, 10920), False, 'import torch\n'), ((4349, 4460), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(val_data, batch_size=args.batch_size, shuffle=\n False, num_workers=args.workers)\n', (4376, 4460), False, 'import torch\n'), ((4928, 5041), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(train_data, batch_size=args.batch_size, shuffle\n =False, num_workers=args.workers)\n', (4955, 5041), False, 'import torch\n')] |
import unittest
from CoolProp.CoolProp import PropsSI
import CoolProp
import numpy as np
def test_input_types():
    """Nose-style generator test: yield (check_type, fluid, Tvals) so PropsSI
    is exercised with a scalar, a list, and a numpy array of temperatures."""
    for Fluid in ['Water']:
        # Scalar midpoint, a two-element list, and a 30-point array, all
        # strictly inside (Tmin, Tcrit) so the saturation call is valid.
        for Tvals in [0.5 * PropsSI(Fluid, 'Tmin') + 0.5 * PropsSI(Fluid, 'Tcrit'),
                      [PropsSI(Fluid, 'Tmin') + 1e-5, PropsSI(Fluid, 'Tcrit') - 1e-5],
                      np.linspace(PropsSI(Fluid, 'Tmin') + 1e-5, PropsSI(Fluid, 'Tcrit') - 1e-5, 30)
                      ]:
            yield check_type, Fluid, Tvals
def check_type(fluid, Tvals):
    """Smoke test: PropsSI must accept *Tvals* whether scalar, list, or array."""
    PropsSI('P', 'T', Tvals, 'Q', 0, fluid)
class PropsFailures(unittest.TestCase):
    """Verify that PropsSI rejects malformed array inputs with TypeError."""

    def testUnmatchedLengths(self):
        # Input arrays of different lengths must be rejected.
        with self.assertRaises(TypeError):
            PropsSI('P', 'T', [280, 290, 300], 'Q', [0, 1], 'R134a')

    def testMatrix(self):
        # Two-dimensional inputs are not supported.
        temps = np.array([280, 290, 300, 280, 290, 300]).reshape(2, 3)
        quals = np.array([0, 0.5, 1, 0.0, 0.5, 1]).reshape(2, 3)
        with self.assertRaises(TypeError):
            PropsSI('P', 'T', temps, 'Q', quals, 'R134a')
if __name__ == '__main__':
    # Run this module's tests with nose when executed directly.
    import nose
    nose.runmodule()
| [
"CoolProp.CoolProp.PropsSI",
"numpy.array",
"nose.runmodule"
] | [((519, 558), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['"""P"""', '"""T"""', 'Tvals', '"""Q"""', '(0)', 'fluid'], {}), "('P', 'T', Tvals, 'Q', 0, fluid)\n", (526, 558), False, 'from CoolProp.CoolProp import PropsSI\n'), ((984, 1000), 'nose.runmodule', 'nose.runmodule', ([], {}), '()\n', (998, 1000), False, 'import nose\n'), ((171, 193), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tmin"""'], {}), "(Fluid, 'Tmin')\n", (178, 193), False, 'from CoolProp.CoolProp import PropsSI\n'), ((202, 225), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tcrit"""'], {}), "(Fluid, 'Tcrit')\n", (209, 225), False, 'from CoolProp.CoolProp import PropsSI\n'), ((250, 272), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tmin"""'], {}), "(Fluid, 'Tmin')\n", (257, 272), False, 'from CoolProp.CoolProp import PropsSI\n'), ((281, 304), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tcrit"""'], {}), "(Fluid, 'Tcrit')\n", (288, 304), False, 'from CoolProp.CoolProp import PropsSI\n'), ((348, 370), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tmin"""'], {}), "(Fluid, 'Tmin')\n", (355, 370), False, 'from CoolProp.CoolProp import PropsSI\n'), ((379, 402), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['Fluid', '"""Tcrit"""'], {}), "(Fluid, 'Tcrit')\n", (386, 402), False, 'from CoolProp.CoolProp import PropsSI\n'), ((815, 855), 'numpy.array', 'np.array', (['[280, 290, 300, 280, 290, 300]'], {}), '([280, 290, 300, 280, 290, 300])\n', (823, 855), True, 'import numpy as np\n'), ((876, 910), 'numpy.array', 'np.array', (['[0, 0.5, 1, 0.0, 0.5, 1]'], {}), '([0, 0.5, 1, 0.0, 0.5, 1])\n', (884, 910), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Script to create pedestal file for low level calibration.
To set start sample in waveform --start_r0_waveform i (default i = 11)
not to use deltaT correction add --deltaT False
- Input: fits.fz file
- Output: drs4_pedestal.fits file
Usage:
$> python lstchain_data_create_pedestal_file.py
--input-file LST-1.1.Run00097.0000.fits.fz
--output_file drs4_pedestalRun2028.0000.fits
--max_events 9000
"""
from traitlets.config import Config
import argparse
import numpy as np
from astropy.io import fits
from ctapipe.io import EventSource
from distutils.util import strtobool
from lstchain.calib.camera.drs4 import DragonPedestal
# Command-line interface for the pedestal-creation script.
# NOTE(review): parse_args() runs at import time, so importing this module
# outside a script context will consume sys.argv — confirm intended.
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument("--input-file", '-f', type=str, action='store',
                    dest='input_file',
                    help="Path to fitz.fz file to create pedestal file.",
                    default=None, required=True)
parser.add_argument("--output-file", '-o', type=str, action='store',
                    dest='output_file',
                    help="Path where script create pedestal file",
                    default=None, required=True)
# Optional arguments
parser.add_argument("--max-events",
                    help="Maximum numbers of events to read. Default = 20000",
                    type=int,
                    default=20000)
parser.add_argument("--start-r0-waveform",
                    help="Start sample for waveform. Default = 11",
                    type=int,
                    default=11)
# strtobool accepts yes/no/true/false/1/0 (case-insensitive) from the shell.
parser.add_argument('--deltaT', '-s',
                    type=lambda x: bool(strtobool(x)),
                    help='Boolean. True for use deltaT correction'
                    'Default=True, use False otherwise',
                    default=True)
args = parser.parse_args()
# Event-source configuration: cap the number of events read from the file.
source_config = {
    "LSTEventSource": {
        "max_events":args.max_events
    }
}
def main():
    """Accumulate DRS4 pedestal values from a raw run and write them to FITS.

    Reads events with the module-level ``args``/``source_config``, fills a
    DragonPedestal per telescope, then writes pixel ids, the mean pedestal
    array, and the failing-pixel mask to ``args.output_file``.
    """
    print("--> Input file: {}".format(args.input_file))
    print("--> Number of events: {}".format(args.max_events))
    reader = EventSource(input_url=args.input_file, config=Config(source_config))
    print("--> Number of files", reader.multi_file.num_inputs())
    for i, event in enumerate(reader):
        for tel_id in event.trigger.tels_with_trigger:
            # The pedestal accumulator is created once, on the first event.
            if i==0:
                n_modules = event.lst.tel[tel_id].svc.num_modules
                pedestal = DragonPedestal(tel_id=tel_id, n_module=n_modules, r0_sample_start=args.start_r0_waveform)
            if args.deltaT:
                # Apply the deltaT (time-lapse) correction before filling.
                reader.r0_r1_calibrator.update_first_capacitors(event)
                reader.r0_r1_calibrator.time_lapse_corr(event, tel_id)
            pedestal.fill_pedestal_event(event)
            if i%500 == 0:
                print("i = {}, ev id = {}".format(i, event.index.event_id))
    # Finalize pedestal and write to fits file
    # NOTE(review): ``pedestal``, ``event`` and ``tel_id`` are loop variables
    # reused here; an input with zero events would raise NameError — confirm
    # inputs are never empty.
    pedestal.finalize_pedestal()
    expected_pixel_id = fits.PrimaryHDU(event.lst.tel[tel_id].svc.pixel_ids)
    pedestal_array = fits.ImageHDU(np.int16(pedestal.meanped),
                                  name="pedestal array")
    failing_pixels_column = fits.Column(name='failing pixels',
                                        array=pedestal.failing_pixels_array,
                                        format='K')
    failing_pixels = fits.BinTableHDU.from_columns([failing_pixels_column],
                                                  name="failing pixels")
    hdulist = fits.HDUList([expected_pixel_id, pedestal_array, failing_pixels])
    hdulist.writeto(args.output_file)


if __name__ == '__main__':
    main()
| [
"distutils.util.strtobool",
"lstchain.calib.camera.drs4.DragonPedestal",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.HDUList",
"argparse.ArgumentParser",
"numpy.int16",
"astropy.io.fits.Column",
"traitlets.config.Config",
"astropy.io.fits.BinTableHDU.from_columns"
] | [((671, 696), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (694, 696), False, 'import argparse\n'), ((2928, 2980), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['event.lst.tel[tel_id].svc.pixel_ids'], {}), '(event.lst.tel[tel_id].svc.pixel_ids)\n', (2943, 2980), False, 'from astropy.io import fits\n'), ((3130, 3217), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""failing pixels"""', 'array': 'pedestal.failing_pixels_array', 'format': '"""K"""'}), "(name='failing pixels', array=pedestal.failing_pixels_array,\n format='K')\n", (3141, 3217), False, 'from astropy.io import fits\n'), ((3315, 3392), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['[failing_pixels_column]'], {'name': '"""failing pixels"""'}), "([failing_pixels_column], name='failing pixels')\n", (3344, 3392), False, 'from astropy.io import fits\n'), ((3459, 3524), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[expected_pixel_id, pedestal_array, failing_pixels]'], {}), '([expected_pixel_id, pedestal_array, failing_pixels])\n', (3471, 3524), False, 'from astropy.io import fits\n'), ((3016, 3042), 'numpy.int16', 'np.int16', (['pedestal.meanped'], {}), '(pedestal.meanped)\n', (3024, 3042), True, 'import numpy as np\n'), ((2111, 2132), 'traitlets.config.Config', 'Config', (['source_config'], {}), '(source_config)\n', (2117, 2132), False, 'from traitlets.config import Config\n'), ((1631, 1643), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (1640, 1643), False, 'from distutils.util import strtobool\n'), ((2409, 2503), 'lstchain.calib.camera.drs4.DragonPedestal', 'DragonPedestal', ([], {'tel_id': 'tel_id', 'n_module': 'n_modules', 'r0_sample_start': 'args.start_r0_waveform'}), '(tel_id=tel_id, n_module=n_modules, r0_sample_start=args.\n start_r0_waveform)\n', (2423, 2503), False, 'from lstchain.calib.camera.drs4 import DragonPedestal\n')] |
# syn.py
# <NAME> (with <NAME>)
# August 3, 2017
# python3
"""
Routines to generate a synthetic test election dataset for multi.py.
Calls data generation routines in syn1.py for elections "of type 1",
and calls routines in syn2.py for elections "of type 2".
"""
import argparse
import copy
import numpy as np
import os
import multi
import audit_orders
import election_spec
import ids
import outcomes
import reported
import syn1
import syn2
import utils
import write_csv
class Syn_Params(object):
    """Empty namespace object; synthesis routines attach parameters to it as attributes."""
##############################################################################
## random choices
def geospace(start, stop, num=7):
    """
    Return a sorted list of up to num distinct integers from start..stop
    (inclusive), spread out geometrically rather than linearly.

    Endpoints are always included; interior points are rounded from a
    geometric progression, so duplicates may reduce the list length.

    >>> geospace(0,1)
    [0, 1]
    >>> geospace(0,10)
    [0, 1, 2, 3, 5, 7, 10]
    >>> geospace(20, 10000)
    [20, 56, 159, 447, 1260, 3550, 10000]
    >>> geospace(1, 64)
    [1, 2, 4, 8, 16, 32, 64]
    """
    values = {start, stop}
    # Clamp the geometric base at 1 so a zero start does not divide by zero.
    lo = max(start, 1)
    for step in range(1, num - 1):
        ratio = (stop / lo) ** (step / (num - 1))
        values.add(int(np.rint(lo * ratio)))
    return sorted(values)
def geospace_choice(e, syn, start, stop, num=7):
    """
    Draw one element uniformly at random from geospace(start, stop, num),
    using syn.RandomState as the source of randomness.
    """
    candidates = geospace(start, stop, num)
    return syn.RandomState.choice(candidates)
def generate_segments(e, syn, low, high):
    """
    Return a list of (high-low) random segments (r, s) with low <= r < s <= high.

    Each segment covers cids r..s inclusive; no degenerate (k, k) segments
    are produced.  The segments nest: any two are disjoint, equal, or one
    contains the other.  Randomness comes from syn.RandomState.
    """
    assert low <= high
    if low == high:
        return []
    # Split at a random midpoint and recurse on both halves; the covering
    # segment (low, high) comes first, matching the original ordering.
    mid = syn.RandomState.choice(range(low, high))
    return ([(low, high)]
            + generate_segments(e, syn, low, mid)
            + generate_segments(e, syn, mid + 1, high))
##############################################################################
# Command-line arguments
def parse_args():
    """Parse the command line for synthetic-election generation.

    Returns an argparse.Namespace with ``election_dirname`` (positional)
    and ``syn_type`` (optional, default '1').
    """
    p = argparse.ArgumentParser(description=\
        ("syn.py: "
         "Generates synthetic elections for "
         "multi.py, a Bayesian post-election "
         "audit program for an election with "
         "multiple contests and multiple paper "
         "ballot collections."))
    # Mandatory argument: dirname
    p.add_argument("election_dirname",
                   help=('The name of a subdirectory within the elections '
                         'root directory, where the output of this program '
                         'will be placed.  '
                         'A parameter value of "" gets the default '
                         'of TestElection followed by datetime.  '
                         'A file with name foo.csv within subdirectory syn2_specs '
                         'gives the synthetic election specification for '
                         'syn_type 2, where foo is the election_dirname.  '))
    # All others are optional
    p.add_argument("--syn_type",
                   help="Type of synthetic election. (1 or 2)",
                   default='1')
    return p.parse_args()
def process_args(e, args):
    """Apply parsed arguments to election *e* and run the chosen generator."""
    e.election_dirname = ids.filename_safe(args.election_dirname)
    e.election_name = e.election_dirname
    # Dispatch on the synthetic-election type.
    generators = {'1': syn1.generate_syn_type_1,
                  '2': syn2.generate_syn_type_2}
    generate = generators.get(args.syn_type)
    if generate is None:
        print("Illegal syn_type:", args.syn_type)
    else:
        generate(e, args)
if __name__=="__main__":
e = multi.Election()
args = parse_args()
process_args(e, args)
filepath = os.path.join(multi.ELECTIONS_ROOT, e.election_dirname)
print(" Done. Synthetic election written to:", filepath)
| [
"syn1.generate_syn_type_1",
"argparse.ArgumentParser",
"os.path.join",
"ids.filename_safe",
"numpy.rint",
"multi.Election",
"syn2.generate_syn_type_2"
] | [((2641, 2858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""syn.py: Generates synthetic elections for multi.py, a Bayesian post-election audit program for an election with multiple contests and multiple paper ballot collections."""'}), "(description=\n 'syn.py: Generates synthetic elections for multi.py, a Bayesian post-election audit program for an election with multiple contests and multiple paper ballot collections.'\n )\n", (2664, 2858), False, 'import argparse\n'), ((4067, 4107), 'ids.filename_safe', 'ids.filename_safe', (['args.election_dirname'], {}), '(args.election_dirname)\n', (4084, 4107), False, 'import ids\n'), ((4414, 4430), 'multi.Election', 'multi.Election', ([], {}), '()\n', (4428, 4430), False, 'import multi\n'), ((4498, 4552), 'os.path.join', 'os.path.join', (['multi.ELECTIONS_ROOT', 'e.election_dirname'], {}), '(multi.ELECTIONS_ROOT, e.election_dirname)\n', (4510, 4552), False, 'import os\n'), ((4211, 4244), 'syn1.generate_syn_type_1', 'syn1.generate_syn_type_1', (['e', 'args'], {}), '(e, args)\n', (4235, 4244), False, 'import syn1\n'), ((4284, 4317), 'syn2.generate_syn_type_2', 'syn2.generate_syn_type_2', (['e', 'args'], {}), '(e, args)\n', (4308, 4317), False, 'import syn2\n'), ((1420, 1470), 'numpy.rint', 'np.rint', (['(start * (stop / start) ** (i / (num - 1)))'], {}), '(start * (stop / start) ** (i / (num - 1)))\n', (1427, 1470), True, 'import numpy as np\n')] |
import numpy as np
from quadprog import solve_qp as _solve_qp
def solve_qp(P, q, G, h, A=None, b=None, sym_proj=False):
    """Solve a quadratic program with the quadprog (Goldfarb-Idnani dual) solver.

    Minimizes (1/2) x^T P x + q^T x subject to G x <= h and, optionally,
    A x = b.  See [Goldfarb83]_ for the underlying algorithm.

    Parameters
    ----------
    P : array, shape=(n, n)
        Symmetric quadratic-cost matrix.
    q : array, shape=(n,)
        Quadratic-cost vector.
    G : array, shape=(m, n)
        Linear inequality matrix.
    h : array, shape=(m,)
        Linear inequality vector.
    A : array, shape=(meq, n), optional
        Linear equality matrix.
    b : array, shape=(meq,), optional
        Linear equality vector.
    sym_proj : bool, optional
        quadprog assumes ``P`` is symmetric; set this to `True` to project
        ``P`` onto its symmetric part beforehand.

    Returns
    -------
    x : array, shape=(n,)
        Optimal solution to the QP, if found.

    Raises
    ------
    ValueError
        If the QP is not feasible.
    """
    cost_matrix = .5 * (P + P.T) if sym_proj else P
    if A is None:
        # Inequality constraints only.
        constraint_matrix = - G.T
        constraint_vector = - h
        n_eq = 0
    else:
        # quadprog wants equality rows stacked first, counted via meq.
        constraint_matrix = - np.vstack([A, G]).T
        constraint_vector = - np.hstack([b, h])
        n_eq = A.shape[0]
    return _solve_qp(cost_matrix, -q, constraint_matrix, constraint_vector, n_eq)[0]
| [
"quadprog.solve_qp",
"numpy.vstack",
"numpy.hstack"
] | [((1690, 1728), 'quadprog.solve_qp', '_solve_qp', (['qp_G', 'qp_a', 'qp_C', 'qp_b', 'meq'], {}), '(qp_G, qp_a, qp_C, qp_b, meq)\n', (1699, 1728), True, 'from quadprog import solve_qp as _solve_qp\n'), ((1544, 1561), 'numpy.hstack', 'np.hstack', (['[b, h]'], {}), '([b, h])\n', (1553, 1561), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.vstack', 'np.vstack', (['[A, G]'], {}), '([A, G])\n', (1516, 1524), True, 'import numpy as np\n')] |
import numpy as np
from scipy.io.idl import readsav
from scipy.interpolate import interp1d
from harps_hacks import read_harps
import h5py
import math
from astropy.io import fits
import shutil
import glob
import os
def dimensions(instrument):
    """Return the detector dimensions for a spectrograph.

    Parameters
    ----------
    instrument : str
        Instrument name; currently only 'HARPS' is supported.

    Returns
    -------
    (M, R) : tuple of int
        M = pixels per order, R = number of orders.

    Raises
    ------
    ValueError
        If *instrument* is not recognized.  (Previously this printed a
        message and returned None, which made callers fail later with an
        opaque TypeError at tuple unpacking.)
    """
    if instrument == 'HARPS':
        M = 4096  # pixels per order
        R = 72  # orders
        return M, R
    raise ValueError("instrument not recognized. valid options are: HARPS")
def read_data_from_fits(filelist, e2ds=False):
    """Read HARPS e2ds spectra plus pipeline metadata for a list of CCF files.

    For each CCF file the matching e2ds spectrum is located by filename
    substitution; epochs missing a CCF header or a readable spectrum are
    dropped from every output array.

    Returns (data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs,
    airms, drifts): per-order lists of (N_epochs, M_pixels) arrays plus
    per-epoch metadata vectors.
    """
    # input : a list of CCF filenames
    N = len(filelist) # number of epochs
    M, R = dimensions('HARPS')
    data = [np.zeros((N,M)) for r in range(R)]
    ivars = [np.zeros((N,M)) for r in range(R)]
    xs = [np.zeros((N,M)) for r in range(R)]
    # Indices of epochs to discard (bad headers / missing spectra).
    empty = np.array([], dtype=int)
    pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)
    for n,f in enumerate(filelist):
        sp = fits.open(f)
        if not e2ds:
            try:
                pipeline_rvs[n] = sp[0].header['HIERARCH ESO DRS CCF RVC'] * 1.e3 # m/s
                pipeline_sigmas[n] = sp[0].header['HIERARCH ESO DRS CCF NOISE'] * 1.e3 # m/s
                drifts[n] = sp[0].header['HIERARCH ESO DRS DRIFT SPE RV']
            except KeyError:
                print("WARNING: {0} does not appear to be a stellar CCF file. Skipping this one.".format(f))
                empty = np.append(empty, n)
                continue
        dates[n] = sp[0].header['HIERARCH ESO DRS BJD']
        bervs[n] = sp[0].header['HIERARCH ESO DRS BERV'] * 1.e3 # m/s
        airms[n] = sp[0].header['HIERARCH ESO TEL AIRM START']
        # Map the CCF filename (any mask flavor) to its e2ds spectrum file.
        spec_file = str.replace(f, 'ccf_G2', 'e2ds')
        spec_file = str.replace(spec_file, 'ccf_M2', 'e2ds')
        spec_file = str.replace(spec_file, 'ccf_K5', 'e2ds')
        try:
            wave, spec = read_harps.read_spec_2d(spec_file)
        except:
            empty = np.append(empty, n)
            continue
        snrs = read_harps.read_snr(f) # HACK
        # save stuff
        for r in range(R):
            data[r][n,:] = spec[r,:]
            ivars[r][n,:] = snrs[r]**2/spec[r,:]/np.nanmean(spec[r,:]) # scaling hack
            xs[r][n,:] = wave[r,:]
    # delete data without wavelength solutions:
    for r in range(R):
        data[r] = np.delete(data[r], empty, axis=0)
        ivars[r] = np.delete(ivars[r], empty, axis=0)
        xs[r] = np.delete(xs[r], empty, axis=0)
    pipeline_rvs = np.delete(pipeline_rvs, empty)
    pipeline_sigmas = np.delete(pipeline_sigmas, empty)
    dates = np.delete(dates, empty)
    bervs = np.delete(bervs, empty)
    airms = np.delete(airms, empty)
    drifts = np.delete(drifts, empty)
    # re-introduce BERVs to HARPS results:
    pipeline_rvs -= bervs
    pipeline_rvs -= np.mean(pipeline_rvs)
    return data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts
def savfile_to_filelist(savfile, destination_dir='../data/'):
    """Copy the CCF + E2DS files listed in an IDL save file and return the
    new CCF paths.

    Parameters
    ----------
    savfile : str
        Path to an IDL ``.sav`` file whose ``files`` field holds CCF
        filenames (as bytes).
    destination_dir : str
        Directory (with trailing separator) the files are copied into.

    Returns
    -------
    filelist : ndarray of str
        Paths of the copied CCF files inside `destination_dir`.
    """
    s = readsav(savfile)
    # accumulate in a Python list: np.append in a loop re-allocates the
    # whole array on every iteration (quadratic time)
    filelist = []
    for f in [b.decode('utf8') for b in s.files]:
        shutil.copy2(f, destination_dir)
        # every CCF file has a companion E2DS spectrum file that is needed
        # downstream, so copy that as well
        spec_file = str.replace(f, 'ccf_G2', 'e2ds')
        shutil.copy2(spec_file, destination_dir)
        filelist.append(destination_dir + os.path.basename(f))
    # always return an ndarray (the old code returned a plain list when
    # s.files was empty and an ndarray otherwise)
    return np.array(filelist)
def missing_wavelength_files(filelist):
    """Find wavelength-calibration files referenced by FITS headers that are
    not present on disk.

    Parameters
    ----------
    filelist : iterable of str
        Paths to HARPS FITS files whose headers name their wavelength
        calibration file ('HIERARCH ESO DRS CAL TH FILE').

    Returns
    -------
    ndarray
        Sorted unique names of the missing wavelength files.
    """
    missing_files = []
    for f in filelist:
        # the wavelength file is expected to live next to the data file
        path = os.path.dirname(f)
        # use a context manager so each FITS handle is closed promptly;
        # the old code leaked one open file per loop iteration
        with fits.open(f) as sp:
            wave_file = sp[0].header['HIERARCH ESO DRS CAL TH FILE']
        if not os.path.isfile(os.path.join(path, wave_file)):
            missing_files.append(wave_file)
    return np.unique(missing_files)
def write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, filenames, hdffile):
    """Write extracted HARPS spectra and per-epoch metadata to an HDF5 file.

    Parameters
    ----------
    data, ivars, xs : array-like
        Spectra, inverse variances and wavelength grids.
    pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts : array-like
        Per-epoch pipeline quantities.
    filenames : list of str
        Source filenames; stored under the 'filelist' dataset.
    hdffile : str
        Output HDF5 filename (overwritten if it exists).
    """
    datasets = {
        'data': data,
        'ivars': ivars,
        'xs': xs,
        'pipeline_rvs': pipeline_rvs,
        'pipeline_sigmas': pipeline_sigmas,
        'dates': dates,
        'bervs': bervs,
        'airms': airms,
        'drifts': drifts,
        # h5py workaround: store filenames as bytes, not unicode
        'filelist': [a.encode('utf8') for a in filenames],
    }
    # context manager guarantees the file is closed even if a write fails
    # (the old code left the file open on an exception)
    with h5py.File(hdffile, 'w') as h:
        for name, value in datasets.items():
            h.create_dataset(name, data=value)
if __name__ == "__main__":
if False: #51 Peg
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/51peg/HARPS*ccf_G2_A.fits')
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/51peg_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: #Barnard's Star
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/barnards/HARPS*ccf_M2_A.fits')
if False: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files for Barnard\'s Star'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/barnards_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: # HD189733
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/HD189733/HARPS*ccf_*_A.fits')
if False: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/HD189733_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: # telluric standard
e2ds_filelist = glob.glob('/Users/mbedell/python/wobble/data/telluric/HARPS*e2ds_A.fits')
if True: # check for missing wavelength files
missing_files = missing_wavelength_files(e2ds_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(e2ds_filelist, e2ds=True)
hdffile = '../data/telluric_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, e2ds_filelist, hdffile)
if True: # beta hyi
ccf_filelist = glob.glob('/mnt/ceph/users/mbedell/wobble/betahyi/HARPS*ccf_*_A.fits')
if True: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '/mnt/ceph/users/mbedell/wobble/betahyi_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
| [
"numpy.mean",
"numpy.unique",
"harps_hacks.read_harps.read_spec_2d",
"shutil.copy2",
"numpy.delete",
"harps_hacks.read_harps.read_snr",
"h5py.File",
"numpy.append",
"numpy.array",
"numpy.zeros",
"os.path.isfile",
"numpy.nanmean",
"numpy.savetxt",
"astropy.io.fits.open",
"scipy.io.idl.rea... | [((758, 781), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (766, 781), True, 'import numpy as np\n'), ((2536, 2566), 'numpy.delete', 'np.delete', (['pipeline_rvs', 'empty'], {}), '(pipeline_rvs, empty)\n', (2545, 2566), True, 'import numpy as np\n'), ((2589, 2622), 'numpy.delete', 'np.delete', (['pipeline_sigmas', 'empty'], {}), '(pipeline_sigmas, empty)\n', (2598, 2622), True, 'import numpy as np\n'), ((2635, 2658), 'numpy.delete', 'np.delete', (['dates', 'empty'], {}), '(dates, empty)\n', (2644, 2658), True, 'import numpy as np\n'), ((2671, 2694), 'numpy.delete', 'np.delete', (['bervs', 'empty'], {}), '(bervs, empty)\n', (2680, 2694), True, 'import numpy as np\n'), ((2707, 2730), 'numpy.delete', 'np.delete', (['airms', 'empty'], {}), '(airms, empty)\n', (2716, 2730), True, 'import numpy as np\n'), ((2744, 2768), 'numpy.delete', 'np.delete', (['drifts', 'empty'], {}), '(drifts, empty)\n', (2753, 2768), True, 'import numpy as np\n'), ((2865, 2886), 'numpy.mean', 'np.mean', (['pipeline_rvs'], {}), '(pipeline_rvs)\n', (2872, 2886), True, 'import numpy as np\n'), ((3138, 3154), 'scipy.io.idl.readsav', 'readsav', (['savfile'], {}), '(savfile)\n', (3145, 3154), False, 'from scipy.io.idl import readsav\n'), ((3922, 3946), 'numpy.unique', 'np.unique', (['missing_files'], {}), '(missing_files)\n', (3931, 3946), True, 'import numpy as np\n'), ((4078, 4101), 'h5py.File', 'h5py.File', (['hdffile', '"""w"""'], {}), "(hdffile, 'w')\n", (4087, 4101), False, 'import h5py\n'), ((618, 634), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (626, 634), True, 'import numpy as np\n'), ((666, 682), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (674, 682), True, 'import numpy as np\n'), ((711, 727), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (719, 727), True, 'import numpy as np\n'), ((847, 858), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (855, 858), True, 'import numpy as np\n'), ((860, 
871), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (868, 871), True, 'import numpy as np\n'), ((873, 884), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (881, 884), True, 'import numpy as np\n'), ((886, 897), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (894, 897), True, 'import numpy as np\n'), ((899, 910), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (907, 910), True, 'import numpy as np\n'), ((912, 923), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (920, 923), True, 'import numpy as np\n'), ((973, 985), 'astropy.io.fits.open', 'fits.open', (['f'], {}), '(f)\n', (982, 985), False, 'from astropy.io import fits\n'), ((2037, 2059), 'harps_hacks.read_harps.read_snr', 'read_harps.read_snr', (['f'], {}), '(f)\n', (2056, 2059), False, 'from harps_hacks import read_harps\n'), ((2376, 2409), 'numpy.delete', 'np.delete', (['data[r]', 'empty'], {'axis': '(0)'}), '(data[r], empty, axis=0)\n', (2385, 2409), True, 'import numpy as np\n'), ((2429, 2463), 'numpy.delete', 'np.delete', (['ivars[r]', 'empty'], {'axis': '(0)'}), '(ivars[r], empty, axis=0)\n', (2438, 2463), True, 'import numpy as np\n'), ((2480, 2511), 'numpy.delete', 'np.delete', (['xs[r]', 'empty'], {'axis': '(0)'}), '(xs[r], empty, axis=0)\n', (2489, 2511), True, 'import numpy as np\n'), ((3249, 3281), 'shutil.copy2', 'shutil.copy2', (['f', 'destination_dir'], {}), '(f, destination_dir)\n', (3261, 3281), False, 'import shutil\n'), ((3343, 3383), 'shutil.copy2', 'shutil.copy2', (['spec_file', 'destination_dir'], {}), '(spec_file, destination_dir)\n', (3355, 3383), False, 'import shutil\n'), ((3445, 3492), 'numpy.append', 'np.append', (['filelist', '(destination_dir + basename)'], {}), '(filelist, destination_dir + basename)\n', (3454, 3492), True, 'import numpy as np\n'), ((3654, 3666), 'astropy.io.fits.open', 'fits.open', (['f'], {}), '(f)\n', (3663, 3666), False, 'from astropy.io import fits\n'), ((3767, 3799), 'os.path.isfile', 'os.path.isfile', (['(path + wave_file)'], {}), '(path + 
wave_file)\n', (3781, 3799), False, 'import os\n'), ((4800, 4872), 'glob.glob', 'glob.glob', (['"""/Users/mbedell/python/wobble/data/51peg/HARPS*ccf_G2_A.fits"""'], {}), "('/Users/mbedell/python/wobble/data/51peg/HARPS*ccf_G2_A.fits')\n", (4809, 4872), False, 'import glob\n'), ((5218, 5293), 'glob.glob', 'glob.glob', (['"""/Users/mbedell/python/wobble/data/barnards/HARPS*ccf_M2_A.fits"""'], {}), "('/Users/mbedell/python/wobble/data/barnards/HARPS*ccf_M2_A.fits')\n", (5227, 5293), False, 'import glob\n'), ((5952, 6026), 'glob.glob', 'glob.glob', (['"""/Users/mbedell/python/wobble/data/HD189733/HARPS*ccf_*_A.fits"""'], {}), "('/Users/mbedell/python/wobble/data/HD189733/HARPS*ccf_*_A.fits')\n", (5961, 6026), False, 'import glob\n'), ((6648, 6721), 'glob.glob', 'glob.glob', (['"""/Users/mbedell/python/wobble/data/telluric/HARPS*e2ds_A.fits"""'], {}), "('/Users/mbedell/python/wobble/data/telluric/HARPS*e2ds_A.fits')\n", (6657, 6721), False, 'import glob\n'), ((7337, 7407), 'glob.glob', 'glob.glob', (['"""/mnt/ceph/users/mbedell/wobble/betahyi/HARPS*ccf_*_A.fits"""'], {}), "('/mnt/ceph/users/mbedell/wobble/betahyi/HARPS*ccf_*_A.fits')\n", (7346, 7407), False, 'import glob\n'), ((1910, 1944), 'harps_hacks.read_harps.read_spec_2d', 'read_harps.read_spec_2d', (['spec_file'], {}), '(spec_file)\n', (1933, 1944), False, 'from harps_hacks import read_harps\n'), ((3862, 3897), 'numpy.append', 'np.append', (['missing_files', 'wave_file'], {}), '(missing_files, wave_file)\n', (3871, 3897), True, 'import numpy as np\n'), ((5437, 5493), 'numpy.savetxt', 'np.savetxt', (['"""missing_files.txt"""', 'missing_files'], {'fmt': '"""%s"""'}), "('missing_files.txt', missing_files, fmt='%s')\n", (5447, 5493), True, 'import numpy as np\n'), ((6161, 6217), 'numpy.savetxt', 'np.savetxt', (['"""missing_files.txt"""', 'missing_files'], {'fmt': '"""%s"""'}), "('missing_files.txt', missing_files, fmt='%s')\n", (6171, 6217), True, 'import numpy as np\n'), ((6856, 6912), 'numpy.savetxt', 'np.savetxt', 
(['"""missing_files.txt"""', 'missing_files'], {'fmt': '"""%s"""'}), "('missing_files.txt', missing_files, fmt='%s')\n", (6866, 6912), True, 'import numpy as np\n'), ((7541, 7597), 'numpy.savetxt', 'np.savetxt', (['"""missing_files.txt"""', 'missing_files'], {'fmt': '"""%s"""'}), "('missing_files.txt', missing_files, fmt='%s')\n", (7551, 7597), True, 'import numpy as np\n'), ((1981, 2000), 'numpy.append', 'np.append', (['empty', 'n'], {}), '(empty, n)\n', (1990, 2000), True, 'import numpy as np\n'), ((2201, 2223), 'numpy.nanmean', 'np.nanmean', (['spec[r, :]'], {}), '(spec[r, :])\n', (2211, 2223), True, 'import numpy as np\n'), ((1443, 1462), 'numpy.append', 'np.append', (['empty', 'n'], {}), '(empty, n)\n', (1452, 1462), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from scipy.stats import halfnorm, invgamma
from skopt.learning.gaussian_process.kernels import RBF, ConstantKernel
from bask.bayesgpr import BayesGPR
@pytest.fixture(params=[False, True])
def minimal_gp(request):
    """A small BayesGPR fixture, parametrized over input warping on/off."""
    amplitude = ConstantKernel(
        constant_value=1 ** 2, constant_value_bounds=(0.01 ** 2, 1 ** 2)
    )
    lengthscale = RBF(length_scale=1.0, length_scale_bounds=(0.5, 1.5))
    return BayesGPR(
        random_state=1,
        normalize_y=False,
        kernel=amplitude * lengthscale,
        warp_inputs=request.param,
    )
@pytest.fixture
def minimal_priors():
    """Log-prior densities, in log-parameter space, for the three kernel
    hyperparameters of the minimal GP."""

    def halfnorm_logprior(x):
        # half-normal prior on the parameter; x / 2 - log(2) accounts for
        # the log -> parameter change of variables
        return (
            halfnorm(scale=1.0).logpdf(np.sqrt(np.exp(x)))
            + x / 2.0
            - np.log(2.0)
        )

    def invgamma_logprior(x):
        return invgamma(a=5.0, scale=1.0).logpdf(np.exp(x)) + x

    return [halfnorm_logprior, invgamma_logprior, halfnorm_logprior]
def test_noise_vector(minimal_gp, minimal_priors):
    """A datapoint with very large noise should barely influence the fit."""
    X = np.array([[0.0], [0.0]])
    y = np.array([1.0, 0.0])
    per_point_noise = np.array([1234, 0.0])
    minimal_gp.fit(
        X,
        y,
        noise_vector=per_point_noise,
        n_burnin=1,
        progress=False,
        priors=minimal_priors,
    )
    # The y=1 observation carries huge noise, so the prediction at x=0
    # should stay close to the noise-free y=0 observation.
    assert minimal_gp.predict(np.array([[0.0]])) < 0.01
def test_noise_set_to_zero(minimal_gp, minimal_priors):
    """Within the noise_set_to_zero context the predictive std drops below
    1, and it is restored once the context exits."""
    X = np.array([[0.1], [0.0], [-0.1]])
    y = np.array([0.0, 0.0, 0.0])
    minimal_gp.fit(X, y, n_burnin=1, progress=False, priors=minimal_priors)
    minimal_gp.theta = np.array([0.0, 0.0, 0.0])

    def std_at_origin():
        # predictive standard deviation at x = 0
        return minimal_gp.predict(np.array([[0.0]]), return_std=True)[1]

    assert std_at_origin() >= 1.0
    with minimal_gp.noise_set_to_zero():
        assert std_at_origin() < 1.0
    assert std_at_origin() >= 1.0
def test_sample_without_fit(minimal_gp):
    """Sampling before any data (X, y) or prior fit must raise ValueError."""
    with pytest.raises(ValueError):
        minimal_gp.sample()
| [
"skopt.learning.gaussian_process.kernels.RBF",
"skopt.learning.gaussian_process.kernels.ConstantKernel",
"numpy.log",
"numpy.exp",
"bask.bayesgpr.BayesGPR",
"numpy.array",
"scipy.stats.invgamma",
"pytest.raises",
"scipy.stats.halfnorm",
"pytest.fixture"
] | [((187, 223), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[False, True]'}), '(params=[False, True])\n', (201, 223), False, 'import pytest\n'), ((422, 512), 'bask.bayesgpr.BayesGPR', 'BayesGPR', ([], {'random_state': '(1)', 'normalize_y': '(False)', 'kernel': 'kernel', 'warp_inputs': 'request.param'}), '(random_state=1, normalize_y=False, kernel=kernel, warp_inputs=\n request.param)\n', (430, 512), False, 'from bask.bayesgpr import BayesGPR\n'), ((936, 960), 'numpy.array', 'np.array', (['[[0.0], [0.0]]'], {}), '([[0.0], [0.0]])\n', (944, 960), True, 'import numpy as np\n'), ((969, 989), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (977, 989), True, 'import numpy as np\n'), ((1009, 1030), 'numpy.array', 'np.array', (['[1234, 0.0]'], {}), '([1234, 0.0])\n', (1017, 1030), True, 'import numpy as np\n'), ((1425, 1457), 'numpy.array', 'np.array', (['[[0.1], [0.0], [-0.1]]'], {}), '([[0.1], [0.0], [-0.1]])\n', (1433, 1457), True, 'import numpy as np\n'), ((1466, 1491), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1474, 1491), True, 'import numpy as np\n'), ((1591, 1616), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1599, 1616), True, 'import numpy as np\n'), ((262, 347), 'skopt.learning.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', ([], {'constant_value': '(1 ** 2)', 'constant_value_bounds': '(0.01 ** 2, 1 ** 2)'}), '(constant_value=1 ** 2, constant_value_bounds=(0.01 ** 2, 1 ** 2)\n )\n', (276, 347), False, 'from skopt.learning.gaussian_process.kernels import RBF, ConstantKernel\n'), ((359, 412), 'skopt.learning.gaussian_process.kernels.RBF', 'RBF', ([], {'length_scale': '(1.0)', 'length_scale_bounds': '(0.5, 1.5)'}), '(length_scale=1.0, length_scale_bounds=(0.5, 1.5))\n', (362, 412), False, 'from skopt.learning.gaussian_process.kernels import RBF, ConstantKernel\n'), ((1225, 1242), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (1233, 1242), 
True, 'import numpy as np\n'), ((2028, 2053), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2041, 2053), False, 'import pytest\n'), ((682, 693), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (688, 693), True, 'import numpy as np\n'), ((856, 867), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (862, 867), True, 'import numpy as np\n'), ((1647, 1664), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (1655, 1664), True, 'import numpy as np\n'), ((1843, 1860), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (1851, 1860), True, 'import numpy as np\n'), ((747, 756), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (753, 756), True, 'import numpy as np\n'), ((1768, 1785), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (1776, 1785), True, 'import numpy as np\n'), ((713, 739), 'scipy.stats.invgamma', 'invgamma', ([], {'a': '(5.0)', 'scale': '(1.0)'}), '(a=5.0, scale=1.0)\n', (721, 739), False, 'from scipy.stats import halfnorm, invgamma\n'), ((607, 626), 'scipy.stats.halfnorm', 'halfnorm', ([], {'scale': '(1.0)'}), '(scale=1.0)\n', (615, 626), False, 'from scipy.stats import halfnorm, invgamma\n'), ((642, 651), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (648, 651), True, 'import numpy as np\n'), ((781, 800), 'scipy.stats.halfnorm', 'halfnorm', ([], {'scale': '(1.0)'}), '(scale=1.0)\n', (789, 800), False, 'from scipy.stats import halfnorm, invgamma\n'), ((816, 825), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (822, 825), True, 'import numpy as np\n')] |
"""
Linear Prediction (LP) functions for extrapolating and modeling NMR signals.
"""
from __future__ import print_function
__developer_info__ = """
Notes
^^^^^
This module contains functions for performing linear prediction on NMR data.
The algorithms used were selected for simplicity to show how linear prediction
works not for computational speed nor stability. Locations where significant
improvements can be made to improve speed or stability are indicated with
SPEED and STABILITY within the source code with discussion following.
The notation for the Linear Prediction equation, coefficients, roots, etc.
closely match those in "NMR Data Processing" by <NAME>. This book was
references for many of the algorithms in this module.
Reduced order LP-SVD and LP-TLS methods are not implemented but should
be easy to add if desired. See :py:func:`find_lproots_hsvd` for an example.
"""
import numpy as np
import scipy.linalg
#############################################
# linear prediction extrapolation functions #
#############################################
def lp(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None, method="svd"):
"""
Linear prediction extrapolation of 1D or 2D data.
Parameters
----------
data : ndarray
1D or 2D NMR data with the last (-1) axis in the time domain.
pred : int
Number of points to predict along the last axis.
slice : slice object, optional
Slice object which selects the region along the last axis to use in LP
equation. The default (slice(None)) will use all points.
order : int
Prediction order, number of LP coefficients calculated.
mode : {'f', 'b', 'fb' or 'bf'}
Mode to generate LP filter. 'f' for forward,'b' for backward, fb for
'forward-backward and 'bf' for backward-forward.
extend : {'before', 'after'}
Location to extend the data, either 'before' the current data, or
'after' the existing data. This is independent of the `mode` parameter.
bad_roots : {'incr', 'decr', None, 'auto'}
Type of roots which to consider bad and to stabilize. Option are those
with increasing signals 'incr' or decreasing signals 'decr'. None will
perform no root stabiliting. The default ('auto') will set the
parameter based on the `mode` parameter. 'f' or 'fb' `mode` will
results in a 'incr' `bad_roots` parameter, 'b' or 'bf` in 'decr'
fix_mode : {'on', 'reflect'}
Method used to stabilize bad roots, 'on' to move the roots onto the
unit circle, 'reflect' to reflect bad roots across the unit circle.
This parameter is ignored when `bad_roots` is None.
mirror : {None, '0', '180'}
Mode to form mirror image of data before processing. None will
process the data trace as provided (no mirror image). '0' or '180'
forms a mirror image of the sliced trace to calculate the LP filter.
'0' should be used with data with no delay, '180' with data
with an initial half-point delay.
method : {'svd', 'qr', 'choleskey', 'tls'}
Method to use to calculate the LP filter. Choices are a SVD ('svd'), QR
('qr'), or Choleskey ('choleskey') decomposition, or Total Least
Squares ('tls').
Returns
-------
ndata : ndarray
NMR data with `pred` number of points linear predicted and appended to
the original data.
Notes
-----
When given 2D data a series of 1D linear predictions are made to
each row in the array, extending each by pred points. To perform a 2D
linear prediction using a 2D prediction matrix use :py:func:`lp2d`.
In forward-backward or backward-forward mode root stabilizing is done
on both sets of signal roots as calculated in the first mode direction.
After averaging the coefficient the roots are again stabilized.
When the append parameter does not match the LP mode, for example
if a backward linear prediction (mode='b') is used to predict points
after the trace (append='after'), any root fixing is done before reversing
the filter.
"""
if data.ndim == 1:
return lp_1d(data, pred, slice, order, mode, append, bad_roots,
fix_mode, mirror, method)
elif data.ndim == 2:
# create empty array to hold output
s = list(data.shape)
s[-1] = s[-1] + pred
new = np.empty(s, dtype=data.dtype)
# vector-wise 1D LP
for i, trace in enumerate(data):
new[i] = lp_1d(trace, pred, slice, order, mode, append, bad_roots,
fix_mode, mirror, method)
return new
else:
raise ValueError("data must be a one or two dimensional array")
# method specific extrapolation
def lp_svd(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None):
"""
Linear Prediction extrapolation of 1D or 2D data using SVD decomposition.
See :py:func:`lp` for documentation.
"""
return lp(data, pred, slice, order, mode, append, bad_roots, fix_mode,
mirror, method="svd")
def lp_qr(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None):
"""
Linear Prediction extrapolation of 1D or 2D data using QR decomposition.
See :py:func:`lp` for documentation
"""
return lp(data, pred, slice, order, mode, append, bad_roots, fix_mode,
mirror, method="qr")
def lp_cho(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None):
"""
Linear Prediction extrapolation of 1D or 2D data using Cholesky
decomposition.
See :py:func:`lp` for documentation
"""
return lp(data, pred, slice, order, mode, append, bad_roots, fix_mode,
mirror, method="cholesky")
def lp_tls(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None):
"""
Linear Prediction extrapolation of 1D or 2D data using Total Least Squares.
See :py:func:`lp` for documentation.
"""
return lp(data, pred, slice, order, mode, append, bad_roots, fix_mode,
mirror, method="tls")
# underlying 1D extrapolation
def lp_1d(trace, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None, method="svd"):
"""
Linear Prediction extrapolation of 1D data.
Parameters
----------
trace : ndarray
1D NMR data in the time domain.
pred : int
Number of points to predict along the last axis.
slice : slice object, optional
Slice object which selects the region along the last axis to use in LP
equation. The default (slice(None)) will use all points.
order : int
Prediction order, number of LP coefficients calculated.
mode : {'f', 'b', 'fb' or 'bf'}
Mode to generate LP filter. 'f' for forward,'b' for backward, fb for
'forward-backward and 'bf' for backward-forward.
extend : {'before', 'after'}
Location to extend the data, either 'before' the current data, or
'after' the existing data. This is independent of the `mode` parameter.
bad_roots : {'incr', 'decr', None, 'auto'}
Type of roots which to consider bad and to stabilize. Option are those
with increasing signals 'incr' or decreasing signals 'decr'. None will
perform no root stabiliting. The default ('auto') will set the
parameter based on the `mode` parameter. 'f' or 'fb' `mode` will
results in a 'incr' `bad_roots` parameter, 'b' or 'bf` in 'decr'
fix_mode : {'on', 'reflect'}
Method used to stabilize bad roots, 'on' to move the roots onto the
unit circle, 'reflect' to reflect bad roots across the unit circle.
This parameter is ignored when `bad_roots` is None.
mirror : {None, '0', '180'}
Mode to form mirror image of data before processing. None will
process the data trace as provided (no mirror image). '0' or '180'
forms a mirror image of the sliced trace to calculate the LP filter.
'0' should be used with data with no delay, '180' with data
with an initial half-point delay.
method : {'svd', 'qr', 'choleskey', 'tls'}
Method to use to calculate the LP filter. Choices are a SVD ('svd'), QR
('qr'), or Choleskey ('choleskey') decomposition, or Total Least
Squares ('tls').
Returns
-------
ntrace : ndarray
NMR data with `pred` number of points linear predicted and appended to
the original data.
Notes
-----
In forward-backward or backward-forward mode root stabilizing is done
on both sets of signal roots as calculated in the first mode direction.
After averaging the coefficient the roots are again stabilized.
When the append parameter does not match the LP mode, for example
if a backward linear prediction (mode='b') is used to predict points
after the trace (append='after'), any root fixing is done before reversing
the filter.
See Also
--------
lp : 1D or 2D linear prediction extrapolation.
"""
# check for bad arguments
if mode not in ["f", "b", "fb", "bf"]:
raise ValueError("mode must be 'f', 'b', 'fb', or 'bf'")
if append not in ["before", "after"]:
raise ValueError("append must be 'before' or 'after'")
if bad_roots not in [None, "incr", "decr", "auto"]:
raise ValueError("bad_roots must be None, 'auto', 'incr' or 'decr'")
if fix_mode not in ["on", "reflect"]:
raise ValueError("fix_mode must be 'on' or 'reflect'")
if mirror not in [None, "0", "180"]:
raise ValueError("mirror must be None, '0' or '180'")
if method not in ['svd', 'qr', 'cholesky', 'tls']:
raise ValueError("Invalid method")
if trace.ndim != 1:
raise ValueError("trace must be 1D")
# bad_roots auto mode
if bad_roots == "auto":
if mode == "f" or mode == "fb":
bad_roots = "incr"
else:
bad_roots = "decr"
x = trace[slice] # extract region to use for finding LP coefficients
if mirror is not None: # make mirror image if selected
x = make_mirror(x, mirror)
if mode == "fb":
a = find_lpc_fb(x, order, bad_roots, fix_mode, method)
mode = "f"
elif mode == "bf":
a = find_lpc_bf(x, order, bad_roots, fix_mode, method)
mode = "b"
else:
# form the LP equation matrix and vector
D, d = make_Dd(x, order, mode)
a = find_lpc(D, d, method) # determind the LP prediction filter
# stablize roots if needed
if bad_roots is not None: # stablize roots if needed
poles = find_roots(a, mode) # find roots (poles)
poles = fix_roots(poles, bad_roots, fix_mode) # fix roots
# reverse filter when calculated filter is in wrong direction
if (mode == "b" and append == "after") or (mode == "f" and
append == "before"):
poles = [1. / pole for pole in poles]
mode = {'f': 'b', 'b': 'f'}[mode]
a = find_coeff(poles, mode) # find LP filter from roots
else:
# reverse filter when calculated filter is in wrong direction
if (mode == "b" and append == "after") or (mode == "f" and
append == "before"):
a = reverse_filter(a, mode)
# extrapolate the trace using the prediction filter
ntrace = extrapolate(trace, a, pred, append)
return ntrace
##################
# LP2D functions #
##################
def lp2d(data, pred, P, M, mirror='0', fix_points=True, method='svd'):
"""
Perform a forward 2D linear prediction extrapolation on data.
Use the 2D linear prediction algorithm presented in:
<NAME> and <NAME>, Journal of Magnetic Resonance, 1992, 98, 192-199.
to extend the last (1) axis by `pred` points. A PxM prediction matrix, C,
is formed by solving the modified linear prediction equation given by:
data[n,m] = /sigma_{l=0}^{P-1} /sigma_{k=1}^M C_{l,k}*data[n-l,m-k]
For all valid points in data. This prediction matrix together with the
data matrix with a mirror image appended is used to extend the last (1)
axis by pred points resulting in a new array of size [N_0, N_1+pred] where
N_0 and N_1 are the sizes of the original data. To linear predict
both dimensions this function should be used twice with a transpose
between the uses.
Backward linear prediction using this method is not possible as the
method depends on being able to mirror the data before the first collected
point. In backwards mode this would correspond to being able to correctly
determind points after the last point which cannot be determinded using the
mirror method. A backward prediction matrix can be calculated but would
not prove useful.
The forward-backward averaging of the linear prediction coefficients is
not possible as there is no characteristic polynomial to root and reflect.
Therefore the backward prediction matrix cannot be reversed.
Parameters
----------
data : ndarray
2D NMR data (time domain for last axes).
pred : int
Number of points to predict along the last (1) axes.
P : int
Prediction matrix length along the non-predicted (0) axis.
M : int
Prediction matrix length along the predicted (1) axis.
mirror : {'0' or '180'}
Method to use for forming the mirror image of the non-predicted axis.
'0' indicated no initial delay, '180' for a half-point delay.
fix_points : bool
True to reduce predicted points with magnitude larger than the largest
data point. False leaved predicted points unaltered.
method : {'svd', 'qr', 'cholesky', 'tls'}
Method used to calculate the LP prediction matrix. See :py:func:`lp`
for a description of theses methods.
Returns
-------
ndata : ndarray
2D NMR data with `pred` points appended to the last (1) axes.
Notes
-----
The axes in this function are reversed as compared to the JMR paper.
"""
# check parameters
if data.ndim != 2:
raise ValueError("data must be a 2D array")
if mirror not in ['0', '180']:
raise ValueError("mirror must be '0' or '180'")
if method not in ['svd', 'qr', 'cholesky', 'tls']:
raise ValueError("method must be 'svd', 'qr', 'cholesky' or 'tls'")
# form lp2d equation matrix and vector
D, d = make_lp2d_Dd(data, P, M, 'f')
# Solve lp2d equation to find the prediction matrix
c = find_lpc(D, d, method)
C = c.reshape(P, M)
# extrapolate the 2D data using the prediction matrix
return extrapolate_2d(data, C, pred, fix_points, mirror)
def extrapolate_2d(x, C, pred, fix_points, mirror):
"""
Extrapolate points along the 1st axis using the lp2d algorithm.
"""
# find the prediction matrix shape and flatten it
P, M = C.shape
c = C.flatten()
# find data parameters
x_max = x.max()
N_0, N_1 = x.shape
# create a empty matrix
if mirror == "0":
new = np.empty((2 * N_0 - 1, N_1 + pred), dtype=x.dtype)
plane = N_0 - 1 # index of first non-mirrored point
else:
new = np.empty((2 * N_0, N_1 + pred), dtype=x.dtype)
plane = N_0 # index of first non-mirrored plane
last = new.shape[0] # number of rows in new matrix
# fill the matrix with the mirrored version of each column
for i in range(N_1):
new[:, i] = make_mirror(x[:, i], mirror)
# fill each new column with predicted values
# i,j give coordinates of top-left corner of PxM reading matrix
# after filling a column, replace the whole column with the mirrored column
for j in range(N_1 - M, N_1 - M + pred): # column index loop
for i in range(plane - P + 1, last - P + 1): # row index loop
new[i + P - 1, j + M] = np.sum(new[i:i + P, j:j + M].flat * c)
if fix_points: # reduce predicted point is needed
if new[i + P - 1, j + M] > x_max:
new[i + P - 1, j + M] = ((x_max * x_max) /
(new[i + P - 1, j + M]))
# fill the column with the mirrored column so it can be read in the
# next interation of the loop
new[:, j + M] = make_mirror(new[plane:, j + M], mirror)
return new[plane:]
def make_lp2d_Dd(x, P, M, mode='f'):
    """
    Form the lp2d equation matrix and vector.

    Parameters
    ----------
    x : 2D ndarray
        Data matrix.
    P, M : int
        Shape of the (P x M) prediction matrix.
    mode : {'f'}
        Prediction mode; only forward ('f') is implemented.

    Returns
    -------
    D : 2D ndarray
        LP equation matrix.
    d : 2D ndarray
        LP equation vector (column vector).

    Raises
    ------
    NotImplementedError
        When `mode` is 'b'; backward 2D prediction is not implemented.
    """
    # Form the D and d' matrix and vector in the 2DLP equation:
    # D*c = d'
    # where c is a flattened prediction matrix ordered:
    # (P-1,M) (P-1,M-1) ... (P-1,1) (P-2,M-1) ... (P-2,1) (P-3,M-1) ... ...
    # (2,1) (1,M-1) ... (1,1) (0,M-1) ... (0,1)
    if mode == 'b':
        # backward mode not implemented
        # this would have the d' value as the
        # top left corner
        # this can be done with the same code
        # after reversing x,
        # x = x[::-1,::-1]
        # NOTE: `raise NotImplemented` (the sentinel) is a TypeError at
        # runtime; raise the proper exception class instead.
        raise NotImplementedError("backward mode not implemented")
    # Build D and d' row by row by flattening a PxM region of the x matrix
    # starting at 0, 0 and moving down to the bottom of the matrix, then moving
    # back to the top and over one row, and continue till all data has been
    # read. d' is filled with the element next to the right corner of this
    # PxM region.
    N_0, N_1 = x.shape  # length of the matrix
    count_P = N_0 - P + 1   # number of valid starting position vertically
    # number of valid starting position horizontally
    # taking into account the element next to the
    # bottom right corner is the predicted value.
    count_M = N_1 - M
    # create an empty D matrix
    D = np.empty((count_P * count_M, P * M), dtype=x.dtype)
    d = np.empty((count_P * count_M, 1), dtype=x.dtype)
    # fill D and d' row by row (i,j) give coordinates of top-left corner of
    # PxM reading matrix
    for j in range(count_M):
        for i in range(count_P):
            D[j * count_P + i] = x[i:i + P, j:j + M].flat
            d[j * count_P + i] = x[i + P - 1, j + M]
    return D, d
#############################################
# Cadzow/Minumum variance signal enhacement #
#############################################
def cadzow(data, M, K, niter, min_var=False):
    """
    Perform a (row wise) Cadzow-like signal enhancement on 1D or 2D data.
    Performs a Cadzow-like signal enhancement with optional adjustment
    of singular values using the minimum variance method as desribed in:
    Chen, VanHuffel, Decanniere, VanHecke, JMR, 1994, 109A, 46-55.
    For 2D data performs independant enhancement on each row of data array;
    rows are updated in place.
    Parameters
    ----------
    data : ndarray
        1D or 2D NMR data to enhance.
    M : int
        Large prediction order.  For best results should be between
        K + 5 and 2 * K.
    K : int
        Reduced prediction order.
    niter : int
        Number if iteration of the Cadzow procedure to perform.
    min_var : bool
        True to adjust retained singular values using the minimum variance
        method. False does not correct the singular values and is the
        Cadzow method.
    Returns
    -------
    ndata : ndarray
        Array of enhanced data
    """
    if data.ndim == 1:
        for i in range(niter):
            data = cadzow_single(data, M, K, min_var)
        return data
    elif data.ndim == 2:
        for j, trace in enumerate(data):
            for i in range(niter):
                trace = cadzow_single(trace, M, K, min_var)
            # BUG FIX: the enhanced trace was previously discarded; write
            # the result back so 2D enhancement actually takes effect.
            data[j] = trace
        return data
    else:
        raise ValueError("data must be a 1D or 2D array")
def cadzow_single(x, M, K, min_var=False):
    """
    Perform a single iteration of Cadzow signal enhancement on a 1D vector
    See :py:func:`cadzow` for documentation.
    """
    # variable names based upon Chen et al, JMR 1994 109A 46
    # form the Hankel data matrix X
    N = len(x)
    L = N - M + 1
    X = scipy.linalg.hankel(x[:L], x[L - 1:])
    # Compute the SVD of X
    U, s, Vh = scipy.linalg.svd(X)
    # correct the singular values and truncate the rank K
    # NOTE(review): np.mat (np.matrix) is legacy NumPy; `*` below is
    # matrix multiplication because of it.
    Ul = np.mat(U[:, :K])
    Vlh = np.mat(Vh[:K, :])   # first K columns of V are first K rows of Vh
    sl = s[:K]
    if min_var:  # adjust singular values using minimum variance method
        # estimate variance of noise singular values
        s2 = (1. / (M - K)) * np.power(s[K:], 2).sum()
        sl = np.array([l - s2 / l for l in sl])
    Sl = np.mat(np.diag(sl))
    # compute enhanced data vector for rank-reduced data matrix
    Xp = Ul * Sl * Vlh
    # recover the enhanced 1D signal by averaging each anti-diagonal of
    # the rank-reduced matrix (restores the Hankel structure on average)
    xp = np.empty_like(x)
    for i, v in enumerate(range(M - 1, -L, -1)):
        # the anti-diagonal is the diagonal with rows reversed
        xp[i] = np.diag(Xp[:, ::-1], v).mean()
    return xp
###################################################
# Linear Prediction parametric modeling functions #
###################################################
def lp_model(trace, slice=slice(None), order=8, mode="f", mirror=None,
             method="svd", full=False):
    """
    Use Linear Prediction to model 1D NMR time domain data.
    Parameters
    ----------
    trace : 1D ndarray
        One dimensional time domain NMR data to model.
    slice : slice object, optional
        Slice object which selects the region along the last axis to use in LP
        equation. The default, slice(None), will use all points.
    order : int
        Prediction order, number of LP coefficients calculated.
    mode : {'f', 'b'}
        Mode to generate LP filter. 'f' for forward,'b' for backward.
    mirror : {None, '0', '180'}
        Mode to form mirror image of data before processing.  None will
        process the data trace as provided (no mirror image). '0' or '180'
        forms a mirror image of the sliced trace to calculate the LP filter.
        '0' should be used with data with no delay, '180' with data
        with an initial half-point delay.
    method : {'svd', 'qr', 'cholesky', 'tls', 'hsvd'}
        Method to use to calculate the LP filter. Choices are a SVD ('svd'),
        QR ('qr'), or Cholesky ('cholesky') decomposition, total least
        squares ('tls'), or Hankel SVD ('hsvd').
    full : bool
        True to return amplitudes and phases calculated by performing a least
        squares fitting to the data after LP modeling. False will return only
        the damping (relaxation) factors and signal frequencies.
    Returns
    -------
    damp : list
        List of damping (relaxation) factors found from LP modeling.
    freq : list
        List of signal frequencies found from LP modeling.
    amp : list, optional
        List of signal amplitudes found by least squares fitting of data after
        LP modeling, only returned when `full` parameter is True.
    phase : list, optional.
        List of signal phases found by least squares fitting of data after LP
        modeling, only returned when `full` parameter is True.
    Notes
    -----
    When backward LP is used the signal roots are reflected before calculating
    model parameters.
    """
    # check for bad arguments
    if mode not in ["f", "b"]:
        raise ValueError("mode must be 'f' or 'b'")
    if method not in ['svd', 'qr', 'cholesky', 'tls', 'hsvd']:
        raise ValueError("Invalid method")
    if trace.ndim != 1:
        raise ValueError("trace must be a 1D array")
    x = trace[slice]    # extract region to use for finding LP coefficients
    if mirror is not None:  # make mirror image if requested
        x = make_mirror(x, mirror)
    # calculate LP coefficient and factor to find poles
    # BUG FIX: 'cholseky' typo previously made method='cholesky' fall
    # through to the ValueError branch.
    if method in ['svd', 'qr', 'cholesky', 'tls']:
        D, d = make_Dd(x, order, mode)  # form the LP equation elements
        a = find_lpc(D, d, method)      # find LP coefficients
        poles = find_roots(a, mode)     # find roots
    elif method == "hsvd":
        poles = find_lproots_hsvd(x, M=order, K=order, mode=mode, zmethod='sm')
    else:
        raise ValueError("Invalid method")
    # reverse poles if we have backward poles
    if mode == "b":
        poles = [1. / pole for pole in poles]
    # determind the damping factor and frequencies from the roots
    damp = [root2damp(pole) for pole in poles]
    freq = [root2freq(pole) for pole in poles]
    if full is False:
        return damp, freq
    # perform Least Squares fitting to determind amplitudes and phases.
    # We need to find a least squares solutions to:
    # z_0*b_0^0+z_1*b_1^0+.... = x_0
    # z_0*b_0^1+z_1*b_1^1+.... = x_1
    # ...
    # Where b is the LP roots (poles), x is the signal, and a_0 are unknown
    #
    # To solve this recast into Bz = x and solve for a using scipy.lstsq
    # SPEED
    # B is a Vandermonde matrix, this characteristic can be used to more
    # efficiently solve this least squares problem.
    # build the B matrix (a Vandermonde matrix) and solve for the coefficients
    poles = np.array(poles)
    # np.vstack replaces np.row_stack (removed in NumPy 2.0); identical
    # behavior for this usage.
    B = np.vstack([poles ** (i) for i in range(len(x))])
    z, resid, rank, s = np.linalg.lstsq(B, np.array(x))
    # Now the z_n = amp_n*exp(phase_n*i), use this to determind the amplitudes
    # and phases
    amp = [cof2amp(cof) for cof in z]
    phase = [cof2phase(cof) for cof in z]
    return damp, freq, amp, phase
###############################################################
# functions to determine signal parameters from LP parameters #
###############################################################
def root2damp(pole):
    """
    Return the damping (relaxation) factor of a LP root.

    The damping factor is -ln(|pole|) / pi, i.e. derived from the radius
    of the root in the complex plane.
    """
    magnitude = np.abs(pole)
    return -np.log(magnitude) / np.pi
def root2freq(pole):
    """
    Return the signal frequency of a LP root.

    The frequency is arg(pole) / (2*pi): the angle from the real axis to
    the root in the complex plane, as a fraction of a full revolution.
    """
    angle = np.arctan2(pole.imag, pole.real)
    return angle / (2.0 * np.pi)
def cof2amp(z):
    """
    Return the signal amplitude encoded in a model coefficient.

    Since z = amp * exp(phase * 1j), the amplitude is |z|.
    """
    amplitude = np.abs(z)
    return amplitude
def cof2phase(z):
    """
    Return the signal phase encoded in a model coefficient.

    Since z = amp * exp(phase * 1j), the phase is arg(z).
    """
    imag_part = z.imag
    real_part = z.real
    return np.arctan2(imag_part, real_part)
##############################
# data preperation functions #
##############################
def make_D(x, order, mode):
    """
    Build the LP equation D matrix (D a = d') as a Hankel matrix.

    `mode` is 'f' for forward prediction, 'b' for backward prediction;
    any other value raises ValueError.
    """
    L = len(x) - order
    if mode == "f":
        first_col, last_row = x[:L], x[L - 1:-1]
    elif mode == "b":
        first_col, last_row = x[1:L + 1], x[L:]
    else:
        raise ValueError("mode must be 'f' or 'b'")
    return scipy.linalg.hankel(first_col, last_row)
def make_little_d(x, order, mode):
    """
    Build the LP equation d' column vector (D a = d').

    `mode` is 'f' for forward prediction, 'b' for backward prediction;
    any other value raises ValueError.
    """
    n_rows = len(x) - order
    if mode == "f":
        return x[order:].reshape(n_rows, 1)
    if mode == "b":
        return x[:n_rows].reshape(n_rows, 1)
    raise ValueError("mode must be 'f' or 'b'")
def make_Dd(x, order, mode):
    """
    Build both elements of the LP equation D a = d'.

    Returns the (D matrix, d' vector) pair for the given prediction
    order and mode ('f' or 'b').
    """
    D = make_D(x, order, mode)
    d = make_little_d(x, order, mode)
    return D, d
def make_mirror(x, mode):
    """
    Make a mirror image trace.
    Reflects trace over zero as described in:
    Zhu and Bax, Journal of Magnetic Resonance, 1990, 90, 405
    When mode is "0" (no initial delay) form the an array with length 2N-1:
    x_n-1 ... x_1 x_0 x_1 ... x_n-1
    When mode is "180" (half point delay) form an array with length 2N:
    x_n-1 .. x_1 x_0 x_0 x_1 ... x_n-1
    Parameters
    ----------
    x : ndarray
        1D array to form mirrored trace from.
    mode : {'180', '0'}
        Mirror mode, see above.

    Returns
    -------
    ndarray
        Mirrored trace.

    Raises
    ------
    ValueError
        If `mode` is not '0' or '180' (previously returned None silently).
    """
    if mode == "0":
        return np.concatenate((x[:0:-1], x))
    elif mode == "180":
        return np.concatenate((x[::-1], x))
    else:
        raise ValueError("mode must be '0' or '180'")
###########################################################
# LP prediction filter calculation functions (find_lpc_*) #
###########################################################
# the coefficients returned from these functions depend on the mode of the
# prediction. Forward LP returns coefficients ordered m, m-1, ...1
# Backward LP returns 1, 2, ..., m where m is the order of the prediction.
def find_lpc(D, d, method):
    """
    Find linear prediction filter using a provided method.

    `method` selects one of the concrete solvers: 'svd', 'qr',
    'cholesky' or 'tls'; any other value raises ValueError.
    """
    solvers = {
        "svd": find_lpc_svd,
        "qr": find_lpc_qr,
        "cholesky": find_lpc_cholesky,
        "tls": find_lpc_tls,
    }
    solver = solvers.get(method)
    if solver is None:
        raise ValueError("invalid method")
    return solver(D, d)
def find_lpc_svd(D, d):
    """
    Find linear prediction filter using single value decomposition.

    Solves D * a = d' via the SVD pseudo-inverse: a = V * S+ * U.H * d'.
    """
    L = D.shape[0]
    m = D.shape[1]
    U, s, Vh = scipy.linalg.svd(D)    # SVD decomposition
    # NOTE(review): np.mat (np.matrix) is legacy NumPy; `*` below is
    # matrix multiplication because of it.
    U, Vh = np.mat(U), np.mat(Vh)     # make U and Vh matrices
    Si = pinv_diagsvd(s, m, L)  # construct the pseudo-inverse sigma matrix
    return np.array(Vh.H * Si * U.H * d)
# the next 3 lines and the pinv_diagsvd function were adapted from the
# scipy.linalg.pinv2 function - jjh
eps = np.finfo('float').eps       # machine epsilon for double precision
feps = np.finfo('single').eps     # machine epsilon for single precision
# map dtype character codes to a precision index (0: single, 1: double)
_array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1}
def pinv_diagsvd(s, m, L):
    """
    Construct the pseudo-inverse of the sigma matrix from singular values.

    Singular values at or below a dtype-dependent cutoff (relative to the
    largest singular value) are treated as zero.
    """
    t = s.dtype.char
    # cutoff scale factor depends on single vs double precision
    cond = {0: feps * 1e3, 1: eps * 1e6}[_array_precision[t]]
    cutoff = s[0] * cond
    Si = np.zeros((m, L), t)
    for i, sv in enumerate(s):
        if sv > cutoff:
            Si[i, i] = 1.0 / np.conj(sv)
    return Si
def find_lpc_qr(D, d):
    """
    Find linear prediction filter using QR decomposition.

    Solves the least squares problem D * a = d' via a QR decomposition
    of D.  Since Q is unitary, pinv(D) = pinv(R) * Q.H.
    """
    q, r = scipy.linalg.qr(D)
    # scipy.linalg.pinv2 was removed from SciPy (>= 1.9); scipy.linalg.pinv
    # performs the same SVD-based pseudo-inversion.
    # SPEED
    # pseudo inversion of r involves a computationally expensive SVD
    # decomposition which is not needed. Rather r*x = q.H*d should be
    # solved for x using LAPACK's ZTRTRS function (or similar function with
    # different prefix). This is not currently available in scipy/numpy and
    # therefore is not used here.
    return scipy.linalg.pinv(r) @ np.conj(q).T @ d
def find_lpc_cholesky(D, d):
    """
    Find linear prediction filter using a Cholesky decomposition.

    Forms and solves the normal equations (D.H * D) * a = D.H * d'.
    """
    # SPEED
    # this can be improved by using the Hankel nature of D
    # plain ndarrays replace the legacy np.matrix type used previously;
    # scipy's cho_* functions return ndarrays either way.
    Dh = np.conj(D).T            # Hermitian transpose of D
    DhD = np.dot(Dh, D)
    Dhd = np.dot(Dh, d)
    c, lower = scipy.linalg.cho_factor(DhD)    # Compute Cholesky decomp.
    return scipy.linalg.cho_solve((c, lower), Dhd)     # solve normal equation
def find_lpc_tls(D, d):
    """
    Find linear prediction filter using the Total Least Squares method.

    The TLS solution is built from the last right singular vector of the
    augmented matrix [D | d].
    """
    order = D.shape[1]                  # the order of the prediction
    augmented = np.hstack((D, d))       # form the augmented data matrix
    U, s, Vh = scipy.linalg.svd(augmented)  # SVD of the augmented matrix
    V = Vh.conj().T                     # Hermitian transpose
    return (-1. / V[order, order] * V[:order, order]).reshape((order, 1))
def find_lpc_fb(x, order, bad_roots, fix_mode, method):
    """
    Determind LP coefficient using forward-backward linear prediction.
    Averages LP coefficients generated from solving the forward and backward
    linear prediction equations after reversing the roots of characteristic
    polynomial of the backward solution. Method is described in:
    Delsuc et al., Journal of Magnetic Resonance, 1992, 100, 202-207.
    Description of parameters can be found in :py:func:`lp`.
    """
    # find forward LP coefficients
    D, d = make_Dd(x, order, 'f')
    a = find_lpc(D, d, method)
    # stabilize roots if needed
    if bad_roots is not None:
        poles = find_roots(a, 'f')
        poles = fix_roots(poles, bad_roots, fix_mode)
        a = find_coeff(poles, 'f')
    # store the forward coefficients
    forward_a = a.copy()
    # find the backwards LP coefficients
    D, d = make_Dd(x, order, 'b')
    a = find_lpc(D, d, method)
    # find poles, reverse poles
    # (reflecting each backward root across the unit circle maps it onto
    # the corresponding forward root)
    poles = find_roots(a, 'b')
    poles = [1. / pole for pole in poles]
    # stabilize roots if needed
    if bad_roots is not None:
        poles = fix_roots(poles, bad_roots, fix_mode)
    # find the backward predicted, forward ordered coefficients
    backward_a = find_coeff(poles, 'f')
    # average the forward and backward coefficients
    return (forward_a + backward_a) / 2.
def find_lpc_bf(x, order, bad_roots, fix_mode, method):
    """
    Determind LP coefficient using backward-forward linear prediction.
    Averages LP coefficients generated from solving the forward and backward
    linear prediction equations after reversing the roots of characteristic
    polynomial of the forward solution. Similar to method described in:
    Delsuc et al., Journal of Magnetic Resonance, 1992, 100, 202-207.
    Description of parameters can be found in :py:func:`lp` function.
    """
    # find backward LP coefficients
    D, d = make_Dd(x, order, 'b')
    a = find_lpc(D, d, method)
    # stabilize roots if needed
    if bad_roots is not None:
        poles = find_roots(a, 'b')
        poles = fix_roots(poles, bad_roots, fix_mode)
        a = find_coeff(poles, 'b')
    # store the backward coefficients
    backward_a = a.copy()
    # find the forward LP coefficients
    D, d = make_Dd(x, order, 'f')
    a = find_lpc(D, d, method)
    # find poles, reverse poles
    # (reflecting each forward root across the unit circle maps it onto
    # the corresponding backward root)
    poles = find_roots(a, 'f')
    poles = [1. / pole for pole in poles]
    # stabilize roots if needed
    if bad_roots is not None:
        poles = fix_roots(poles, bad_roots, fix_mode)
    # find the forward predicted, backward ordered coefficients
    forward_a = find_coeff(poles, 'b')
    # average the forward and backward coefficients
    return (forward_a + backward_a) / 2.
#####################################
# root finding and fixing functions #
#####################################
def find_lproots_hsvd(x, M, K, mode, zmethod='sm'):
    """
    Find LP roots (poles) using the HSVD method
    Perform a HSVD linear prediction to determind signal roots (poles) as
    described in:
    Barkhuijsen, DeBeer, and VanOrmondt, JMR, 1987, 73, 553
    Parameters x, M and K are the same as those described in the above article.
    zmethod refer to the method used to calculate Z', either a least-squares
    method (lstsq) can be used to solve U_b*Z'=U_t or the Sherman-Morrison
    formula (sm) can be used to avoid the full matrix inversion with equation
    [12] being used to find Z'. The Sherman-Morrison method should be faster
    with similar precision.
    Parameters
    ----------
    x : 1D ndarray
        1D trace of NMR data in the time domain, the FID.
    M : int
        Length (M+1) of data matrix to form.
    K : int
        Reduced prediction order (number of signal roots) Must be less than the
        smaller of M + 1 or len(x) - M.
    mode : {'f', 'b'}
        Mode to perform LP. 'f' for forward,'b' for backward.
    zmethod : {'lstsq', 'sm'}
        Method used to find Z' 'lstsq' for least squares, 'sm' for
        Sherman-Morrison.
    Returns
    -------
    y : ndarray
        Array of signal roots (poles)
    """
    # check parameters
    if mode not in ['f', 'b']:
        raise ValueError("mode must be 'f' or 'b'")
    if zmethod not in ['lstsq', 'sm']:
        raise ValueError("zmethod must be 'lstsq' or 'sm'")
    if K > min(M + 1, len(x) - M):
        raise ValueError("K must be less than min(M + 1, len(x) - M)")
    # form the data matrix X
    N = len(x)
    L = N - M - 1
    if mode == "f":
        X = scipy.linalg.hankel(x[:L + 1], x[L:])
    else:
        # for backward LP we need to make the hankel matrix:
        # x_N-1 x_N-2 ... x_N-M-1
        # x_N-2 x_N-3 ... x_N-M-2
        # ...
        # x_M   x_M-1 ... x_0
        X = scipy.linalg.hankel(x[:M - 1:-1], x[M::-1])
    # SVD of data matrix and truncation of U to form Uk
    # NOTE(review): np.mat (np.matrix) is legacy NumPy; `*` below is
    # matrix multiplication because of it.
    U, s, Vh = scipy.linalg.svd(X)
    Uk = np.mat(U[:, :K])    # trucated U matrix of rank K
    Ub = Uk[:-1]    # Uk with bottom row removed
    Ut = Uk[1:]     # Uk with top row removed
    # calculate the Z' matrix
    if zmethod == 'lstsq':  # solve Ub*Z' = Ut using least-squares
        Zp, resid, rank, s = scipy.linalg.lstsq(Ub, Ut)
    else:
        # solve using equation [12]:
        # Z' = (Ek + (u*uh / (1-uh*u)) ) * Ub.H*Ut
        uh = Uk[-1]     # bottom row of Uk
        u = uh.H
        Zp = (np.eye(K, dtype=u.dtype) + (u * uh / (1. - uh * u))) * Ub.H * Ut
    # diagonalization (find eigenvalues) of Z' to yield roots
    return scipy.linalg.eigvals(Zp)
def find_roots(a, mode="f"):
    """
    Find LP roots (poles) from a set of LP coefficients.
    Parameters
    ----------
    a : array
        LP coefficients.
    mode : {'f', 'b'}
        Mode of LP coefficients. 'f' for coefficients ordered m, m - 1,..., 1.
        'b' for coefficients ordered 1, 2, ...., m
    Returns
    -------
    roots : array
        LP roots (poles)
    """
    if mode not in ['f', 'b']:
        raise ValueError("mode must be 'f' or 'b'")
    # STABILITY
    # the algorithm here is that used by numpy roots, build the companion
    # matrix and find its eigenvalues. These values should be polished for
    # better numerical accuracy.
    # np.roots expects a array, p, with coefficients
    # p[0] * x**n + p[1] * x**(n-1] + ... + p[n-1]*x + p[n]
    # in forward mode LP the coefficients are ordered m,m-1,...1
    # in backward mode LP the coefficient are ordered is 1,2,...,m
    # To calculate the roots, create a leading 1.0+0.0j and reverse if needed.
    coeffs = np.empty(len(a) + 1, dtype=a.dtype)
    coeffs[0] = (1.0 + 0.0j)
    if mode == "f":     # reverse for forward LP
        coeffs[1:] = -a.flat[::-1]
    else:               # backward LP
        coeffs[1:] = -a.flat[:]
    return np.roots(coeffs)
def find_coeff(poles, mode="f"):
    """
    Find LP coefficients from a set of LP roots (poles).
    Parameters
    ----------
    poles : ndarray
        Array of LP roots (poles)
    mode : {'f', 'b'}
        Mode in which LP coefficients should be returned.  'f' for coefficients
        ordered m, m - 1,..., 1. 'b' for coefficients ordered 1, 2, ...., m.
    Returns
    -------
    c : ndarray
        LP coefficients ordered according to `mode`.
    """
    # STABILITY
    # np.poly convolves the roots to recover the monic characteristic
    # polynomial; the accuracy of this method depends on the dtype of the
    # poles parameter.
    if mode not in ('f', 'b'):
        raise ValueError("mode must be 'f'or 'b'")
    negated = -np.poly(poles)
    if mode == 'f':     # drop leading term and reverse the ordering
        return np.squeeze(negated[:0:-1])
    return np.squeeze(negated[1:])    # keep coefficients as is
def reverse_filter(a, mode):
    """
    Reverse a filter (change forward LP to backwards LP).

    Finds the roots of the filter, reflects each across the unit circle
    (1/root), and rebuilds the filter in the opposite mode.
    """
    flipped_mode = {'f': 'b', 'b': 'f'}[mode]
    reflected = [1. / pole for pole in find_roots(a, mode)]
    return find_coeff(reflected, flipped_mode)
def fix_roots(poles, fix_roots="incr", fix_mode="reflect"):
    """
    Fix (stabilize) LP roots.
    Parameters
    ----------
    poles : ndarray
        Array of LP roots (poles).  Modified in place.
    fix_roots : {'incr', 'decr'}
        Type of roots which to consider bad and to stabilize. Either those
        with increasing signals 'incr' or decreasing signals 'decr'.
    fix_mode : {'on', 'reflect'}
        Method used to stabilize bad roots, 'on' to move the roots onto the
        unit circle, 'reflect' to reflect bad roots across the unit circle.
    Returns
    -------
    npoles : ndarray
        Array of stabilized LP roots (poles).
    """
    if fix_roots not in ("incr", "decr"):
        raise ValueError("fix_roots must be 'incr' or 'decr'")
    if fix_mode not in ("on", "reflect"):
        raise ValueError("fix_mode must be 'on' or 'reflect'")
    # |pole| > 1 corresponds to an increasing signal, |pole| < 1 to a
    # decreasing one; pick which kind counts as "bad"
    if fix_roots == "incr":
        is_bad = lambda r: np.abs(r) > 1
    else:
        is_bad = lambda r: np.abs(r) < 1
    for i, pole in enumerate(poles):
        if not is_bad(pole):
            continue
        if fix_mode == "on":        # move onto the unit circle
            poles[i] = pole / np.abs(pole)
        else:                       # reflect across the unit circle
            poles[i] = 1 / np.conj(pole)
    return poles
###########################
# Extrapolation functions #
###########################
def extrapolate(trace, a, pred, append):
    """
    Extrapolate points using LP prediction filter.
    Parameters
    ----------
    trace : 1D ndarray
        1D array to extrapolate from and append to.
    a : ndarray
        LP coefficients, must be ordered according to direction of
        extrapolation.
    pred : int
        Number of points to predict using LP.
    append : {'after', 'before'}
        Location to append new points, 'after' for after the current data,
        'before' for before the current data.  (Docstring and error message
        previously said 'a'/'b', which the code never accepted.)
    Returns
    -------
    ntrace : 1D ndarray
        1D array with extrapolated points appended
    """
    m = len(a)          # LP order
    M = len(trace)      # number of points in original trace
    ntrace = np.empty((M + pred), dtype=trace.dtype)
    if append not in ["after", "before"]:
        raise ValueError("append must be 'after' or 'before'")
    if append == "after":   # append after trace
        ntrace[:M] = trace
        for i in range(pred):
            # each new point is the filter applied to the previous m points
            ntrace[M + i] = np.sum(np.multiply(ntrace[M - m + i:M + i],
                                               a.flat))
        return ntrace
    if append == "before":  # append before trace
        ntrace[-M:] = trace
        for i in range(pred):
            # each new point is the filter applied to the following m points
            ntrace[pred - i - 1] = np.sum(np.multiply(
                ntrace[pred - i:pred + m - i], a.flat))
        return ntrace
| [
"numpy.poly",
"numpy.roots",
"numpy.array",
"numpy.arctan2",
"numpy.multiply",
"numpy.dot",
"numpy.empty",
"numpy.concatenate",
"numpy.mat",
"numpy.abs",
"numpy.eye",
"numpy.conj",
"numpy.finfo",
"numpy.power",
"numpy.diag",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.empt... | [((18097, 18148), 'numpy.empty', 'np.empty', (['(count_P * count_M, P * M)'], {'dtype': 'x.dtype'}), '((count_P * count_M, P * M), dtype=x.dtype)\n', (18105, 18148), True, 'import numpy as np\n'), ((18157, 18204), 'numpy.empty', 'np.empty', (['(count_P * count_M, 1)'], {'dtype': 'x.dtype'}), '((count_P * count_M, 1), dtype=x.dtype)\n', (18165, 18204), True, 'import numpy as np\n'), ((20500, 20516), 'numpy.mat', 'np.mat', (['U[:, :K]'], {}), '(U[:, :K])\n', (20506, 20516), True, 'import numpy as np\n'), ((20527, 20544), 'numpy.mat', 'np.mat', (['Vh[:K, :]'], {}), '(Vh[:K, :])\n', (20533, 20544), True, 'import numpy as np\n'), ((20967, 20983), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (20980, 20983), True, 'import numpy as np\n'), ((25280, 25295), 'numpy.array', 'np.array', (['poles'], {}), '(poles)\n', (25288, 25295), True, 'import numpy as np\n'), ((26446, 26455), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (26452, 26455), True, 'import numpy as np\n'), ((26607, 26633), 'numpy.arctan2', 'np.arctan2', (['z.imag', 'z.real'], {}), '(z.imag, z.real)\n', (26617, 26633), True, 'import numpy as np\n'), ((29407, 29436), 'numpy.array', 'np.array', (['(Vh.H * Si * U.H * d)'], {}), '(Vh.H * Si * U.H * d)\n', (29415, 29436), True, 'import numpy as np\n'), ((29551, 29568), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (29559, 29568), True, 'import numpy as np\n'), ((29580, 29598), 'numpy.finfo', 'np.finfo', (['"""single"""'], {}), "('single')\n", (29588, 29598), True, 'import numpy as np\n'), ((29891, 29910), 'numpy.zeros', 'np.zeros', (['(m, L)', 't'], {}), '((m, L), t)\n', (29899, 29910), True, 'import numpy as np\n'), ((30915, 30924), 'numpy.mat', 'np.mat', (['D'], {}), '(D)\n', (30921, 30924), True, 'import numpy as np\n'), ((31314, 31337), 'numpy.append', 'np.append', (['D', 'd'], {'axis': '(1)'}), '(D, d, axis=1)\n', (31323, 31337), True, 'import numpy as np\n'), ((31458, 31471), 'numpy.conj', 'np.conj', (['Vh.T'], 
{}), '(Vh.T)\n', (31465, 31471), True, 'import numpy as np\n'), ((36501, 36517), 'numpy.mat', 'np.mat', (['U[:, :K]'], {}), '(U[:, :K])\n', (36507, 36517), True, 'import numpy as np\n'), ((38365, 38376), 'numpy.roots', 'np.roots', (['p'], {}), '(p)\n', (38373, 38376), True, 'import numpy as np\n'), ((41949, 41986), 'numpy.empty', 'np.empty', (['(M + pred)'], {'dtype': 'trace.dtype'}), '(M + pred, dtype=trace.dtype)\n', (41957, 41986), True, 'import numpy as np\n'), ((15507, 15557), 'numpy.empty', 'np.empty', (['(2 * N_0 - 1, N_1 + pred)'], {'dtype': 'x.dtype'}), '((2 * N_0 - 1, N_1 + pred), dtype=x.dtype)\n', (15515, 15557), True, 'import numpy as np\n'), ((15644, 15690), 'numpy.empty', 'np.empty', (['(2 * N_0, N_1 + pred)'], {'dtype': 'x.dtype'}), '((2 * N_0, N_1 + pred), dtype=x.dtype)\n', (15652, 15690), True, 'import numpy as np\n'), ((20804, 20840), 'numpy.array', 'np.array', (['[(l - s2 / l) for l in sl]'], {}), '([(l - s2 / l) for l in sl])\n', (20812, 20840), True, 'import numpy as np\n'), ((20856, 20867), 'numpy.diag', 'np.diag', (['sl'], {}), '(sl)\n', (20863, 20867), True, 'import numpy as np\n'), ((25399, 25410), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (25407, 25410), True, 'import numpy as np\n'), ((26251, 26283), 'numpy.arctan2', 'np.arctan2', (['pole.imag', 'pole.real'], {}), '(pole.imag, pole.real)\n', (26261, 26283), True, 'import numpy as np\n'), ((28148, 28177), 'numpy.concatenate', 'np.concatenate', (['(x[:0:-1], x)'], {}), '((x[:0:-1], x))\n', (28162, 28177), True, 'import numpy as np\n'), ((29267, 29276), 'numpy.mat', 'np.mat', (['U'], {}), '(U)\n', (29273, 29276), True, 'import numpy as np\n'), ((29278, 29288), 'numpy.mat', 'np.mat', (['Vh'], {}), '(Vh)\n', (29284, 29288), True, 'import numpy as np\n'), ((30200, 30209), 'numpy.mat', 'np.mat', (['q'], {}), '(q)\n', (30206, 30209), True, 'import numpy as np\n'), ((30211, 30220), 'numpy.mat', 'np.mat', (['r'], {}), '(r)\n', (30217, 30220), True, 'import numpy as np\n'), ((30942, 30956), 
'numpy.dot', 'np.dot', (['D.H', 'D'], {}), '(D.H, D)\n', (30948, 30956), True, 'import numpy as np\n'), ((30975, 30989), 'numpy.dot', 'np.dot', (['D.H', 'd'], {}), '(D.H, d)\n', (30981, 30989), True, 'import numpy as np\n'), ((4490, 4519), 'numpy.empty', 'np.empty', (['s'], {'dtype': 'data.dtype'}), '(s, dtype=data.dtype)\n', (4498, 4519), True, 'import numpy as np\n'), ((16321, 16359), 'numpy.sum', 'np.sum', (['(new[i:i + P, j:j + M].flat * c)'], {}), '(new[i:i + P, j:j + M].flat * c)\n', (16327, 16359), True, 'import numpy as np\n'), ((26012, 26024), 'numpy.abs', 'np.abs', (['pole'], {}), '(pole)\n', (26018, 26024), True, 'import numpy as np\n'), ((28217, 28245), 'numpy.concatenate', 'np.concatenate', (['(x[::-1], x)'], {}), '((x[::-1], x))\n', (28231, 28245), True, 'import numpy as np\n'), ((21112, 21135), 'numpy.diag', 'np.diag', (['Xp[:, ::-1]', 'v'], {}), '(Xp[:, ::-1], v)\n', (21119, 21135), True, 'import numpy as np\n'), ((29994, 30007), 'numpy.conj', 'np.conj', (['s[i]'], {}), '(s[i])\n', (30001, 30007), True, 'import numpy as np\n'), ((40590, 40602), 'numpy.abs', 'np.abs', (['pole'], {}), '(pole)\n', (40596, 40602), True, 'import numpy as np\n'), ((40905, 40917), 'numpy.abs', 'np.abs', (['pole'], {}), '(pole)\n', (40911, 40917), True, 'import numpy as np\n'), ((42228, 42272), 'numpy.multiply', 'np.multiply', (['ntrace[M - m + i:M + i]', 'a.flat'], {}), '(ntrace[M - m + i:M + i], a.flat)\n', (42239, 42272), True, 'import numpy as np\n'), ((42483, 42533), 'numpy.multiply', 'np.multiply', (['ntrace[pred - i:pred + m - i]', 'a.flat'], {}), '(ntrace[pred - i:pred + m - i], a.flat)\n', (42494, 42533), True, 'import numpy as np\n'), ((20766, 20784), 'numpy.power', 'np.power', (['s[K:]', '(2)'], {}), '(s[K:], 2)\n', (20774, 20784), True, 'import numpy as np\n'), ((36984, 37008), 'numpy.eye', 'np.eye', (['K'], {'dtype': 'u.dtype'}), '(K, dtype=u.dtype)\n', (36990, 37008), True, 'import numpy as np\n'), ((39214, 39228), 'numpy.poly', 'np.poly', (['poles'], {}), 
'(poles)\n', (39221, 39228), True, 'import numpy as np\n'), ((39302, 39316), 'numpy.poly', 'np.poly', (['poles'], {}), '(poles)\n', (39309, 39316), True, 'import numpy as np\n'), ((40725, 40737), 'numpy.abs', 'np.abs', (['pole'], {}), '(pole)\n', (40731, 40737), True, 'import numpy as np\n'), ((40795, 40808), 'numpy.conj', 'np.conj', (['pole'], {}), '(pole)\n', (40802, 40808), True, 'import numpy as np\n'), ((41040, 41052), 'numpy.abs', 'np.abs', (['pole'], {}), '(pole)\n', (41046, 41052), True, 'import numpy as np\n'), ((41110, 41123), 'numpy.conj', 'np.conj', (['pole'], {}), '(pole)\n', (41117, 41123), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from rfvision.components.utils import (xyz2uv, affine_transform, get_K, generate_heatmap_2d)
from rfvision.datasets import PIPELINES
@PIPELINES.register_module()
class GetJointsUV:
    # Compute joints uv toward joints xyz and K.
    def get_joints_bbox(self, joints_uv):
        """Return the axis-aligned bbox and its center for a set of uv points."""
        # get bbox info of joints_uv
        min_u, min_v = joints_uv.min(0)
        max_u, max_v = joints_uv.max(0)
        joints_bbox_uv = np.array([min_u, min_v, max_u, max_v])  # bbox left-top uv , right_bottom uv
        # get the center minimum bounding rectangle as joints_uv center
        c_u = int((max_u + min_u) / 2)
        c_v = int((max_v + min_v) / 2)
        joints_center_uv = (c_u, c_v)
        return joints_bbox_uv, joints_center_uv
    def __call__(self, results):
        """Project joints_xyz through K and add uv/visibility/bbox keys."""
        # NOTE(review): results['img_shape'][:2] is (h, w) while joints_uv
        # columns are (u, v) = (x, y); the comparison below only matches
        # axes for square images -- confirm intended ordering.
        img_shape = np.array(results['img_shape'][:2]).reshape(-1, 2)
        joints_uv = xyz2uv(results['joints_xyz'], results['K'])
        # joints_uv_visible: joints_uv inside image boundary is visible (set to 1) and joints_uv out of image
        # boundary is invisible (set to 0) Typically, joint_uv = (126, 262), image boundary wh = (224, 224),
        # thus this joint is invisible. (Due to v (262) > h (224) )
        bool_matrix = joints_uv < img_shape
        joints_uv_visible = np.logical_and(bool_matrix[:, 0], bool_matrix[:, 1]).astype('int32')
        joints_bbox_uv, joints_center_uv = self.get_joints_bbox(joints_uv)
        results['joints_uv'] = joints_uv
        results['joints_uv_visible'] = joints_uv_visible
        results['joints_bbox_uv'] = joints_bbox_uv
        results['joints_center_uv'] = joints_center_uv
        return results
@PIPELINES.register_module()
class AffineCorp(object):
    """
    Centralize the joints to image center for better performance of
    models.
    Require keys : joints_xyz, K, img
    Generate keys : joints_uv, img_shape, rot_angle,
    Update keys : joints_xyz, K, img
    Args:
        img_outsize: image output size
        rot_angle_range: rotate angle range, uint: degree
    """
    def __init__(self,
                 centralize=True,
                 img_outsize=(256, 256),
                 rot_angle_range=(-180, 180),
                 ):
        # TODO: Add more argumentations.
        self.rot_angle_range = rot_angle_range
        self.img_outsize = img_outsize
        self.centralize = centralize
    def __call__(self, results):
        # draw a random rotation angle (degrees); 0 when no range is set
        if self.rot_angle_range is None:
            rot_angle = 0
        else:
            rot_angle = np.random.randint(low=self.rot_angle_range[0],
                                          high=self.rot_angle_range[1])
        if self.centralize == True:
            joints_center_uv = results['joints_center_uv']
            # rotate first
            affine_matrix = cv2.getRotationMatrix2D(joints_center_uv, rot_angle, scale=1)
            img_center_uv = np.array(results['img_shape'][:2][::-1]) // 2
            # then shift joints_center_uv to img_center_uv
            delta = np.array(joints_center_uv) - img_center_uv
            affine_matrix[:, 2] -= delta
        else:
            # rotate about the output image center (u, v ordering)
            center_uv = (self.img_outsize[1] // 2, self.img_outsize[0] // 2)
            affine_matrix = cv2.getRotationMatrix2D(center_uv, rot_angle, scale=1)
        # affine
        # NOTE(review): cv2.warpAffine expects dsize as (width, height);
        # confirm img_outsize ordering matches (non-square sizes would
        # transpose the output).
        img_affine = cv2.warpAffine(results['img'], affine_matrix, self.img_outsize)
        joints_uv_affine = affine_transform(results['joints_uv'], affine_matrix)
        # rotate xy only
        joints_xy_affine = affine_transform(results['joints_xyz'][:, :2], affine_matrix)
        joints_xyz_affine = np.hstack((joints_xy_affine, results['joints_xyz'][:, 2:]))
        # compute new K
        K_new = get_K(joints_xyz_affine, joints_uv_affine)
        results['img'] = img_affine
        results['joints_xyz'] = joints_xyz_affine
        results['joints_uv'] = joints_uv_affine
        results['img_shape'] = self.img_outsize
        results['rot_angle'] = rot_angle
        results['K'] = K_new
        return results
@PIPELINES.register_module()
class GenerateHeatmap2D:
    """Generate per-joint 2D Gaussian heatmaps.

    Require keys: joints_uv, img_shape, joints_uv_visible
    Generate keys: heatmap, heatmap_weight

    Args:
        heatmap_shape: output heatmap size.
        sigma: Gaussian sigma.
    """
    def __init__(self,
                 heatmap_shape=(64, 64),
                 sigma=1):
        self.heatmap_shape = np.array(heatmap_shape)
        self.sigma = sigma
    def __call__(self, results):
        # Rescale joint pixel coordinates from image to heatmap resolution.
        # NOTE(review): img_shape[:2] is used here without [::-1], unlike
        # JointsUVNormalize below -- equivalent for square shapes, verify
        # correctness for non-square images/heatmaps.
        joints_uv_for_hm = results['joints_uv'] / results['img_shape'][:2] * self.heatmap_shape
        maps = []
        for uv, visible in zip(np.int32(joints_uv_for_hm), results['joints_uv_visible']):
            if visible == 1:
                maps.append(generate_heatmap_2d(uv, self.heatmap_shape, self.sigma))
            else:
                # Invisible joints get an all-zero map.
                maps.append(np.zeros(self.heatmap_shape))
        hm = np.array(maps)
        hm_weight = np.ones((results['joints_uv'].shape[0], 1))
        # e.g. for num_joints = 21: hm (21, 64, 64), hm_weight (21, 1)
        results['heatmap'] = hm
        results['heatmap_weight'] = hm_weight
        return results
@PIPELINES.register_module()
class JointsUVNormalize:
    """Normalize joint (u, v) pixel coordinates by the image size.

    img_shape is stored as (h, w); it is reversed to (w, h) so u divides
    by width and v by height.
    """
    def __call__(self, results):
        size_wh = results['img_shape'][:2][::-1]
        results['joints_uv'] = results['joints_uv'] / size_wh
        return results
if __name__ == '__main__':
pass | [
"cv2.warpAffine",
"numpy.ones",
"numpy.logical_and",
"numpy.hstack",
"rfvision.components.utils.generate_heatmap_2d",
"numpy.int32",
"rfvision.components.utils.get_K",
"numpy.array",
"rfvision.components.utils.xyz2uv",
"numpy.random.randint",
"numpy.zeros",
"rfvision.components.utils.affine_tr... | [((166, 193), 'rfvision.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (191, 193), False, 'from rfvision.datasets import PIPELINES\n'), ((1662, 1689), 'rfvision.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (1687, 1689), False, 'from rfvision.datasets import PIPELINES\n'), ((4009, 4036), 'rfvision.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (4034, 4036), False, 'from rfvision.datasets import PIPELINES\n'), ((5093, 5120), 'rfvision.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (5118, 5120), False, 'from rfvision.datasets import PIPELINES\n'), ((446, 484), 'numpy.array', 'np.array', (['[min_u, min_v, max_u, max_v]'], {}), '([min_u, min_v, max_u, max_v])\n', (454, 484), True, 'import numpy as np\n'), ((883, 926), 'rfvision.components.utils.xyz2uv', 'xyz2uv', (["results['joints_xyz']", "results['K']"], {}), "(results['joints_xyz'], results['K'])\n", (889, 926), False, 'from rfvision.components.utils import xyz2uv, affine_transform, get_K, generate_heatmap_2d\n'), ((3298, 3361), 'cv2.warpAffine', 'cv2.warpAffine', (["results['img']", 'affine_matrix', 'self.img_outsize'], {}), "(results['img'], affine_matrix, self.img_outsize)\n", (3312, 3361), False, 'import cv2\n'), ((3389, 3442), 'rfvision.components.utils.affine_transform', 'affine_transform', (["results['joints_uv']", 'affine_matrix'], {}), "(results['joints_uv'], affine_matrix)\n", (3405, 3442), False, 'from rfvision.components.utils import xyz2uv, affine_transform, get_K, generate_heatmap_2d\n'), ((3495, 3556), 'rfvision.components.utils.affine_transform', 'affine_transform', (["results['joints_xyz'][:, :2]", 'affine_matrix'], {}), "(results['joints_xyz'][:, :2], affine_matrix)\n", (3511, 3556), False, 'from rfvision.components.utils import xyz2uv, affine_transform, get_K, generate_heatmap_2d\n'), ((3585, 3644), 
'numpy.hstack', 'np.hstack', (["(joints_xy_affine, results['joints_xyz'][:, 2:])"], {}), "((joints_xy_affine, results['joints_xyz'][:, 2:]))\n", (3594, 3644), True, 'import numpy as np\n'), ((3686, 3728), 'rfvision.components.utils.get_K', 'get_K', (['joints_xyz_affine', 'joints_uv_affine'], {}), '(joints_xyz_affine, joints_uv_affine)\n', (3691, 3728), False, 'from rfvision.components.utils import xyz2uv, affine_transform, get_K, generate_heatmap_2d\n'), ((4386, 4409), 'numpy.array', 'np.array', (['heatmap_shape'], {}), '(heatmap_shape)\n', (4394, 4409), True, 'import numpy as np\n'), ((4850, 4893), 'numpy.ones', 'np.ones', (["(results['joints_uv'].shape[0], 1)"], {}), "((results['joints_uv'].shape[0], 1))\n", (4857, 4893), True, 'import numpy as np\n'), ((2517, 2593), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'self.rot_angle_range[0]', 'high': 'self.rot_angle_range[1]'}), '(low=self.rot_angle_range[0], high=self.rot_angle_range[1])\n', (2534, 2593), True, 'import numpy as np\n'), ((2786, 2847), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['joints_center_uv', 'rot_angle'], {'scale': '(1)'}), '(joints_center_uv, rot_angle, scale=1)\n', (2809, 2847), False, 'import cv2\n'), ((3204, 3258), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center_uv', 'rot_angle'], {'scale': '(1)'}), '(center_uv, rot_angle, scale=1)\n', (3227, 3258), False, 'import cv2\n'), ((813, 847), 'numpy.array', 'np.array', (["results['img_shape'][:2]"], {}), "(results['img_shape'][:2])\n", (821, 847), True, 'import numpy as np\n'), ((1286, 1338), 'numpy.logical_and', 'np.logical_and', (['bool_matrix[:, 0]', 'bool_matrix[:, 1]'], {}), '(bool_matrix[:, 0], bool_matrix[:, 1])\n', (1300, 1338), True, 'import numpy as np\n'), ((2876, 2916), 'numpy.array', 'np.array', (["results['img_shape'][:2][::-1]"], {}), "(results['img_shape'][:2][::-1])\n", (2884, 2916), True, 'import numpy as np\n'), ((3001, 3027), 'numpy.array', 'np.array', (['joints_center_uv'], {}), 
'(joints_center_uv)\n', (3009, 3027), True, 'import numpy as np\n'), ((4591, 4646), 'rfvision.components.utils.generate_heatmap_2d', 'generate_heatmap_2d', (['uv', 'self.heatmap_shape', 'self.sigma'], {}), '(uv, self.heatmap_shape, self.sigma)\n', (4610, 4646), False, 'from rfvision.components.utils import xyz2uv, affine_transform, get_K, generate_heatmap_2d\n'), ((4693, 4721), 'numpy.zeros', 'np.zeros', (['self.heatmap_shape'], {}), '(self.heatmap_shape)\n', (4701, 4721), True, 'import numpy as np\n'), ((4770, 4796), 'numpy.int32', 'np.int32', (['joints_uv_for_hm'], {}), '(joints_uv_for_hm)\n', (4778, 4796), True, 'import numpy as np\n')] |
import librosa
import stempeg
import sys
import time
import pyaudio
import aubio
import numpy as np
import sys
import os
print("Starting Code")
def aubiorun(a, rate):
    """Play the wav file *a* through pyaudio while beat-tracking it with aubio.

    Args:
        a: path to a wav file.
        rate: sample rate passed to the aubio source.

    Returns:
        (bpmlist, timelist): per-beat BPM values and elapsed times (seconds
        since playback start), both as lists of strings.
    """
    win_s = 1024        # fft size
    hop_s = win_s // 2  # hop size
    bpmlist = []
    timelist = []
    filename = a
    samplerate = rate
    # create aubio source
    a_source = aubio.source(filename, samplerate, hop_s)
    samplerate = a_source.samplerate
    # create aubio tempo detection
    a_tempo = aubio.tempo("default", win_s, hop_s, samplerate)
    firsttime = time.time()
    previoustime = firsttime

    def pyaudio_callback(_in_data, _frame_count, _time_info, _status):
        # BUGFIX: must rebind the *enclosing* function's variable. The old
        # ``global previoustime`` referenced a module-level name that never
        # existed, raising NameError on the first detected beat.
        nonlocal previoustime
        samples, read = a_source()
        is_beat = a_tempo(samples)
        if is_beat:
            nowtime = time.time()
            bpm = 60 // (nowtime - previoustime)
            bpmlist.append(str(bpm))
            previoustime = nowtime
            timelist.append(str(nowtime - firsttime))
            # avoid print in audio callback
        audiobuf = samples.tobytes()
        if read < hop_s:
            return (audiobuf, pyaudio.paComplete)
        return (audiobuf, pyaudio.paContinue)

    # create pyaudio stream with frames_per_buffer=hop_s and format=paFloat32
    p = pyaudio.PyAudio()
    pyaudio_format = pyaudio.paFloat32
    frames_per_buffer = hop_s
    n_channels = 1
    stream = p.open(format=pyaudio_format, channels=n_channels, rate=samplerate,
                    output=True, frames_per_buffer=frames_per_buffer,
                    stream_callback=pyaudio_callback)
    # start pyaudio stream
    stream.start_stream()
    # wait for stream to finish
    while stream.is_active():
        time.sleep(0.1)
    # stop pyaudio stream and close pyaudio
    stream.stop_stream()
    stream.close()
    p.terminate()
    return bpmlist, timelist
def makedataset(a):
    """Extract per-stem beat data for one musdb18 track.

    Decodes the 5 stems of ``<a>.mp4`` to temporary wav files, beat-tracks
    each stem with ``aubiorun``, writes ``<a><i>bpm.txt`` / ``<a><i>time.txt``
    per stem, then deletes the temporary wavs.

    Args:
        a: track name (file name without extension) in the train folder.
    """
    filename = a
    path = "C:/Users/ROG/Downloads/musdb18/Tempwav/"
    path2 = "C:/Users/ROG/Downloads/musdb18/Temp/"
    filepath = "C:/Users/ROG/Downloads/musdb18/train/" + filename + ".mp4"
    print(filepath)
    S, rate = stempeg.read_stems(filepath)
    n_stems = 5
    # Decode every stem to a temporary wav file (was 5 copy-pasted lines).
    for i in range(n_stems):
        librosa.output.write_wav(path + f'{i}.wav', S[i].T, rate)
    print("Temporary wav files Created")
    # Beat-track each stem and persist both result columns
    # (was 5 + 10 copy-pasted blocks).
    for i in range(n_stems):
        bpm_col, time_col = aubiorun(path + f'{i}.wav', rate)
        print(f"part {i} done")
        for suffix, column in (("bpm", bpm_col), ("time", time_col)):
            savepath = path2 + filename + f"{i}{suffix}.txt"
            with open(savepath, "w") as outfile:
                outfile.write(",".join(column))
    # Clean up the temporary wavs.
    for i in range(n_stems):
        os.remove(path + f'{i}.wav')
    print("Temporary wav files Deleted")
# Drive the dataset extraction over every track listed in list2.txt.
# (the redundant second ``import os`` was removed -- os is imported at the
# top of the file and is not used in this section anyway)
txtfile = 'C:/Users/ROG/Downloads/musdb18/list2.txt'
with open(txtfile) as f:
    lines = f.read().splitlines()
for i in lines:
    makedataset(i)
| [
"aubio.source",
"librosa.output.write_wav",
"time.sleep",
"stempeg.read_stems",
"aubio.tempo",
"pyaudio.PyAudio",
"time.time",
"numpy.arange",
"os.remove"
] | [((371, 412), 'aubio.source', 'aubio.source', (['filename', 'samplerate', 'hop_s'], {}), '(filename, samplerate, hop_s)\n', (383, 412), False, 'import aubio\n'), ((557, 605), 'aubio.tempo', 'aubio.tempo', (['"""default"""', 'win_s', 'hop_s', 'samplerate'], {}), "('default', win_s, hop_s, samplerate)\n", (568, 605), False, 'import aubio\n'), ((745, 756), 'time.time', 'time.time', ([], {}), '()\n', (754, 756), False, 'import time\n'), ((1526, 1543), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1541, 1543), False, 'import pyaudio\n'), ((2351, 2379), 'stempeg.read_stems', 'stempeg.read_stems', (['filepath'], {}), '(filepath)\n', (2369, 2379), False, 'import stempeg\n'), ((2385, 2439), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(path + '0.wav')", 'S[0].T', 'rate'], {}), "(path + '0.wav', S[0].T, rate)\n", (2409, 2439), False, 'import librosa\n'), ((2444, 2498), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(path + '1.wav')", 'S[1].T', 'rate'], {}), "(path + '1.wav', S[1].T, rate)\n", (2468, 2498), False, 'import librosa\n'), ((2503, 2557), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(path + '2.wav')", 'S[2].T', 'rate'], {}), "(path + '2.wav', S[2].T, rate)\n", (2527, 2557), False, 'import librosa\n'), ((2562, 2616), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(path + '3.wav')", 'S[3].T', 'rate'], {}), "(path + '3.wav', S[3].T, rate)\n", (2586, 2616), False, 'import librosa\n'), ((2621, 2675), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(path + '4.wav')", 'S[4].T', 'rate'], {}), "(path + '4.wav', S[4].T, rate)\n", (2645, 2675), False, 'import librosa\n'), ((4289, 4314), 'os.remove', 'os.remove', (["(path + '0.wav')"], {}), "(path + '0.wav')\n", (4298, 4314), False, 'import os\n'), ((4318, 4343), 'os.remove', 'os.remove', (["(path + '1.wav')"], {}), "(path + '1.wav')\n", (4327, 4343), False, 'import os\n'), ((4347, 4372), 'os.remove', 'os.remove', (["(path + '2.wav')"], {}), "(path + 
'2.wav')\n", (4356, 4372), False, 'import os\n'), ((4376, 4401), 'os.remove', 'os.remove', (["(path + '3.wav')"], {}), "(path + '3.wav')\n", (4385, 4401), False, 'import os\n'), ((4405, 4430), 'os.remove', 'os.remove', (["(path + '4.wav')"], {}), "(path + '4.wav')\n", (4414, 4430), False, 'import os\n'), ((1946, 1961), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1956, 1961), False, 'import time\n'), ((995, 1006), 'time.time', 'time.time', ([], {}), '()\n', (1004, 1006), False, 'import time\n'), ((679, 695), 'numpy.arange', 'np.arange', (['hop_s'], {}), '(hop_s)\n', (688, 695), True, 'import numpy as np\n')] |
from nlp_id.stopword import StopWord
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import csv
import numpy as np
def create_dataset(file_path):
    """Load a labelled sentiment CSV and split it 80/20 into train/test.

    Expects a header row followed by ``label,text`` rows. Each text is
    lower-cased and stripped of Indonesian stopwords.

    Args:
        file_path: path to the CSV file.

    Returns:
        (training_sentences, testing_sentences, training_labels, testing_labels)
    """
    stopword = StopWord()
    sentences = []
    labels = []
    # FIX: the csv module requires files opened with newline='' so that
    # newline translation cannot corrupt quoted fields.
    with open(file_path, 'r', newline='') as file:
        reader = csv.reader(file, delimiter=',')
        next(reader)  # skip the header row
        for row in reader:
            labels.append(int(row[0]))
            text = row[1].lower()
            sentence = stopword.remove_stopword(text)
            sentences.append(sentence)
    training_size = int(0.8 * len(sentences))
    training_sentences = sentences[0:training_size]
    testing_sentences = sentences[training_size:]
    training_labels = labels[0:training_size]
    testing_labels = labels[training_size:]
    print(f'training labels length: {len(training_labels)}')
    print(f'training sentences length: {len(training_sentences)}')
    print(f'testing labels length: {len(testing_labels)}')
    print(f'testing sentences length: {len(testing_sentences)}')
    return training_sentences, testing_sentences, training_labels, testing_labels
def create_model(x_train, x_test, y_train, y_test):
    """Tokenize the sentences, build a two-layer-LSTM sentiment classifier,
    train it on the given split and return the fitted Keras model.
    """
    oov_tok = "<OOV>"
    max_length = 120
    padding_type = 'post'
    trunc_type = 'post'
    embedding_dim = 16
    tokenizer = Tokenizer(num_words=1000, oov_token=oov_tok)
    tokenizer.fit_on_texts(x_train)
    vocab_size = len(tokenizer.word_index) + 1

    def _encode(sentences):
        # Integer-encode and pad/truncate each sentence to max_length.
        seqs = tokenizer.texts_to_sequences(sentences)
        padded = pad_sequences(seqs, maxlen=max_length,
                              padding=padding_type, truncating=trunc_type)
        return np.array(padded)

    training_padded = _encode(x_train)
    testing_padded = _encode(x_test)
    training_labels = np.array(y_train)
    testing_labels = np.array(y_test)
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  input_length=max_length),
        tf.keras.layers.LSTM(64, return_sequences=True),
        tf.keras.layers.LSTM(64, return_sequences=True),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.fit(training_padded, training_labels,
              epochs=30, verbose=2,
              validation_data=(testing_padded, testing_labels))
    return model
if __name__ == '__main__':
    # Build the dataset split, train the classifier and persist it.
    # (removed dead commented-out debugging code)
    dataset_dir = '/Users/fttiunjani/gemastik/practice/dataset/dataset-idsa-master/labelled-sentiment-copy.csv'
    training_sentences, testing_sentences, training_labels, testing_labels = create_dataset(dataset_dir)
    model = create_model(training_sentences, testing_sentences,
                         training_labels, testing_labels)
    model.save('sentiment-model.h5')
| [
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Embedding",
"numpy.array",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.preprocessing.text.Tokenizer",
"nlp_id.stopword.StopWord",
"tensorflow.keras.layers.Dense",
"csv.reader"
] | [((263, 273), 'nlp_id.stopword.StopWord', 'StopWord', ([], {}), '()\n', (271, 273), False, 'from nlp_id.stopword import StopWord\n'), ((1375, 1419), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': '(1000)', 'oov_token': 'oov_tok'}), '(num_words=1000, oov_token=oov_tok)\n', (1384, 1419), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((1587, 1688), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['training_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trunc_type'}), '(training_sequences, maxlen=max_length, padding=padding_type,\n truncating=trunc_type)\n', (1600, 1688), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1804, 1904), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['testing_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trunc_type'}), '(testing_sequences, maxlen=max_length, padding=padding_type,\n truncating=trunc_type)\n', (1817, 1904), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1959, 1984), 'numpy.array', 'np.array', (['training_padded'], {}), '(training_padded)\n', (1967, 1984), True, 'import numpy as np\n'), ((2007, 2024), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2015, 2024), True, 'import numpy as np\n'), ((2046, 2070), 'numpy.array', 'np.array', (['testing_padded'], {}), '(testing_padded)\n', (2054, 2070), True, 'import numpy as np\n'), ((2092, 2108), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2100, 2108), True, 'import numpy as np\n'), ((366, 397), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (376, 397), False, 'import csv\n'), ((2152, 2229), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['vocab_size', 'embedding_dim'], {'input_length': 'max_length'}), '(vocab_size, embedding_dim, 
input_length=max_length)\n', (2177, 2229), True, 'import tensorflow as tf\n'), ((2375, 2422), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (2395, 2422), True, 'import tensorflow as tf\n'), ((2432, 2479), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (2452, 2479), True, 'import tensorflow as tf\n'), ((2489, 2533), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (2510, 2533), True, 'import tensorflow as tf\n'), ((2543, 2589), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2564, 2589), True, 'import tensorflow as tf\n')] |
"""This is a plotting script for one particular configuration specified via command
line argument. It can be called like this:
`python plot_eigvals_vivit_vs_fb.py --config_str fmnist_2c2d_sgd`
`run_plot_eigvals_vivit_vs_fb.py` automatically calls this script for all
configurations.
This script consists of 3 steps:
1) Parse the command line argument and map to actual configuration.
2) Compute and store the plotting data (if neccessary). We compute the eigenvalues using
ViViT (using its approximations) and compare this to the full-batch directional
derivative.
3) Compute and store the figure (if neccessary). This figure shows a metric for the
error between the quantities computed in 2).
"""
import os
import numpy as np
import torch
from config import _add_decimal_checkpoints, config_str_to_config
from deepobs.pytorch import testproblems
from eval import load_checkpoint
from matplotlib import pyplot as plt
from plot_shared import get_case_color_marker, get_xticks_labels
from run_plot_eigvals_vivit_vs_fb import get_plot_savedir
from torch import cuda, device
from utils_shared import (
check_cases,
directional_derivatives,
dump_json,
eval_eigspace_pi,
eval_eigspace_vivit,
get_case_label,
get_config_str_parser,
get_deepobs_dataloader,
load_json,
tensor_to_list,
)
from exp.utils.deepobs import get_deterministic_deepobs_train_loader
from vivit.hessianfree import GGNLinearOperator
def get_plot_savepath(file_name, extension=".pdf"):
    """Return the full save path for an evaluation artifact named `file_name`."""
    return os.path.join(get_plot_savedir(), f"{file_name}{extension}")
# Global evaluation switches.
DEVICE = device("cuda" if cuda.is_available() else "cpu")
VERBOSE = True
RECOMPUTE_DATA = False
RECOMPUTE_FIG = True
CHECK_DETERMINISTIC_BATCH = True
CHECK_DETERMINISTIC_FB = False


# ======================================================================================
# Define cases
# ======================================================================================
BATCH_SIZE = 128

# Standard cases evaluated for every configuration: a power-iteration
# reference and a ViViT run on a 16-sample subsample of the mini-batch.
STD_CASES = [
    dict(batch_size=BATCH_SIZE, subsampling=None, mc_samples=0, method="PI"),
    dict(
        batch_size=BATCH_SIZE,
        subsampling=list(range(16)),
        mc_samples=0,
        method="ViViT",
    ),
]
def get_cases(config):
    """Extend the standard cases ``STD_CASES`` (used by all configurations) by some
    configuration-specific case(s). Check that the given method is reasonable by
    calling ``check_cases`` and add the case label.

    Raises:
        NotImplementedError: if the configuration's problem class is unknown.
    """
    problem_cls = config["problem_cls"]
    if problem_cls in [
        testproblems.cifar10_3c3d,
        testproblems.fmnist_2c2d,
        testproblems.cifar10_resnet32,
    ]:
        mc_samples = 1
    elif problem_cls in [testproblems.cifar100_allcnnc]:
        mc_samples = 10
    else:
        # BUGFIX: without this branch, ``mc_samples`` was unbound for unknown
        # problem classes and a confusing NameError was raised below. Fail
        # loudly instead (consistent with ``get_nof_reps``).
        raise NotImplementedError(f"Problem class {problem_cls} not supported.")
    extra_cases = [
        {
            "batch_size": BATCH_SIZE,
            "subsampling": None,
            "mc_samples": mc_samples,
            "method": "ViViT",
        },
        {
            "batch_size": BATCH_SIZE,
            "subsampling": list(range(16)),
            "mc_samples": mc_samples,
            "method": "ViViT",
        },
    ]
    all_cases = STD_CASES + extra_cases
    # Add label and check all cases
    for case in all_cases:
        case["label"] = get_case_label(case)
    check_cases(all_cases)
    return all_cases
def get_nof_reps(config):
    """Choose number of repetitions per case based on ``config``."""
    problem_cls = config["problem_cls"]
    cheap_problems = (
        testproblems.cifar10_3c3d,
        testproblems.fmnist_2c2d,
        testproblems.cifar10_resnet32,
    )
    if problem_cls in cheap_problems:
        return 3
    if problem_cls in (testproblems.cifar100_allcnnc,):
        return 1
    raise NotImplementedError(f"Problem class {problem_cls} not supported.")
def get_fewer_checkpoints(config):
    """Choose grid of checkpoints based on ``config``.

    Cheap problems keep every checkpoint; expensive ones are thinned to
    every 2nd / 4th checkpoint while always keeping the final one.
    """
    problem_cls = config["problem_cls"]
    checkpoints = config["checkpoints"]
    if problem_cls in [testproblems.cifar10_3c3d, testproblems.fmnist_2c2d]:
        stride = 1
    elif problem_cls in [testproblems.cifar10_resnet32]:
        stride = 2
    elif problem_cls in [testproblems.cifar100_allcnnc]:
        stride = 4
    else:
        raise NotImplementedError(f"Problem class {problem_cls} not supported.")
    if stride == 1:
        return checkpoints
    return checkpoints[::stride] + [checkpoints[-1]]
# ======================================================================================
# Computation of plotting data
# ======================================================================================
def eval_eigspace(case, model, loss_function, batch_data, top_C):
    """Evaluate the GGN's eigenspace either using ViViT or using the power iteration
    implemented by ``GGNLinearOperator``.
    """
    # The batch must match the batch size the case was defined for.
    _, labels = batch_data
    if len(labels) != case["batch_size"]:
        raise ValueError(
            "Provided data does not match the batchsize specified by case."
        )
    method = case["method"]
    if method == "ViViT":
        return eval_eigspace_vivit(
            case,
            model,
            loss_function,
            batch_data,
            top_C,
            device=DEVICE,
            verbose=VERBOSE,
        )
    if method == "PI":
        return eval_eigspace_pi(
            model,
            loss_function,
            batch_data,
            top_C,
            device=DEVICE,
            check_deterministic=CHECK_DETERMINISTIC_BATCH,
            verbose=VERBOSE,
        )
    raise ValueError(f"Unknown computing method {method}")
def eval_config(config, cases, json_path, nof_reps):
    """For the given configuration, evaluate all cases given by ``cases``. Store
    the results at ``json_path``. ``results`` is basically a copy of ``cases``, where
    each case has additionals keys for the results.

    Args:
        config: configuration dict (problem class, optimizer class, number of
            classes, checkpoints).
        cases: list of case dicts as produced by ``get_cases`` (mutated in
            place: the keys ``eigvals_vivit`` and ``eigvals_fb`` are added).
        json_path: file path the results are dumped to as JSON.
        nof_reps: number of mini-batch repetitions per checkpoint.

    Returns:
        The list of (extended) case dicts.
    """
    if VERBOSE:
        print("\nWorking on config = \n", config)
    problem_cls = config["problem_cls"]
    optimizer_cls = config["optimizer_cls"]
    num_classes = config["num_classes"]
    # Get deterministic training set data loader for full-batch GGN
    torch.manual_seed(0)
    training_dataloader = get_deterministic_deepobs_train_loader(
        problem_cls, BATCH_SIZE
    )
    # Get data with batch size ``BATCH_SIZE`` for approximations (NOTE: all cases use
    # this batch size)
    torch.manual_seed(0)
    batch_data_list = list(get_deepobs_dataloader(problem_cls, BATCH_SIZE))[0:nof_reps]
    results = []
    for case in cases:
        # One (nof_reps x checkpoints x classes) tensor per quantity.
        eigvals_vivit = torch.zeros(nof_reps, len(config["checkpoints"]), num_classes)
        eigvals_fb = torch.zeros_like(eigvals_vivit)
        for checkpoint_idx, checkpoint in enumerate(config["checkpoints"]):
            if VERBOSE:
                case_label = case["label"]
                print(
                    f"Working on case {case_label} checkpoint {checkpoint}",
                    flush=True,
                )
            # Load checkpoint data (i.e. model and loss-function)
            checkpoint_data = load_checkpoint(problem_cls, optimizer_cls, checkpoint)
            if checkpoint_data is None:
                print("No checkpoint data was found. Skipping computations.")
                continue
            model = checkpoint_data.pop("model")
            loss_function = checkpoint_data.pop("loss_func")  # Must use ``mean``
            # Reference: Full-batch GGN (evaluated on the entire training set)
            fb_ggn = GGNLinearOperator(
                model,
                loss_function,
                training_dataloader,
                DEVICE,
                progressbar=False,
                check_deterministic=CHECK_DETERMINISTIC_FB,
            )
            for batch_idx in range(nof_reps):
                # Evaluate the eigenvalues and -vectors on a new batch
                evals_approx, evecs_approx = eval_eigspace(
                    case, model, loss_function, batch_data_list[batch_idx], num_classes
                )
                eigvals_vivit[batch_idx, checkpoint_idx, :] = evals_approx
                # Evaluate the directional derivatives on the entire data set
                # (full-batch GGN projected onto the mini-batch eigenvectors)
                eigvals_fb[batch_idx, checkpoint_idx, :] = directional_derivatives(
                    fb_ggn, evecs_approx, DEVICE
                )
        # Store results in case dict
        case["eigvals_vivit"] = tensor_to_list(eigvals_vivit)
        case["eigvals_fb"] = tensor_to_list(eigvals_fb)
        results.append(case)
    dump_json(results, json_path)
    return results
# ======================================================================================
# Plotting
# ======================================================================================
def rel_error(vec_approx, vec_exact):
    """Compute the element-wise relative error ``|approx - exact| / exact``.

    Entries where ``vec_exact`` is zero are reported as 0 instead of
    dividing by zero.

    Args:
        vec_approx: approximate values (same shape as ``vec_exact``).
        vec_exact: reference values.

    Returns:
        ``np.ndarray`` of relative errors with the inputs' shape.
    """
    assert vec_approx.shape == vec_exact.shape, "Arrays must have the same shape"
    err = np.abs(vec_approx - vec_exact)
    # BUGFIX: ``np.divide(..., where=mask)`` without ``out=`` leaves the
    # masked-out entries *uninitialized* (arbitrary memory contents). Supply
    # a zeroed output buffer so entries with vec_exact == 0 are
    # deterministically 0.
    out = np.zeros_like(err, dtype=float)
    return np.divide(err, vec_exact, out=out, where=(vec_exact != 0))
def av_rel_error(vec_approx, vec_exact):
    """Compute the mean of the element-wise relative errors (see ``rel_error``)."""
    assert vec_approx.shape == vec_exact.shape, "Arrays must have the same shape"
    errors = rel_error(vec_approx, vec_exact)
    return np.mean(errors)
def plot_case(config, case, ax):
    """Scatter one case's average relative error over all checkpoints.

    Each (repetition, checkpoint) pair contributes one marker; epochs are
    shifted by +1 so they fit on a log-scaled x-axis.
    """
    color, marker = get_case_color_marker(case)
    label = get_case_label(case)  # HOTFIX: Update case label
    eigvals_vivit = np.array(case["eigvals_vivit"])
    eigvals_fb = np.array(case["eigvals_fb"])
    nof_reps = eigvals_vivit.shape[0]
    nof_checkpoints = eigvals_vivit.shape[1]
    for rep in range(nof_reps):
        for cp_idx in range(nof_checkpoints):
            epoch = config["decimal_checkpoints"][cp_idx] + 1
            error = av_rel_error(
                eigvals_vivit[rep, cp_idx, :], eigvals_fb[rep, cp_idx, :]
            )
            ax.plot(
                epoch,
                error,
                marker,
                color=color,
                ms=1.0,
                label=label,
                alpha=0.6,
            )
            label = None  # only the first marker carries the legend entry
def plot(config, plot_data, fig_path):
    """Plot all cases in one figure and save it to ``fig_path``.

    Args:
        config: configuration dict (provides ``num_epochs`` and
            ``problem_cls``).
        plot_data: list of case dicts holding the precomputed eigenvalue data.
        fig_path: output path the figure is written to.
    """
    fig, ax = plt.subplots()
    for case in plot_data:
        plot_case(config, case, ax)
    ax.set_xlabel("epoch (log scale)")
    ax.set_ylabel("av. rel. error (log scale)")
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.tick_params(  # Remove minor ticks
        axis="y", which="minor", left=False, right=False, labelleft=False
    )
    # Ticks (shift epochs back, see above) and grid
    xticks, xticklabels = get_xticks_labels(config["num_epochs"])
    xticks = (np.array(xticks) + 1).astype(int).tolist()
    ax.tick_params(  # Remove minor ticks
        axis="x", which="minor", bottom=False, top=False, labelbottom=False
    )
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)
    # Additional settings: grid and thin black spines on all four sides
    ax.grid(which="major", ls="dashed", lw=0.4, dashes=(7, 7))
    for axis in ["top", "bottom", "left", "right"]:
        ax.spines[axis].set_visible(True)
        ax.spines[axis].set_color("k")
        ax.spines[axis].set_linewidth(0.4)
    ax.xaxis.set_tick_params(width=0.5, direction="in", length=5)
    ax.yaxis.set_tick_params(width=0.5, direction="in", length=5)
    [t.set_color("k") for t in ax.xaxis.get_ticklabels()]
    [t.set_color("k") for t in ax.yaxis.get_ticklabels()]
    # Single-column legend for the cifar100_allcnnc problem.
    ncol = 2
    if config["problem_cls"] in [testproblems.cifar100_allcnnc]:
        ncol = 1
    leg = ax.legend(ncol=ncol)
    leg.get_frame().set_linewidth(0.5)
    fig.savefig(fig_path)
    plt.close(fig)
# ======================================================================================
# Main-function: Coordinate the computation of the plotting data and the figure
# ======================================================================================
if __name__ == "__main__":
    # Parse command line argument (the configuration string, e.g.
    # "fmnist_2c2d_sgd") and map it to an actual configuration dict.
    parser = get_config_str_parser()
    args = parser.parse_args()
    config_str = args.config_str
    print(f"\nconfig_str = {config_str}")
    config = config_str_to_config(config_str)
    # Reduce number of checkpoints and number of repetitions
    nof_reps = get_nof_reps(config)
    fewer_checkpoints = get_fewer_checkpoints(config)
    config["checkpoints"] = fewer_checkpoints
    config = _add_decimal_checkpoints(config)  # update decimal checkpoints
    print(f"\nUsing nof_reps = {nof_reps} and {len(fewer_checkpoints)} checkpoints.")
    # Set up and check cases
    cases = get_cases(config)
    if VERBOSE:
        print(f"\ncases (total: {len(cases)}):")
        for case in cases:
            print(case)
    # Compute plotting data if necessary (reuse an existing JSON file unless
    # RECOMPUTE_DATA forces a recomputation)
    json_path = get_plot_savepath(config_str + "_plot_data", extension=".json")
    if VERBOSE:
        print(f"\nChecking for json file at {json_path}")
    if not os.path.exists(json_path) or RECOMPUTE_DATA:
        print("Computing plotting data.")
        plot_data = eval_config(config, cases, json_path, nof_reps)
    else:
        print(f"Skipping computation. Using existing file {json_path}")
        plot_data = load_json(json_path)
    # Compute figure if necessary (reuse an existing figure unless
    # RECOMPUTE_FIG forces a recomputation)
    fig_path = get_plot_savepath(config_str + "_plot", extension=".pdf")
    if VERBOSE:
        print(f"\nChecking for figure at {fig_path}")
    if not os.path.exists(fig_path) or RECOMPUTE_FIG:
        print("Computing figure.")
        plot(config, plot_data, fig_path)
    else:
        print(f"Skipping computation. Using existing file {fig_path}")
| [
"utils_shared.eval_eigspace_pi",
"exp.utils.deepobs.get_deterministic_deepobs_train_loader",
"utils_shared.dump_json",
"numpy.array",
"torch.cuda.is_available",
"numpy.divide",
"plot_shared.get_case_color_marker",
"os.path.exists",
"utils_shared.get_deepobs_dataloader",
"utils_shared.tensor_to_lis... | [((1597, 1615), 'run_plot_eigvals_vivit_vs_fb.get_plot_savedir', 'get_plot_savedir', ([], {}), '()\n', (1613, 1615), False, 'from run_plot_eigvals_vivit_vs_fb import get_plot_savedir\n'), ((1627, 1675), 'os.path.join', 'os.path.join', (['savedir', 'f"""{file_name}{extension}"""'], {}), "(savedir, f'{file_name}{extension}')\n", (1639, 1675), False, 'import os\n'), ((3399, 3421), 'utils_shared.check_cases', 'check_cases', (['all_cases'], {}), '(all_cases)\n', (3410, 3421), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((6305, 6325), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6322, 6325), False, 'import torch\n'), ((6352, 6415), 'exp.utils.deepobs.get_deterministic_deepobs_train_loader', 'get_deterministic_deepobs_train_loader', (['problem_cls', 'BATCH_SIZE'], {}), '(problem_cls, BATCH_SIZE)\n', (6390, 6415), False, 'from exp.utils.deepobs import get_deterministic_deepobs_train_loader\n'), ((6544, 6564), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6561, 6564), False, 'import torch\n'), ((8681, 8710), 'utils_shared.dump_json', 'dump_json', (['results', 'json_path'], {}), '(results, json_path)\n', (8690, 8710), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((9118, 9148), 'numpy.abs', 'np.abs', (['(vec_approx - vec_exact)'], {}), '(vec_approx - vec_exact)\n', (9124, 9148), True, 'import numpy as np\n'), ((9160, 9207), 'numpy.divide', 'np.divide', (['err', 'vec_exact'], {'where': '(vec_exact != 0)'}), '(err, vec_exact, where=vec_exact != 0)\n', (9169, 9207), True, 'import numpy as np\n'), ((9519, 9546), 'plot_shared.get_case_color_marker', 
'get_case_color_marker', (['case'], {}), '(case)\n', (9540, 9546), False, 'from plot_shared import get_case_color_marker, get_xticks_labels\n'), ((9559, 9579), 'utils_shared.get_case_label', 'get_case_label', (['case'], {}), '(case)\n', (9573, 9579), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((9630, 9661), 'numpy.array', 'np.array', (["case['eigvals_vivit']"], {}), "(case['eigvals_vivit'])\n", (9638, 9661), True, 'import numpy as np\n'), ((9679, 9707), 'numpy.array', 'np.array', (["case['eigvals_fb']"], {}), "(case['eigvals_fb'])\n", (9687, 9707), True, 'import numpy as np\n'), ((10429, 10443), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10441, 10443), True, 'from matplotlib import pyplot as plt\n'), ((10847, 10886), 'plot_shared.get_xticks_labels', 'get_xticks_labels', (["config['num_epochs']"], {}), "(config['num_epochs'])\n", (10864, 10886), False, 'from plot_shared import get_case_color_marker, get_xticks_labels\n'), ((11841, 11855), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11850, 11855), True, 'from matplotlib import pyplot as plt\n'), ((12191, 12214), 'utils_shared.get_config_str_parser', 'get_config_str_parser', ([], {}), '()\n', (12212, 12214), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((12334, 12366), 'config.config_str_to_config', 'config_str_to_config', (['config_str'], {}), '(config_str)\n', (12354, 12366), False, 'from config import _add_decimal_checkpoints, config_str_to_config\n'), ((12578, 12610), 'config._add_decimal_checkpoints', '_add_decimal_checkpoints', (['config'], {}), '(config)\n', (12602, 12610), False, 'from config import _add_decimal_checkpoints, 
config_str_to_config\n'), ((1704, 1723), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (1721, 1723), False, 'from torch import cuda, device\n'), ((3374, 3394), 'utils_shared.get_case_label', 'get_case_label', (['case'], {}), '(case)\n', (3388, 3394), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((5187, 5290), 'utils_shared.eval_eigspace_vivit', 'eval_eigspace_vivit', (['case', 'model', 'loss_function', 'batch_data', 'top_C'], {'device': 'DEVICE', 'verbose': 'VERBOSE'}), '(case, model, loss_function, batch_data, top_C, device=\n DEVICE, verbose=VERBOSE)\n', (5206, 5290), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((6803, 6834), 'torch.zeros_like', 'torch.zeros_like', (['eigvals_vivit'], {}), '(eigvals_vivit)\n', (6819, 6834), False, 'import torch\n'), ((8561, 8590), 'utils_shared.tensor_to_list', 'tensor_to_list', (['eigvals_vivit'], {}), '(eigvals_vivit)\n', (8575, 8590), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((8620, 8646), 'utils_shared.tensor_to_list', 'tensor_to_list', (['eigvals_fb'], {}), '(eigvals_fb)\n', (8634, 8646), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((13368, 13388), 'utils_shared.load_json', 'load_json', (['json_path'], {}), '(json_path)\n', (13377, 13388), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, 
eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((5429, 5569), 'utils_shared.eval_eigspace_pi', 'eval_eigspace_pi', (['model', 'loss_function', 'batch_data', 'top_C'], {'device': 'DEVICE', 'check_deterministic': 'CHECK_DETERMINISTIC_BATCH', 'verbose': 'VERBOSE'}), '(model, loss_function, batch_data, top_C, device=DEVICE,\n check_deterministic=CHECK_DETERMINISTIC_BATCH, verbose=VERBOSE)\n', (5445, 5569), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((6592, 6639), 'utils_shared.get_deepobs_dataloader', 'get_deepobs_dataloader', (['problem_cls', 'BATCH_SIZE'], {}), '(problem_cls, BATCH_SIZE)\n', (6614, 6639), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((7227, 7282), 'eval.load_checkpoint', 'load_checkpoint', (['problem_cls', 'optimizer_cls', 'checkpoint'], {}), '(problem_cls, optimizer_cls, checkpoint)\n', (7242, 7282), False, 'from eval import load_checkpoint\n'), ((7658, 7793), 'vivit.hessianfree.GGNLinearOperator', 'GGNLinearOperator', (['model', 'loss_function', 'training_dataloader', 'DEVICE'], {'progressbar': '(False)', 'check_deterministic': 'CHECK_DETERMINISTIC_FB'}), '(model, loss_function, training_dataloader, DEVICE,\n progressbar=False, check_deterministic=CHECK_DETERMINISTIC_FB)\n', (7675, 7793), False, 'from vivit.hessianfree import GGNLinearOperator\n'), ((13111, 13136), 'os.path.exists', 'os.path.exists', (['json_path'], {}), '(json_path)\n', (13125, 13136), False, 'import os\n'), ((13579, 13603), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (13593, 13603), False, 'import os\n'), ((8399, 8452), 
'utils_shared.directional_derivatives', 'directional_derivatives', (['fb_ggn', 'evecs_approx', 'DEVICE'], {}), '(fb_ggn, evecs_approx, DEVICE)\n', (8422, 8452), False, 'from utils_shared import check_cases, directional_derivatives, dump_json, eval_eigspace_pi, eval_eigspace_vivit, get_case_label, get_config_str_parser, get_deepobs_dataloader, load_json, tensor_to_list\n'), ((10901, 10917), 'numpy.array', 'np.array', (['xticks'], {}), '(xticks)\n', (10909, 10917), True, 'import numpy as np\n')] |
import numpy as np
from flask import Flask, request, jsonify, render_template, url_for
import pickle
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
# Disable static-file caching so template/asset edits show up immediately.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# NOTE(review): this scaler is created unfitted; the request handlers fit it
# on each incoming sample (see predict()). The scaler fitted on the training
# data should be persisted and loaded instead -- TODO confirm.
standard_x = StandardScaler()
# Load the trained model once at startup.
# Bug fix: the original used pickle.load(open(...)) which never closes the
# file handle; a context manager guarantees it is released.
with open('vadodara_house_model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def home():
    """Serve the landing page containing the input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI.

    Reads the posted form fields as integers, scales them, runs the model
    and renders the page with the estimated price.
    '''
    int_features = [int(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    # NOTE(review): fitting the scaler on a single request sample makes the
    # mean equal the sample itself, zeroing the features; the scaler fitted on
    # the training data should be loaded instead -- TODO confirm with training code.
    standard_value = standard_x.fit_transform(final_features)
    # Bug fix: the original called model.predict(standard_x.transform(standard_value)),
    # scaling the already-scaled values a second time. Predict on them once.
    prediction = model.predict(standard_value)
    output = round(prediction[0], 2)
    return render_template('index.html', prediction_text='Estimated House Price should be ₹ {}'.format(output))
@app.route('/predict_api', methods=['POST'])
def predict_api():
    '''
    For direct API calls through request.

    Expects a JSON object whose values are the raw features; responds with
    the predicted price as a JSON number.
    '''
    data = request.get_json(force=True)
    # NOTE(review): fit_transform on one sample zeroes the features (mean ==
    # sample); a scaler fitted on the training data should be used -- TODO confirm.
    standard_value = standard_x.fit_transform([np.array(list(data.values()))])
    # Bug fix: apply the scaler only once (the original transformed the
    # already-transformed values a second time before predicting).
    prediction = model.predict(standard_value)
    # Cast to a plain float: numpy scalar types are not JSON serializable.
    output = float(prediction[0])
    return jsonify(output)
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger,
    # which allows arbitrary code execution -- never use in production.
    app.run(debug=True)
| [
"flask.render_template",
"flask.Flask",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"flask.request.get_json",
"flask.request.form.values",
"flask.jsonify"
] | [((157, 172), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((230, 246), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (244, 246), False, 'from sklearn.preprocessing import StandardScaler\n'), ((348, 377), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (363, 377), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((1011, 1039), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1027, 1039), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((1227, 1242), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (1234, 1242), False, 'from flask import Flask, request, jsonify, render_template, url_for\n'), ((571, 593), 'numpy.array', 'np.array', (['int_features'], {}), '(int_features)\n', (579, 593), True, 'import numpy as np\n'), ((526, 547), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (545, 547), False, 'from flask import Flask, request, jsonify, render_template, url_for\n')] |
import nnef
import os, fnmatch
import argparse
import struct
from bitarray import bitarray
import numpy as np
import string
'''
Compiler for 3PXNet. Compiles a neural network stored in NNEF format to C using inference engine.
'''
'''
NOTIFICATIONS:
variablen is a dictionary, its keys are variable_# and value is a list, [non-pruned inputs, the operation object whose output name is variable_#]
variables is also a dictionary, its keys are variable_# and value is the file name of this data
batchn is also a dictionary, its keys are indices of graph object, where there is a batchnorm operation, the values are variable_#, who are input parameter to this operation
The workflow is like this: given a graph, first this program will read all of its operations and determine whether a given operation is able to be compiled or not.
Then it reads the data files and put the values into header files, i.e., decoding_data. After that, threshold and sign needed for some batchnorm layers are computed.
Then it starts writing source file. total_ops stores indices of graph where matrix multiplication, whether conv or fc, takes place.
To decide whether a batchnorm follows a given layer, it looks ahead for the next matrix multiplication; if there is a batchnorm operation between these two, then that layer has a
batchnorm, and vice versa.
'''
class convert(object):
    def __init__(self,input_dir,dataset,test_start_id,test_end_id):
        '''
        initialize a convert object
        :param input_dir: the input directory, its name should end with .nnef
        :param dataset: dataset to test against
        :param test_start_id: testing dataset start index
        :param test_end_id: testing dataset end index
        '''
        self.input_dir=input_dir
        self.dataset=dataset
        self.test_start_id=int(test_start_id)
        self.test_end_id=int(test_end_id)
        # placeholder (the class itself); replaced by the parsed graph in loadgraph()
        self.graph=nnef.Graph
        # batch norm tensors, keyed by the NNEF variable name that produced them
        self.var = {}
        self.mean = {}
        self.gamma = {}
        self.beta = {}
        # store variables with their names as keys
        # for specific information, please see NOTIFICATIONS above
        self.variablen = {}
        self.variables = {}
        # store index of propagation in the graph
        self.matrixmul = []
        self.conv = []
        self.batchn = {}
        #input shape
        self.in_shape = []
        self.rank = []
        # which batch norm layer is the last one
        self.batch_last=" "
        # IO object of the generated C source file we are writing to
        self.source=0
        #permutation list. For specific information, please see training engine
        #as well as the paper the whole project is based on.
        self.list=[]
        # temp* fields hold the previous layer's state so its output channels
        # can be permuted once the next layer's permutation list is known
        self.tempweight=[]
        self.tempsparse=False
        self.tempoutput=0
        self.name=" "
        self.lastlist=[]
        self.tempvar=" "
        self.tempmean=""
        self.tempgamma=""
        self.tempbeta=""
def replace_non_ascii(self,stri):
'''
Replace all non ascii, including . , in the file name to _
:param stri: input string
:return: the input string with non-character or non-digit being replaced by _
'''
return ''.join([i if i in string.ascii_letters or i in string.digits else '_' for i in stri])
def search_non_ascii(self,stri):
'''
Search for the first letter that is not letter or digit
Needed for determine last layer of batch norm
:param stri: input string
:return: the first index of non-character or non-digit char
'''
for i in range(len(stri)):
if not (stri[i] in string.ascii_letters or stri[i] in string.digits):
return i
    def loadgraph(self):
        '''
        load the nnef graph into compiler
        Changes the working directory to self.input_dir and parses graph.nnef
        into self.graph.
        NOTE(review): on a missing graph.nnef this prints an error and calls
        exit(), terminating the whole process.
        '''
        print(self.input_dir)
        os.chdir(self.input_dir)
        if "graph.nnef" not in os.listdir("."):
            print("ERROR: BAD NNEF DIRECTORY!")
            exit()
        else:
            self.graph = nnef.load_graph('graph.nnef')
            print("Graph loaded")
    def find_batch_last(self):
        '''
        Determines the last layer
        The last layer will not be binarized, so batchnorm has to be dealt with differently
        Requires NNEF graph to be loaded (using loadgraph())
        Sets self.batch_last to the label prefix of the last batch norm layer,
        or " " if the network does not end in batch norm.
        :return: NA
        '''
        #find out the last matrix multiplication or convolution operation
        batch_last = next(i for i in reversed(range(len(self.graph.operations))) if
                          self.graph.operations[i].name == 'matmul' or self.graph.operations[i].name == 'conv')
        # If True, last layer is batch norm, otherwise false
        lastBnFound=False
        #if there is batch norm after the last matmul or conv, then that is the last batch norm layer
        for i in range(batch_last,len(self.graph.operations)):
            if self.graph.operations[i].name=='batch_normalization':
                for ops in self.graph.operations:
                    # Get the variable which is the input to the batch_normalization layer
                    if ops.outputs['output']==self.graph.operations[i].inputs['mean']: #get the name for the last batch norm layer
                        batch_last=ops.attribs['label']
                        lastBnFound=True
                        break
        if lastBnFound:
            # If found, save the label for the last batchnorm layer
            self.batch_last=batch_last[0:self.search_non_ascii(batch_last)] #cutoff the ".mean" part from batch_last
        else:
            self.batch_last=" "
    def write_source_first(self):
        '''
        Write to the source file include headers for variables
        Creates (if needed) the autogen/ directory next to the NNEF input,
        opens autogen/source.c as self.source, emits the generic and
        inference-engine #includes, then returns to the NNEF directory.
        :return: NA
        '''
        os.chdir("../3pxnet-compiler")
        if "autogen" not in os.listdir("."):
            os.mkdir("autogen")
        os.chdir("autogen")
        source = open("source.c", 'w+')
        self.source=source
        # Write generic headers
        source.write("#include <stdio.h>\n")
        source.write("#include <stdlib.h>\n")
        source.write("#include <stdint.h>\n")
        source.write("#include <string.h>\n")
        source.write("#include <math.h>\n")
        source.write("#include <time.h>\n")
        source.write("#include <errno.h>\n")
        # Write model specific headers
        # TODO: don't include headers that are not neeed for a particular model
        source.write("#include \"datatypes.h\"\n")
        source.write("#include \"utils.h\"\n")
        source.write("#include \"xnor_base.h\"\n")
        source.write("#include \"xnor_fc.h\"\n")
        source.write("#include \"3pxnet_fc.h\"\n")
        source.write("#include \"3pxnet_cn.h\"\n")
        source.write("#include \"xnor_fc.h\"\n")
        source.write("#include \"bwn_dense_cn.h\"\n")
        # Return to the NNEF input directory so the caller can keep reading .dat files
        os.chdir("..")
        os.chdir(self.input_dir)
    def writefc(self, write, rank, temp_array, sparse, output, name):
        '''
        Write fc layer's data into C headers
        Packs the sign bits of 32 consecutive input weights into one 32-bit word;
        for sparse layers, packs whose 32 weights are all zero are skipped and
        the kept packs' positions are recorded in indices.
        :param write: whether to write or not(related to permutation issue)
        :param rank: weight's shape
        :param temp_array: weight data
        :param sparse: whether the layer is sparse or not
        :param output: IO object, corresponding to the header file it's writing to
        :param name: name of the header file
        :return: indices: if it is a sparse layer, indices are used to calculate # non-pruned inputs
        '''
        indices = []
        if write:
            print("Writing to header " + name + ".h ...")
            output.write("#define _" + name + " {\\\n")
        # NNEF format weight values are stored in row-major order.
        # So for a fc layer, its shape is [input, output]
        for i in range(rank[1]):
            # outtemp is used to store packs (1 bit per weight sign)
            # mask is used to check whether a given pack is all zero
            outtemp = bitarray()
            mask = bitarray()
            for j in range(rank[0]):
                temp = temp_array[j, i]
                if temp >= 0:
                    outtemp.append(1)
                else:
                    outtemp.append(0)
                mask.append(temp == 0)
                if j % 32 == 31:
                    if sparse:
                        # a pack is all zero: drop it (pruned away)
                        if int(mask.to01(), 2) == 2 ** 32 - 1:
                            outtemp = bitarray()
                            mask = bitarray()
                        else:
                            if write:
                                output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                            indices.append(int(j % rank[0] / 32))
                            outtemp = bitarray()
                            mask = bitarray()
                    else:
                        if write:
                            output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                        outtemp = bitarray()
                        mask = bitarray()
        if write:
            output.write("}\n")
            if sparse:
                # emit the kept-pack positions alongside the packed weights
                output.write("#define _" + name + "_indices {\\\n")
                for i in range(len(indices)):
                    output.write(str(indices[i]) + ", \\\n")
                output.write("}\n")
            output.close()
        return indices
    def writecn(self, write, rank, temp_array, sparse, output, name):
        '''
        Write conv layer's data into C headers
        The same as fc layer, NNEF format stores value in row-major order
        So for a conv layer, the shape is [n,z,y,x]
        But, I modified this order during decoding data time.
        So now the input temp_array has a shape [x,y,z,n] while rank stays [n,z,y,x]
        :param write: whether to write or not(related to permutation issue)
        :param rank: weight's shape
        :param temp_array: weight data
        :param sparse: whether the layer is sparse or not
        :param output: IO object, corresponding to the header file it's writing to
        :param name: name of the header file
        :return: indices: if it is a sparse layer, indices are used to calculate # non-pruned inputs
        '''
        indices = []
        if write:
            print("Writing to header " + name + '.h ...')
            output.write("#define _" + name + " {\\\n")
        for n in range(rank[0]):
            # outtemp is used to store packs (1 bit per weight sign)
            # mask is used to check whether a given pack is all zero
            outtemp = bitarray()
            mask = bitarray()
            for y in range(rank[2]):
                for x in range(rank[3]):
                    # pack along the input-channel (z) dimension, 32 weights per word
                    for z in range(rank[1]):
                        temp = temp_array[x, y, z, n]
                        if temp >= 0:
                            outtemp.append(1)
                        else:
                            outtemp.append(0)
                        mask.append(temp == 0)
                        if z % 32 == 31:
                            if sparse:
                                # a pack is all zero: drop it (pruned away)
                                if int(mask.to01(), 2) == 2 ** 32 - 1:
                                    outtemp = bitarray()
                                    mask = bitarray()
                                else:
                                    if write:
                                        output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                                    # flattened pack position within the kernel (z-major, then x, then y)
                                    indices.append(int(z / 32) + x * int(rank[1] / 32) + y * rank[3] * int(
                                        rank[1] / 32))
                                    outtemp = bitarray()
                                    mask = bitarray()
                            else:
                                if write:
                                    output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                                outtemp = bitarray()
                                mask = bitarray()
        if write:
            output.write("}\n")
            if sparse:
                # emit the kept-pack positions alongside the packed weights
                output.write("#define _" + name + "_indices {\\\n")
                for i in range(len(indices)):
                    output.write(str(indices[i]) + ", \\\n")
                output.write("}\n")
            output.close()
        return indices
    def decoding_data(self, input, output, name, last, identity, first):
        '''
        Processing a given .dat file stored in NNEF format
        To be specific, a NNEF formatted neural network contains a .graph file and several .dat files.
        This function deals with a given .dat file. it first reads in specifications of this file, such as
        Its length and its shape. Then it will translate weights stored in binary in this .dat file into packs or digits.
        The actual writing-to-header process is done by writecn and writefc functions.
        :param input: IO object, corresponding to the .dat file it's reading from
        :param output: IO object, corresponding to the header file it's writing to
        :param name: name of the header file
        :param last: whether the given .dat file is corresponded with the last batch norm layer
        :param identity: whether the given .dat file contains values for conv/fc/batchnorm layer
        :param first: whether it is the first matrix operations in the graph. Needed for permutation issue
        :return: if the input file is a fc or cn layer and it's sparse, then non-pruned inputs number is returned.
        if the input file is a batch norm layer and it's not the last one, a list with all its values are returned.
        otherwise, return 0
        '''
        # --- parse the NNEF .dat container header (fixed 128-byte layout) ---
        # Skip NNEF header
        input.read(4)
        # Length of the data in bytes
        length = int.from_bytes(input.read(4), byteorder='little')
        # Number of dimensions the data
        rank_n = int.from_bytes(input.read(4), byteorder='little')
        rank = [] # n,z,y,x
        # Determine layer type (identity: 0 = batchnorm, 1 = fc, 2 = conv)
        batch = (identity == 0)
        fc = (identity == 1)
        cn = (identity == 2)
        # Get dimension sizes
        for i in range(0, rank_n):
            rank.append(int.from_bytes(input.read(4), byteorder='little'))
        # Skip padding
        input.read((8 - rank_n) * 4)
        bits_per_item = int.from_bytes(input.read(4), byteorder='little')
        input.read(2)
        size = int(bits_per_item / 8)
        # interpret as float or int
        # Variables used for quantization
        algo = int.from_bytes(input.read(2), byteorder='big')
        signess = int.from_bytes(input.read(4), byteorder='little')
        # TODO: more about linear and log quantize later
        # reference: https://www.khronos.org/registry/NNEF/specs/1.0/nnef-1.0.2.html#container-structure
        input.seek(128, 0)
        # start reading data
        # Flag for sparse operations
        sparse = False
        indices = []
        result = []
        # fc needs to be packed in column-major order
        if fc:
            # Holds decoded weight values
            temp_array = np.zeros((rank[0], rank[1]))
            for i in range(rank[0]):
                for j in range(rank[1]):
                    temp = list(input.read(size))
                    # changing endianess
                    for b in range(0, int(len(temp) / 2)):
                        temp1 = temp[b]
                        temp[b] = temp[len(temp) - b - 1]
                        temp[len(temp) - b - 1] = temp1
                    temp = bytes(temp)
                    # decode as float
                    # If there is a zero, treat as sparse
                    if struct.unpack('!f', temp)[0] == 0:
                        sparse = True
                    temp_array[i, j] = struct.unpack('!f', temp)[0]
            # permutation: look for a saved <layer>_list.npy next to the weights
            os.chdir('..')
            os.chdir(self.input_dir)
            # True if permutation is required
            flag = False
            for root, dirs, files in os.walk("."):
                for name1 in files:
                    if fnmatch.fnmatch(name1.replace('_', ''), name.replace('weight', 'list.npy').replace('_', '')):
                        print("Permuting...")
                        flag = True
                        temp_weight = np.zeros((rank[0], rank[1]))
                        permute_list = np.load(name1)
                        if first:
                            self.list = permute_list
                        # permute input channel for current layer so that we can pack weights
                        for i in range(rank[0]):
                            temp_weight[i, 0:] = np.copy(temp_array[permute_list[0, i], 0:])
                        # permute output channel for last layer so that channels match
                        if len(self.tempweight) != 0:
                            tt = np.copy(self.tempweight)
                            if len(tt.shape) == 4:
                                for j in range(tt.shape[3]):
                                    self.tempweight[0:, 0:, 0:, j] = np.copy(tt[0:, 0:, 0:, permute_list[0, j]])
                                self.writecn(True, [tt.shape[3], tt.shape[2], tt.shape[1], tt.shape[0]], self.tempweight,
                                             self.tempsparse, self.tempoutput, self.name)
                            else:
                                for i in range(rank[0]):
                                    self.tempweight[0:, i] = np.copy(tt[0:, permute_list[0, i]])
                                self.writefc(True, tt.shape, self.tempweight, self.tempsparse, self.tempoutput, self.name)
                            # permute the last batch layer as well
                            tt = np.copy(self.var[self.tempvar])
                            for i in range(rank[0]):
                                self.var[self.tempvar][i] = np.copy(tt[permute_list[0, i]])
                            tt = np.copy(self.mean[self.tempmean])
                            for i in range(rank[0]):
                                self.mean[self.tempmean][i] = np.copy(tt[permute_list[0, i]])
                            tt = np.copy(self.gamma[self.tempgamma])
                            for i in range(rank[0]):
                                self.gamma[self.tempgamma][i] = np.copy(tt[permute_list[0, i]])
                            tt = np.copy(self.beta[self.tempbeta])
                            for i in range(rank[0]):
                                self.beta[self.tempbeta][i] = np.copy(tt[permute_list[0, i]])
                        temp_array = temp_weight
                        # save this layer's state so that later we can permute its output channel
                        self.tempweight = temp_array
                        self.tempoutput = output
                        self.tempsparse = sparse
                        self.name = name
                        self.lastlist = permute_list
                        break
            # if there is nothing to be permuted, meaning this layer is not on the temp state in this class
            # so we directly write them into header file
            # otherwise, wait for it to be permuted by next layer
            if flag:
                indices = self.writefc(False, rank, temp_array, sparse, output, name)
            else:
                indices = self.writefc(True, rank, temp_array, sparse, output, name)
            os.chdir("../3pxnet-compiler/autogen")
        elif cn:
            # first layer in a cnn (input channels not a multiple of 32)
            # it uses binarized dense layer, so we don't pack it
            if rank[1] % 32 != 0:
                output.write("#define _" + name + " {\\\n")
                temp_array = np.zeros((rank[0], rank[1], rank[2], rank[3]))
                for n in range(rank[0]):
                    for z in range(rank[1]):
                        for y in range(rank[2]):
                            for x in range(rank[3]):
                                temp = list(input.read(size))
                                # changing endianess
                                for b in range(0, int(len(temp) / 2)):
                                    temp1 = temp[b]
                                    temp[b] = temp[len(temp) - b - 1]
                                    temp[len(temp) - b - 1] = temp1
                                temp = bytes(temp)
                                if struct.unpack('!f', temp)[0] == 0:
                                    sparse = True
                                temp_array[n, z, y, x] = struct.unpack('!f', temp)[0]
                print("Sparse?: " + str(sparse))
                # emit one row of unpacked weight values per output channel
                for n in range(rank[0]):
                    for y in range(rank[2]):
                        for x in range(rank[3]):
                            for z in range(rank[1]):
                                temp = temp_array[n, z, y, x]
                                output.write(str(int(temp)) + ", ")
                    output.write('\\\n')
                output.write("}\n")
                output.close()
            # other conv layers in a cnn
            else:
                # store transposed as [x,y,z,n] so writecn can pack along z
                temp_array = np.zeros((rank[3], rank[2], rank[1], rank[0]))
                for n in range(rank[0]):
                    for z in range(rank[1]):
                        for y in range(rank[2]):
                            for x in range(rank[3]):
                                temp = list(input.read(size))
                                # changing endianess
                                for b in range(0, int(len(temp) / 2)):
                                    temp1 = temp[b]
                                    temp[b] = temp[len(temp) - b - 1]
                                    temp[len(temp) - b - 1] = temp1
                                temp = bytes(temp)
                                if struct.unpack('!f', temp)[0] == 0:
                                    sparse = True
                                temp_array[x, y, z, n] = struct.unpack('!f', temp)[0]
                print("Sparse?: " + str(sparse))
                # permutation: look for a saved <layer>_list.npy next to the weights
                os.chdir('..')
                os.chdir(self.input_dir)
                flag = False
                for root, dirs, files in os.walk("."):
                    for name1 in files:
                        if fnmatch.fnmatch(name1.replace('_', ''), name.replace('weight', 'list.npy').replace('_', '')):
                            print("Permuting...")
                            flag = True
                            temp_weight = np.zeros((rank[3], rank[2], rank[1], rank[0]))
                            permute_list = np.load(name1)
                            if first:
                                self.list = permute_list
                            # permute input channel of current layer
                            for j in range(rank[0]):
                                for i in range(rank[1]):
                                    temp_weight[0:, 0:, i, j] = np.copy(temp_array[0:, 0:, permute_list[0, i], j])
                            # permute output channel of last layer
                            # since it's not possible to have a fc layer before a conv layer,
                            # we don't consider that case here.
                            if len(self.tempweight) != 0:
                                tt = np.copy(self.tempweight)
                                for j in range(tt.shape[3]):
                                    self.tempweight[0:, 0:, 0:, j] = np.copy(tt[0:, 0:, 0:, permute_list[0, j]])
                                self.writecn(True, [tt.shape[3], tt.shape[2], tt.shape[1], tt.shape[0]],
                                             self.tempweight, self.tempsparse, self.tempoutput, self.name)
                                # permute the last batch layer as well
                                tt = np.copy(self.var[self.tempvar])
                                for i in range(rank[0]):
                                    self.var[self.tempvar][i] = np.copy(tt[permute_list[0, i]])
                                tt = np.copy(self.mean[self.tempmean])
                                for i in range(rank[0]):
                                    self.mean[self.tempmean][i] = np.copy(tt[permute_list[0, i]])
                                tt = np.copy(self.gamma[self.tempgamma])
                                for i in range(rank[0]):
                                    self.gamma[self.tempgamma][i] = np.copy(tt[permute_list[0, i]])
                                tt = np.copy(self.beta[self.tempbeta])
                                for i in range(rank[0]):
                                    self.beta[self.tempbeta][i] = np.copy(tt[permute_list[0, i]])
                            temp_array = temp_weight
                            # save this layer's state so that later we can permute its output channel
                            self.tempweight = temp_array
                            self.tempoutput = output
                            self.tempsparse = sparse
                            self.name = name
                            self.lastlist = permute_list
                            break
                if flag:
                    indices = self.writecn(False, rank, temp_array, sparse, output, name)
                else:
                    indices = self.writecn(True, rank, temp_array, sparse, output, name)
                os.chdir("../3pxnet-compiler/autogen")
        # batchnorm
        else:
            if last:
                print("Writing to header " + name + ".h ...")
            for i in range(int(length / size)):
                # One great feature of NNEF is it doesn't use many concrete data types. Therefore, there are several
                # encoding algorithms provided. Since current training engine will not train weights whose data types
                # are not float, this converter does not support any other encoding algorithm
                # TODO: depending on encoding algorithm, theoretically we should decode numbers in different ways
                # TODO: more support for this later
                # reference: https://www.khronos.org/registry/NNEF/specs/1.0/nnef-1.0.2.html#container-structure
                if algo == 0:
                    temp = list(input.read(size))
                    # changing endianess
                    for j in range(0, int(len(temp) / 2)):
                        temp1 = temp[j]
                        temp[j] = temp[len(temp) - j - 1]
                        temp[len(temp) - j - 1] = temp1
                    temp = bytes(temp)
                    if last and "var" in name:
                        # what we really need is standard deviation, not variance
                        # because it will be considered in inference
                        output.write(str(np.sqrt(struct.unpack('!f', temp)[0])) + ", \\\n")
                    elif last:
                        output.write(str(struct.unpack('!f', temp)[0]) + ", \\\n")
                    else:
                        result.append(struct.unpack('!f', temp)[0])
                elif algo == 1 and signess == 0:
                    output.write(str(int.from_bytes(input.read(size), byteorder='little')) + ", \\\n")
                else:
                    output.write(str(int.from_bytes(input.read(size), byteorder='little', signed=True)) + ", \\\n")
            if last:
                output.write("}\n")
        if batch:
            return result
        # return non-pruned input number
        elif sparse and fc:
            return int(32 * len(indices) / rank[1])
        elif sparse and cn:
            return int(32 * len(indices) / rank[0])
        else:
            return 0
def processing_graph(self):
'''
For every operation in the graph, determine whether we can translate into C using inference engine or not
If not, then there will be WARNING or ERROR printed on screen
If we can, then corresponding data files are decoded and written
It uses a lot of dictionary type data structures. For detailed information, please see NOTIFICATIONS
:return: NA
'''
rank=[]
i=0
# NNEF loaded graph.operations is guaranteed to be in order
for ops in self.graph.operations:
print("-----------------------------------------")
print("Operation #"+str(i)+": Start")
print("Operation name: "+ops.name)
#a convolutional layer
if ops.name =='conv':
#the convolution filter/kernel
mat=ops.inputs['filter']
for t in self.graph.operations:
#find out which file is its data
if t.outputs['output']==mat:
self.variablen[t.outputs['output']]=[] #t.outputs['output'] has form variable_xx
print("Reading weight data from "+t.attribs['label']+".dat ...")
ma=open(t.attribs['label']+'.dat','rb')
os.chdir("../3pxnet-compiler/autogen")
# Create C header file
# Replace dots with slashes
head=open(self.replace_non_ascii(t.attribs['label'])+'.h','w+')
npi=self.decoding_data(ma,head,self.replace_non_ascii(t.attribs['label']),False,2,
len(self.matrixmul)==0 and len(self.conv)==0)
# For dense, npi=0
if npi!=0:
print("Packs per kernel #: "+str(int(npi/32)))
# Store weight information
self.variablen[t.outputs['output']].append(npi)
self.variablen[t.outputs['output']].append(t)
self.source.write("#include \""+self.replace_non_ascii(t.attribs['label'])+'.h'+"\" \n")
self.variables[t.outputs['output']]=t.attribs['label']
ma.close()
os.chdir("..")
os.chdir(self.input_dir)
assert len(rank)==4
print("Padding: "+str(ops.attribs['padding'][0][0]))
#update current data shape
rank[3]=rank[3]+2*ops.attribs['padding'][0][0]-t.attribs['shape'][3]+1
rank[2] = rank[2] + 2 * ops.attribs['padding'][0][0] - t.attribs['shape'][2] + 1
rank[1] = t.attribs['shape'][0]
rank[0]=1
if ops.attribs['stride']!=[1,1]:
print("ERROR: current 3PXNet does not support stride")
exit()
break
self.conv.append(i)
#a fc layer
elif ops.name=='matmul':
#the kernel of fc layer
mat=ops.inputs['B']
for t in self.graph.operations:
if t.outputs['output']==mat:
self.variablen[t.outputs['output']]=[]
ma=open(t.attribs['label']+'.dat','rb')
os.chdir("../3pxnet-compiler/autogen")
head=open(self.replace_non_ascii(t.attribs['label'])+'.h','w+')
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
npi=self.decoding_data(ma,head,self.replace_non_ascii(t.attribs['label']),False,1,
len(self.matrixmul)==0 and len(self.conv)==0)
if npi!=0:
print("Packs per kernel #: "+str(int(npi/32)))
self.variablen[t.outputs['output']].append(npi)
self.variablen[t.outputs['output']].append(t)
self.source.write("#include \""+self.replace_non_ascii(t.attribs['label'])+'.h'+"\" \n")
self.variables[t.outputs['output']]=t.attribs['label']
ma.close()
os.chdir("..")
os.chdir(self.input_dir)
assert len(rank)==2
rank[1]=t.attribs['shape'][1]
rank[0]=rank[0]
break
self.matrixmul.append(i)
#externally imported data, currently treated as input
elif ops.name=='external':
print("externally imported data, currently treated as input")
print("WARNING: if it is not used as input, there will be errors.")
self.in_shape=ops.attribs['shape']
rank=self.in_shape.copy()
# batch norm
elif ops.name=='batch_normalization':
mat=ops.inputs['mean']
last=False
#determine the last batch layer
for t in self.graph.operations:
if 'output' in t.outputs.keys() and t.outputs['output']==mat and t.attribs['label'].find(self.batch_last,0,-1) !=-1:
last=True
break
print("Is the last batch normalization layer: "+str(last))
#if it is the last batch norm layer, write out everything into the header file
if last:
for b in range(4):
if b ==0:
mat=ops.inputs['mean']
elif b ==1:
mat=ops.inputs['variance']
elif b ==2:
mat=ops.inputs['offset']
else :
mat=ops.inputs['scale']
for t in self.graph.operations:
if t.outputs['output']==mat:
assert t.attribs['shape'][0]==rank[0]
assert t.attribs['shape'][1] == rank[1]
self.variablen[t.outputs['output']]=[]
ma=open(t.attribs['label']+'.dat','rb')
os.chdir("../3pxnet-compiler/autogen")
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
head=open(self.replace_non_ascii(t.attribs['label'])+'.h','w+')
head.write("#define _"+self.replace_non_ascii(t.attribs['label'])+" {\\\n")
self.decoding_data(ma,head,self.replace_non_ascii(t.attribs['label']),True,0,False)
head.close()
self.source.write("#include \""+self.replace_non_ascii(t.attribs['label'])+'.h'+"\" \n")
self.variables[t.outputs['output']]=t.attribs['label']
ma.close()
os.chdir("..")
os.chdir(self.input_dir)
break
#else, collect all four data needed to computer threshold and sign
else:
for b in range(4):
if b ==0:
mat=ops.inputs['mean']
elif b ==1:
mat=ops.inputs['variance']
elif b ==2:
mat=ops.inputs['offset']
else :
mat=ops.inputs['scale']
for t in self.graph.operations:
if t.outputs['output']==mat:
assert t.attribs['shape'][0] == rank[0]
assert t.attribs['shape'][1] == rank[1]
self.variablen[t.outputs['output']]=[]
ma=open(t.attribs['label']+'.dat','rb')
os.chdir("../3pxnet-compiler/autogen")
if b==0:
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
self.mean[t.outputs['output']]=self.decoding_data(
ma,None,self.replace_non_ascii(t.attribs['label']),False,0,False)
self.tempmean = t.outputs['output']
elif b==1:
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
self.var[t.outputs['output']]=self.decoding_data(
ma,None,self.replace_non_ascii(t.attribs['label']),False,0,False)
self.tempvar = t.outputs['output']
elif b==2:
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
self.beta[t.outputs['output']]=self.decoding_data(
ma,None,self.replace_non_ascii(t.attribs['label']),False,0,False)
self.tempbeta = t.outputs['output']
else :
print("Reading weight data from " + t.attribs['label'] + ".dat ...")
self.gamma[t.outputs['output']]=self.decoding_data(
ma,None,self.replace_non_ascii(t.attribs['label']),False,0,False)
self.tempgamma = t.outputs['output']
self.variables[t.outputs['output']]=t.attribs['label']
ma.close()
os.chdir("..")
os.chdir(self.input_dir)
break
self.batchn[i]=[]
self.batchn[i].append(ops.inputs['mean'])
self.batchn[i].append(ops.inputs['variance'])
self.batchn[i].append(ops.inputs['offset'])#bias,beta
self.batchn[i].append(ops.inputs['scale'])#weight,gamma
#if pooling
elif ops.name=='max_pool':
rank[3]=int(rank[3]/ops.attribs['size'][3])
rank[2]=int(rank[2]/ops.attribs['size'][2])
rank[1] = int(rank[1] / ops.attribs['size'][1])
rank[0] = int(rank[0] / ops.attribs['size'][0])
#clamp is considered as binarize output. If it is not used in this way, error would be given
elif ops.name=='clamp':
print("WARNING: clamp is considered as binarize output. If it is not used in this way, error would be given")
if ops.inputs['a']!=-1 or ops.inputs['b']!=1:
print("ERROR: 3PXNet inference library only holds 1 or -1.")
exit()
#current library does not have reshape function. so if the reshaped dimension is not found in the
#current shape, then error would be given
elif ops.name=='reshape':
rank_flag=False
temp=1
for r in range(len(rank)):
temp*=rank[r]
for r in range(len(ops.attribs['shape'])):
if temp==ops.attribs['shape'][r]:
rank_flag=True
if not rank_flag:
print("ERROR: current 3PXNet library does not support reshaping")
exit()
rank=ops.attribs['shape']
for r in range(len(rank)):
if rank[r]==-1:
rank[r]=1
#softmax is always added at the end. If not, then the compiler would only give an error
elif ops.name=='softmax':
print("WARNING: current 3PXNet library does not support softmax function")
#these three operations don't have impact on performance
elif ops.name=='squeeze':
for r in ops.attribs['axes']:
del rank[r]
print("Squeeze has no effect on inference, therefore it is skipped")
elif ops.name=='unsqueeze' :
for r in ops.attribs['axes']:
rank.insert(r,1)
print("Unsqueeze has no effect on inference, therefore it is skipped")
elif ops.name=='variable':
print('Define '+ops.attribs['label']+ ' as '+ops.outputs['output'])
i+=1
continue
#same as softmax, if it's not used in the end, error would be given
elif ops.name=='log' :
if ops!=self.graph.operations[-1]:
print("ERROR: current 3PXNet does not support log function")
exit()
else:
print("WARNING: current 3PXNet does not support log function, but it doesn't affect the result")
elif ops.name=='slice':
rank[ops.attribs['axes'][0]]=ops.attribs['end'][0]
print("WARNING: slice operation is skipped. If this is not operating on the input, the result will be wrong")
else:
print("ERROR: current 3PXNet does not support "+ops.name+" function")
exit()
print("Operation output shape:",end=' ')
print(rank)
i+=1
def write_last_layer(self):
    """Emit the cached final layer, dispatching on the kernel's rank.

    A cached 4-D weight tensor is written out as a convolution layer with
    its dimensions reversed into [dim3, dim2, dim1, dim0] order; any other
    rank is written as a fully connected layer. Does nothing when no weight
    data has been cached in ``self.tempweight``.
    """
    if not len(self.tempweight):
        return
    weights = np.copy(self.tempweight)
    if len(weights.shape) == 4:
        # Convolution kernel: reverse dimension order for the writer.
        reversed_dims = [weights.shape[d] for d in (3, 2, 1, 0)]
        self.writecn(True, reversed_dims, self.tempweight,
                     self.tempsparse, self.tempoutput, self.name)
    else:
        self.writefc(True, weights.shape, self.tempweight,
                     self.tempsparse, self.tempoutput, self.name)
def calculate_batch(self):
    '''
    Calculate batch-normalization thresholds and sign words, writing one
    ``bn<k>.h`` header per (non-final) batch-norm layer into the autogen
    directory and #include-ing each header into the generated source.

    Fix over the previous revision: the per-layer header file is now opened
    with a ``with`` block, so its handle is always closed (it previously
    leaked -- every other header writer in this file closes its handle).

    :return: NA
    '''
    os.chdir("../3pxnet-compiler/autogen")
    # All four collected statistics must describe the same set of layers.
    if len(self.var.keys()) != len(self.mean.keys()) or len(self.var.keys()) != len(self.gamma.keys()) or \
            len(self.var.keys()) != len(self.beta.keys()):
        print("error with batch normalization number")
        exit()
    thresh = {}
    sign = {}
    k = 0
    # calculate threshold and sign
    # self.batchn maps an op index -> [mean, variance, offset, scale] ids;
    # variables: map a variable_# to its name stored in nnef directory
    for i in self.batchn.keys():
        # The last batch-norm layer is written out in full elsewhere
        # (its layer is not binarized), so skip it here.
        if self.variables[self.batchn[i][0]][0:self.search_non_ascii(self.variables[self.batchn[i][0]])] == self.batch_last:
            continue
        temp = bitarray()
        epsilon = [self.graph.operations[i].attribs['epsilon']] * len(self.var[self.batchn[i][1]])
        thresh[k] = []
        sign[k] = []
        # Per-channel threshold: mean - sqrt(var + eps) / gamma * beta.
        for j in range(len(self.var[self.batchn[i][1]])):
            thresh[k].append(self.mean[self.batchn[i][0]][j] - np.sqrt(self.var[self.batchn[i][1]][j] + epsilon[j]) /
                             self.gamma[self.batchn[i][3]][j] * self.beta[self.batchn[i][2]][j])
        # Pack sign(gamma) bits into 32-bit hex words. Note a trailing
        # partial word (channel count not divisible by 32) is dropped,
        # which matches the len/32 write loop below.
        for j in range(len(self.var[self.batchn[i][1]])):
            temp.append(int(self.gamma[self.batchn[i][3]][j] > 0))
            if j % 32 == 31:
                sign[k].append(str("0x%x" % int(temp.to01(), 2)))
                temp = bitarray()
        # Write the per-layer header; 'with' guarantees the handle closes.
        with open("bn" + str(k + 1) + ".h", 'w+') as head:
            head.write("#define bn" + str(k + 1) + "_thresh {\\\n")
            for j in range(len(self.var[self.batchn[i][1]])):
                head.write(str(thresh[k][j]) + ", \\\n")
            head.write("} \n")
            head.write("#define bn" + str(k + 1) + "_sign {\\\n")
            for j in range(int(len(self.var[self.batchn[i][1]]) / 32)):
                head.write(str(sign[k][j]) + ", \\\n")
            head.write("} \n")
        self.source.write("#include \"bn" + str(k + 1) + ".h\" \n")
        k += 1
def testsparse(self,i,total_ops):
'''
Test sparsity.
The way to do this is to search for the word "indices" in a given header file
:param i: index of current operation in "total_ops" list
:param total_ops: a list of indices of all matrix multiplication/convolution in graph.operations
:return: whether this layer is sparse or not
'''
if 'filter' in self.graph.operations[total_ops[i]].inputs:
test_sparse = open(
self.replace_non_ascii(self.variables[self.graph.operations[total_ops[i]].inputs['filter']]) + '.h', 'r')
sparse = test_sparse.read().find("indices", 0, -1)
else:
test_sparse = open(self.replace_non_ascii(self.variables[self.graph.operations[total_ops[i]].inputs['B']]) + '.h',
'r')
sparse = test_sparse.read().find("indices", 0, -1)
if sparse == -1:
sparse = False
else:
sparse = True
test_sparse.close()
return sparse
def write_source_second(self):
    '''
    Write out all remaining source code
    It can be considered as two parts: the first one writes out all specifications of one layer, such as its input
    size, kernel size, and output size. For a convolutional layer, padding and pooling information are also defined
    in this part. Besides, batch normalization information is defined in this part as well.
    The second part writes out used functions defined in inference engine according to different layer settings.

    Layer indices in the emitted C code are 1-based (F1/C1 is the first
    layer); ``total_ops`` holds the graph indices of all matmul/conv ops
    in graph order.
    :return: NA
    '''
    # write source file: first part
    print("-----------------------------------------")
    self.source.write('#include \"image.h\"\n')
    self.source.write("static uint8_t l1_act[] = IMAGES ; \n")
    self.source.write("static uint8_t labels[] = LABELS; \n")
    total_ops=self.matrixmul+self.conv
    total_ops=sorted(total_ops)
    for i in range(1, len(total_ops) + 1):
        sparse=self.testsparse(i-1,total_ops)
        #fc layer
        if total_ops[i-1] in self.matrixmul:
            # number of inputs
            self.source.write("#define F"+str(i)+"I "+str(
                self.variablen[self.graph.operations[total_ops[i-1]].inputs['B']][1].attribs['shape'][0])+"\n")
            #unpruned inputs
            self.source.write("#define F"+str(i)+"NPI "+str(
                self.variablen[self.graph.operations[total_ops[i-1]].inputs['B']][0])+"\n")
            #number of outputs
            self.source.write("#define F"+str(i)+"O "+str(
                self.variablen[self.graph.operations[total_ops[i-1]].inputs['B']][1].attribs['shape'][1])+"\n")
            #weight data
            self.source.write("static pckDtype l"+str(i)+"wght[] = _"+self.replace_non_ascii(
                self.variablen[self.graph.operations[total_ops[i-1]].inputs['B']][1].attribs['label'])+" ;\n")
            if sparse:
                # sparse (3PXNet) layers also carry an index array
                self.source.write("static uint8_t l"+str(i)+"ind[] = _"+self.replace_non_ascii(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['B']][1].attribs['label'])+"_indices ;\n")
            #previous layer is fc: activation buffer sized by that layer's outputs
            if i!=1 and total_ops[i-2] in self.matrixmul:
                self.source.write("static pckDtype l"+str(i)+"act_bin[F"+str(i-1)+"O/pckWdt]; \n")
            #first layer: buffer sized by this layer's inputs
            else:
                self.source.write("static pckDtype l"+str(i)+"act_bin[F"+str(i)+"I/pckWdt]; \n")
        #conv layer
        else:
            self.source.write("#define C"+str(i)+"KXY "+str(
                self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['shape'][2])+"\n")
            #input of the first layer
            if i == 1:
                #input number will change according to input size
                self.source.write("#define C1XY "+str(int(self.in_shape[2]))+"\n")
                self.source.write("#define C1Z "+str(self.in_shape[1])+"\n")
                self.source.write("#define C"+str(i)+"KZ "+str(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['shape'][0])+"\n")
            else:
                #determine the XY dimension from pervious layer's padding and pooling number
                self.source.write('#define C'+str(i)+"XY ((2*C"+str(i-1)+"PD+C"+str(i-1)+"XY-C"+str(i-1)+"KXY+1)/C"+
                                  str(i-1)+"PL) \n" )
                #determine the Z dimension for activation
                self.source.write("#define C"+str(i)+"Z "+str(
                    self.variablen[self.graph.operations[total_ops[i-2]].inputs['filter']][1].attribs['shape'][0])+'\n')
                #determine kernel's Z dimension
                self.source.write("#define C"+str(i)+"KZ "+str(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['shape'][0])+"\n")
            #size of activation
            self.source.write("static pckDtype l"+str(i)+"act_bin[C"+str(i)+"XY*C"+str(i)+"XY*C"+str(i)+"Z/pckWdt]; \n")
            #size of padding
            self.source.write("#define C"+str(i)+"PD "+str(self.graph.operations[total_ops[i-1]].attribs['padding'][0][0])+'\n')
            #determine pooling
            #use "look ahead" method
            if i != len(total_ops):
                #search between two matrix operations for max pooling function
                for p in range(total_ops[i-1],total_ops[i]):
                    if self.graph.operations[p].name=='max_pool':
                        self.source.write("#define C"+str(i)+"PL "+str(self.graph.operations[p].attribs['size'][3])+'\n')
                        break
                    # no pooling found before the next matrix op -> PL 1
                    if p == total_ops[i]-1:
                        self.source.write("#define C"+str(i)+"PL 1 \n")
            #the last layer: scan to the end of the graph instead
            else:
                for p in range(total_ops[i-1],len(self.graph.operations)):
                    if self.graph.operations[p].name=='max_pool':
                        self.source.write("#define C"+str(i)+"PL "+str(self.graph.operations[p].attribs['size'][3])+'\n')
                        break
                    if p == len(self.graph.operations)-1:
                        self.source.write("#define C"+str(i)+"PL 1 \n")
            #unpruned inputs
            if self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][0] != 0:
                self.source.write("#define C"+str(i)+"NPI "+str(self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][0])+'\n')
            if sparse:
                self.source.write("static uint8_t l"+str(i)+"ind[] = _"+self.replace_non_ascii(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['label'])+"_indices ;\n")
            # first conv layer keeps int8 weights (BWN), later layers are packed
            if i == 1:
                self.source.write("static int8_t l"+str(i)+"wght[] = _"+self.replace_non_ascii(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['label'])+" ;\n")
            else:
                self.source.write("static pckDtype l"+str(i)+"wght[] = _"+self.replace_non_ascii(
                    self.variablen[self.graph.operations[total_ops[i-1]].inputs['filter']][1].attribs['label'])+" ;\n")
    #theoretically the output can be other number, we pick 10 right now
    self.source.write("static float output[10]; \n")
    number=0
    #write batch norm
    for i in self.batchn.keys():
        # number = 1-based ordinal of batch-norm layer i among all of them
        number=len([k for k in self.batchn.keys() if k < i])+1
        #the last batch norm layer: keep full mean/var/gamma/beta arrays
        if number == len(self.batchn.keys()):
            self.source.write("static bnDtype bn"+str(number)+"mean[] = _"+self.replace_non_ascii(self.variables[self.batchn[i][0]])+" ; \n")
            self.source.write("static bnDtype bn"+str(number)+"var[] = _"+self.replace_non_ascii(self.variables[self.batchn[i][1]])+" ; \n")
            self.source.write("static bnDtype bn"+str(number)+"gamma[] = _"+self.replace_non_ascii(self.variables[self.batchn[i][3]])+" ; \n")
            self.source.write("static bnDtype bn"+str(number)+"beta[] = _"+self.replace_non_ascii(self.variables[self.batchn[i][2]])+" ; \n")
        #otherwise use only threshold and sign
        else:
            self.source.write("static bnDtype bn"+str(number)+"thr[] = bn"+str(number)+"_thresh ; \n")
            self.source.write("static pckDtype bn"+str(number)+"sign[] = bn"+str(number)+"_sign ; \n")
    self.source.write("int main(){ \n\tint correct = 0; \n\tfor(int img = 0; img < "+str(int(self.test_end_id-self.test_start_id))+
                      "; img++) {\n\t\tuint8_t *curr_im = l1_act + img*")
    #fc network: assumes 784-pixel (MNIST-style) input
    if len(self.conv)==0:
        self.source.write("784*sizeof(uint8_t);\n\t\t")
        self.source.write("packBinThrsArr(curr_im, l1act_bin, F1I, 1);\n\t\t")
    #mixed
    else:
        self.source.write(str(self.in_shape[2])+"*"+str(self.in_shape[3])+"*"+str(self.in_shape[1])+"*sizeof(uint8_t);\n\t\t")
    # write out source code: second part
    # NOTE(review): batch_index is incremented below but never read in this
    # method -- the bn array index used in the emitted calls is i+1.
    batch_index = 1
    for i in range(len(total_ops)):
        sparse=self.testsparse(i,total_ops)
        print("Operation ", end='')
        # have_batch: is there a batch_normalization op between this matrix
        # op and the next one (or the end of the graph for the last layer)?
        have_batch = False
        if i < len(total_ops) - 1:
            for r in range(total_ops[i], total_ops[i + 1]):
                if self.graph.operations[r].name == 'unsqueeze' or self.graph.operations[r].name == 'squeeze' or \
                        self.graph.operations[r].name == 'clamp' or self.graph.operations[
                    r].name == 'batch_normalization' or self.graph.operations[r].name=='max_pool':
                    print('# ' + str(r), end=' ')
                    if self.graph.operations[r].name == 'batch_normalization':
                        have_batch=True
        else:
            for r in range(total_ops[i], len(self.graph.operations)):
                if self.graph.operations[r].name == 'unsqueeze' or self.graph.operations[r].name == 'squeeze' or \
                        self.graph.operations[r].name == 'clamp' or self.graph.operations[
                    r].name == 'batch_normalization' or self.graph.operations[r].name=='max_pool':
                    print('# ' + str(r), end=' ')
                    if self.graph.operations[r].name == 'batch_normalization':
                        have_batch=True
        print("uses C library function ", end='')
        if total_ops[i] in self.matrixmul:
            #normal layer with batchnorm
            if i<len(total_ops)-1 and have_batch:
                if sparse:
                    print("Fc3pxnWrap")
                    self.source.write("Fc3pxnWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, F"+str(i+1)+"NPI, F"+str(i+1)+"O, l"+str(i+2)+"act_bin, bn"+str(i+1)+"thr, bn"+str(i+1)+"sign);\n\t\t")
                else:
                    print("FcXnorWrap")
                    self.source.write("FcXnorWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, F" + str(i + 1) + "I, F" + str(i + 1) + "O, l" + str(
                        i + 2) + "act_bin, bn" + str(i + 1) + "thr, bn" + str(i + 1) + "sign);\n\t\t")
                batch_index+=1
            #normal layer without batchnorm
            elif i<len(total_ops)-1:
                if sparse:
                    print("Fc3pxnWrap")
                    self.source.write("Fc3pxnWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, F"+str(i+1)+"NPI, F"+str(i+1)+"O, l"+str(i+2)+"act_bin, NULL, NULL);\n\t\t")
                else:
                    print("FcXnorWrap")
                    self.source.write("FcXnorWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, F" + str(i + 1) + "I, F" + str(i + 1) + "O, l" + str(
                        i + 2) + "act_bin, NULL, NULL);\n\t\t")
            #last layer: non-binarized output, uses full mean/var/gamma/beta
            elif i==len(total_ops)-1 and have_batch:
                if sparse:
                    print("Fc3pxnNoBinWrap")
                    self.source.write("int res = Fc3pxnNoBinWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, F"+str(i+1)+"NPI, F"+str(i+1)+"O, output, bn")
                    self.source.write(str(i+1)+"mean, bn"+str(i+1)+"var, bn"+str(i+1)+"gamma, bn"+str(i+1)+"beta);\n\t\t")
                else:
                    print("FcXnorNoBinWrap")
                    self.source.write(
                        "int res = FcXnorNoBinWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, F" + str(i + 1) + "I, F" + str(i + 1) + "O, output, bn")
                    self.source.write(str(i + 1) + "mean, bn" + str(i + 1) + "var, bn" + str(i + 1) + "gamma, bn" + str(
                        i + 1) + "beta);\n\t\t")
        else:
            #first layer. should be BWN
            #with batchnorm
            if i==0 and have_batch:
                print("CnBnBwn")
                self.source.write("CnBnBwn(curr_im, l1wght, C1Z, C1XY, C1XY, C1Z, C1KXY, C1KXY, C1KZ, C1PD, C1PL, l2act_bin, bn1thr, bn1sign);\n\t\t")
                batch_index+=1
            #without batch norm
            # NOTE(review): this emitted call lacks a trailing ';' -- looks
            # like a bug in the generated C; confirm against the inference
            # library before changing.
            elif i==0:
                print("CnBnBwn")
                self.source.write("CnBnBwn(curr_im, l1wght, C1Z, C1XY, C1XY, C1Z, C1KXY, C1KXY, C1KZ, C1PD, C1PL, l2act_bin, NULL,NULL)\n\t\t")
            #normal layer
            elif i<len(total_ops)-1 and have_batch:
                if sparse:
                    print("Cn3pxnWrap")
                    self.source.write("Cn3pxnWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, C"+str(i+1)+"NPI, C"+str(i+1))
                    self.source.write("Z, C"+str(i+1)+"XY, C"+str(i+1)+"XY, C"+str(i+1)+"Z, C"+str(i+1)+"KXY, C"+str(i+1)+"KXY, C"+str(i+1)+"KZ, l"+str(i+2)+"act_bin, C"+str(i+1))
                    self.source.write("PD, C"+str(i+1)+"PL, bn"+str(i+1)+"thr, bn"+str(i+1)+"sign);\n\t\t")
                else:
                    print("CnXnorWrap")
                    self.source.write(
                        "CnXnorWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, C" + str(i + 1))
                    self.source.write("Z, C" + str(i + 1) + "XY, C" + str(i + 1) + "XY, C" + str(i + 1) + "Z, C" + str(
                        i + 1) + "KXY, C" + str(i + 1) + "KXY, C" + str(i + 1) + "KZ, l" + str(i + 2) + "act_bin, C" + str(
                        i + 1))
                    self.source.write("PD, C" + str(i + 1) + "PL, bn" + str(i + 1) + "thr, bn" + str(i + 1) + "sign);\n\t\t")
                batch_index+=1
            #last layer
            elif i == len(total_ops)-1:
                if sparse:
                    print("Cn3pxnNoBinWrap")
                    self.source.write("int res = Cn3pxnNoBinWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, C"+str(i+1)+"NPI, C"+str(i+1))
                    self.source.write("Z, C"+str(i+1)+"XY, C"+str(i+1)+"XY, C"+str(i+1)+"Z, C"+str(i+1)+"KXY, C"+str(i+1)+"KXY, C"+str(i+1)+"KZ, output, C"+str(i+1))
                    self.source.write("PD, C"+str(i+1)+"PL, bn"+str(i+1)+"mean, bn"+str(i+1)+"var, bn"+str(i+1)+"gamma, bn"+str(i+1)+"beta);\n\t\t")
                else:
                    print("CnXnorNoBinWrap")
                    self.source.write("int res = CnXnorNoBinWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, C" + str(i + 1))
                    self.source.write("Z, C" + str(i + 1) + "XY, C" + str(i + 1) + "XY, C" + str(i + 1) + "Z, C" + str(
                        i + 1) + "KXY, C" + str(i + 1) + "KXY, C" + str(i + 1) + "KZ, output, C" + str(i + 1))
                    self.source.write("PD, C" + str(i + 1) + "PL, bn" + str(i + 1) + "mean, bn" + str(i + 1) + "var, bn" + str(
                        i + 1) + "gamma, bn" + str(i + 1) + "beta);\n\t\t")
            #without batch norm
            else:
                if sparse:
                    print("Cn3pxnWrap")
                    self.source.write("Cn3pxnWrap(l"+str(i+1)+"act_bin, l"+str(i+1)+"wght, l"+str(i+1)+"ind, C"+str(i+1)+"NPI, C"+str(i+1))
                    self.source.write("Z, C"+str(i+1)+"XY, C"+str(i+1)+"XY, C"+str(i+1)+"Z, C"+str(i+1)+"KXY, C"+str(i+1)+"KXY, C"+str(i+1)+"KZ, l"+str(i+2)+"act_bin, C"+str(i+1))
                    self.source.write("PD, C"+str(i+1)+"PL, NULL, NULL);\n\t\t")
                else:
                    print("CnXnorWrap")
                    self.source.write(
                        "CnXnorWrap(l" + str(i + 1) + "act_bin, l" + str(i + 1) + "wght, C" + str(i + 1))
                    self.source.write("Z, C" + str(i + 1) + "XY, C" + str(i + 1) + "XY, C" + str(i + 1) + "Z, C" + str(
                        i + 1) + "KXY, C" + str(i + 1) + "KXY, C" + str(i + 1) + "KZ, l" + str(i + 2) + "act_bin, C" + str(
                        i + 1))
                    self.source.write("PD, C" + str(i + 1) + "PL, NULL, NULL);\n\t\t")
    print("-----------------------------------------")
    #testing and inference: argmax over the 10 outputs, compare to label
    self.source.write("float max = -INFINITY; \n\t\tint maxIdx = 0; \n\t\tfor (int i = 0; i <10; i++) { \n\t\t\t printf(\"%f, \", output[i]);\n\t\t\t if (output[i] > max) { \n\t\t\t\t max = output[i]; \n\t\t\t\t")
    self.source.write("maxIdx = i;\n\t\t\t }\n\t\t}\n\t\t")
    self.source.write("printf(\"\\n\");")
    self.source.write("printf(\"Image %d: label: %d, actual: %d\\n\",img, maxIdx, labels[img]); \n\t\t")
    self.source.write("if (maxIdx == labels[img]) correct += 1; \n\t}\n\tprintf(\"Accuracy: %f%%\\n\", 100.0*(float)correct/"
                      +str(int(self.test_end_id-self.test_start_id))+"); \n\treturn (EXIT_SUCCESS); \n}")
    self.source.close()
def write_images(self):
    '''
    write out images for both testing and inference

    Writes ``image.h`` containing IMAGES (pixel data) and LABELS macros
    for the [test_start_id, test_end_id) slice of the chosen dataset.
    MNIST images are binarized (pixel > 0) and flattened to 784 values;
    other datasets (e.g. CIFAR10) are written as raw channel values in
    NCHW order.
    :return: NA
    '''
    image=open('image.h','w+')
    os.chdir('..')
    # Copy the training engine's helpers locally so load_dataset imports.
    from shutil import copyfile
    copyfile('../3pxnet-training/utils_own.py','utils_own.py')
    copyfile('../3pxnet-training/utils.py', 'utils.py')
    import torch
    from utils_own import load_dataset
    '''
    Imported from training engine
    Give training/testing data loader and class information for several image datasets
    '''
    trainset, testset, classes = load_dataset(self.dataset)
    testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=True, num_workers=2)
    label=testloader.dataset.targets[self.test_start_id:self.test_end_id]
    if self.dataset=='MNIST':
        testdata=torch.tensor([2,2,2,2])
        #testdata.cuda()
        testdata=testloader.dataset.data[self.test_start_id:self.test_end_id,:,:]
        testdata = torch.reshape(testdata, (self.test_end_id-self.test_start_id, 784))
        rank=testdata.shape
        temp_array = testdata.clone()
        # Apply the stored input permutation, if any.
        # NOTE(review): only 768 of the 784 pixel columns are permuted here
        # -- presumably self.list holds 768 entries, but this looks like a
        # possible off-by-16 vs the 784-pixel reshape above; confirm.
        if len(self.list)!=0:
            for i in range(768):
                temp_array[0:, i] = testdata[:, self.list[0][i]]
            testdata = temp_array
        image.write('#define IMAGES {\\\n')
        for n in range(self.test_end_id-self.test_start_id):
            for y in range(28):
                for x in range(28):
                    # binarize: emit 1 for any non-zero pixel
                    image.write(str(int(testdata[n][y*28+x].item()>0))+', ')
                image.write('\\\n')
            image.write('\\\n')
        image.write('}\n')
        image.write('#define LABELS {\\\n')
        for i in range(self.test_end_id-self.test_start_id):
            image.write(str(label[i].item())+', ')
        image.write('}\n')
        image.close()
    else:
        # HWC -> CHW so the emitted data matches the C inference layout
        testdata=torch.from_numpy(testloader.dataset.data[self.test_start_id:self.test_end_id,:,:,:]).permute(0,3,1,2)
        #testdata.cuda()
        rank = testdata.shape
        image.write('#define IMAGES {\\\n')
        for n in range(self.test_end_id-self.test_start_id):
            for y in range(rank[2]):
                for x in range(rank[3]):
                    for z in range(rank[1]):
                        image.write(str(int(testdata[n][z][y][x].item()))+', ')
            image.write('\\\n')
        image.write('}\n')
        image.write('#define LABELS {\\\n')
        for i in range(self.test_end_id-self.test_start_id):
            image.write(str(label[i])+', ')
        image.write('}\n')
        image.close()
def main():
print("WARNING: Current 3PXNet inference library does not support operations "
"other than convolution or matrix multiplication")
print("All other operations will be skipped.")
# Argument parsing
parser = argparse.ArgumentParser(description='Automatically generate inference code')
parser.add_argument('--input', help="""Name of input directory. This should be the converted NNEF "
"formatted neural network which ends with .nnef with no other modifications. Example: --input=FC_Small.nnef""")
parser.add_argument('--dataset', metavar='DATASET', default='MNIST',
help='Dataset to test on. Currently choose from MNIST and CIFAR10')
parser.add_argument('--test_start_id',default=0,help='The starting index of dataset for testing')
parser.add_argument('--test_end_id', default=100, help='The ending index of dataset for testing')
args = parser.parse_args()
dataset = args.dataset
input_dir = args.input
test_start_id=args.test_start_id
test_end_id=args.test_end_id
converter=convert(input_dir,dataset,test_start_id,test_end_id)
#load the nnef graph into compiler
converter.loadgraph()
# the last layer will not be binarized, so its batch norm has to be dealt differently
# this function finds such batch norm operation
converter.find_batch_last()
# write included headers into source code
converter.write_source_first()
# for each operation shown in the graph, compile it
converter.processing_graph()
# the last layer should be written out as well
converter.write_last_layer()
# calculate batch normalization threshold and sign
converter.calculate_batch()
# write out all remaining source code
converter.write_source_second()
# write out images for both inference and testing
converter.write_images()
# Script entry point: run the compiler only when invoked directly.
if __name__ == '__main__':
    main()
| [
"numpy.copy",
"os.listdir",
"numpy.sqrt",
"argparse.ArgumentParser",
"nnef.load_graph",
"os.walk",
"torch.from_numpy",
"os.chdir",
"torch.tensor",
"shutil.copyfile",
"numpy.zeros",
"struct.unpack",
"os.mkdir",
"utils_own.load_dataset",
"torch.utils.data.DataLoader",
"numpy.load",
"to... | [((60846, 60922), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Automatically generate inference code"""'}), "(description='Automatically generate inference code')\n", (60869, 60922), False, 'import argparse\n'), ((3752, 3776), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (3760, 3776), False, 'import os, fnmatch\n'), ((5562, 5592), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler"""'], {}), "('../3pxnet-compiler')\n", (5570, 5592), False, 'import os, fnmatch\n'), ((5671, 5690), 'os.chdir', 'os.chdir', (['"""autogen"""'], {}), "('autogen')\n", (5679, 5690), False, 'import os, fnmatch\n'), ((6594, 6608), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (6602, 6608), False, 'import os, fnmatch\n'), ((6615, 6639), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (6623, 6639), False, 'import os, fnmatch\n'), ((38848, 38886), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (38856, 38886), False, 'import os, fnmatch\n'), ((58185, 58199), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (58193, 58199), False, 'import os, fnmatch\n'), ((58240, 58299), 'shutil.copyfile', 'copyfile', (['"""../3pxnet-training/utils_own.py"""', '"""utils_own.py"""'], {}), "('../3pxnet-training/utils_own.py', 'utils_own.py')\n", (58248, 58299), False, 'from shutil import copyfile\n'), ((58305, 58356), 'shutil.copyfile', 'copyfile', (['"""../3pxnet-training/utils.py"""', '"""utils.py"""'], {}), "('../3pxnet-training/utils.py', 'utils.py')\n", (58313, 58356), False, 'from shutil import copyfile\n'), ((58597, 58623), 'utils_own.load_dataset', 'load_dataset', (['self.dataset'], {}), '(self.dataset)\n', (58609, 58623), False, 'from utils_own import load_dataset\n'), ((58643, 58728), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(256)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(testset, 
batch_size=256, shuffle=True,\n num_workers=2)\n', (58670, 58728), False, 'import torch\n'), ((3806, 3821), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3816, 3821), False, 'import os, fnmatch\n'), ((3918, 3947), 'nnef.load_graph', 'nnef.load_graph', (['"""graph.nnef"""'], {}), "('graph.nnef')\n", (3933, 3947), False, 'import nnef\n'), ((5619, 5634), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (5629, 5634), False, 'import os, fnmatch\n'), ((5645, 5664), 'os.mkdir', 'os.mkdir', (['"""autogen"""'], {}), "('autogen')\n", (5653, 5664), False, 'import os, fnmatch\n'), ((7620, 7630), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (7628, 7630), False, 'from bitarray import bitarray\n'), ((7647, 7657), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (7655, 7657), False, 'from bitarray import bitarray\n'), ((9990, 10000), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (9998, 10000), False, 'from bitarray import bitarray\n'), ((10017, 10027), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (10025, 10027), False, 'from bitarray import bitarray\n'), ((14227, 14255), 'numpy.zeros', 'np.zeros', (['(rank[0], rank[1])'], {}), '((rank[0], rank[1]))\n', (14235, 14255), True, 'import numpy as np\n'), ((14898, 14912), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (14906, 14912), False, 'import os, fnmatch\n'), ((14922, 14946), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (14930, 14946), False, 'import os, fnmatch\n'), ((15046, 15058), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (15053, 15058), False, 'import os, fnmatch\n'), ((18159, 18197), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (18167, 18197), False, 'import os, fnmatch\n'), ((38331, 38355), 'numpy.copy', 'np.copy', (['self.tempweight'], {}), '(self.tempweight)\n', (38338, 38355), True, 'import numpy as np\n'), ((39474, 39484), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', 
(39482, 39484), False, 'from bitarray import bitarray\n'), ((58851, 58877), 'torch.tensor', 'torch.tensor', (['[2, 2, 2, 2]'], {}), '([2, 2, 2, 2])\n', (58863, 58877), False, 'import torch\n'), ((59004, 59073), 'torch.reshape', 'torch.reshape', (['testdata', '(self.test_end_id - self.test_start_id, 784)'], {}), '(testdata, (self.test_end_id - self.test_start_id, 784))\n', (59017, 59073), False, 'import torch\n'), ((18420, 18466), 'numpy.zeros', 'np.zeros', (['(rank[0], rank[1], rank[2], rank[3])'], {}), '((rank[0], rank[1], rank[2], rank[3]))\n', (18428, 18466), True, 'import numpy as np\n'), ((19683, 19729), 'numpy.zeros', 'np.zeros', (['(rank[3], rank[2], rank[1], rank[0])'], {}), '((rank[3], rank[2], rank[1], rank[0]))\n', (19691, 19729), True, 'import numpy as np\n'), ((20528, 20542), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (20536, 20542), False, 'import os, fnmatch\n'), ((20555, 20579), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (20563, 20579), False, 'import os, fnmatch\n'), ((20642, 20654), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (20649, 20654), False, 'import os, fnmatch\n'), ((23505, 23543), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (23513, 23543), False, 'import os, fnmatch\n'), ((40127, 40137), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (40135, 40137), False, 'from bitarray import bitarray\n'), ((59882, 59974), 'torch.from_numpy', 'torch.from_numpy', (['testloader.dataset.data[self.test_start_id:self.test_end_id, :, :, :]'], {}), '(testloader.dataset.data[self.test_start_id:self.\n test_end_id, :, :, :])\n', (59898, 59974), False, 'import torch\n'), ((8546, 8556), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8554, 8556), False, 'from bitarray import bitarray\n'), ((8582, 8592), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8590, 8592), False, 'from bitarray import bitarray\n'), ((14837, 14862), 'struct.unpack', 
'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (14850, 14862), False, 'import struct\n'), ((15306, 15334), 'numpy.zeros', 'np.zeros', (['(rank[0], rank[1])'], {}), '((rank[0], rank[1]))\n', (15314, 15334), True, 'import numpy as np\n'), ((15368, 15382), 'numpy.load', 'np.load', (['name1'], {}), '(name1)\n', (15375, 15382), True, 'import numpy as np\n'), ((26897, 26935), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (26905, 26935), False, 'import os, fnmatch\n'), ((27831, 27845), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (27839, 27845), False, 'import os, fnmatch\n'), ((27864, 27888), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (27872, 27888), False, 'import os, fnmatch\n'), ((8055, 8065), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8063, 8065), False, 'from bitarray import bitarray\n'), ((8094, 8104), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8102, 8104), False, 'from bitarray import bitarray\n'), ((8336, 8346), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8344, 8346), False, 'from bitarray import bitarray\n'), ((8375, 8385), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (8383, 8385), False, 'from bitarray import bitarray\n'), ((14736, 14761), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (14749, 14761), False, 'import struct\n'), ((15630, 15673), 'numpy.copy', 'np.copy', (['temp_array[permute_list[0, i], 0:]'], {}), '(temp_array[permute_list[0, i], 0:])\n', (15637, 15673), True, 'import numpy as np\n'), ((15829, 15853), 'numpy.copy', 'np.copy', (['self.tempweight'], {}), '(self.tempweight)\n', (15836, 15853), True, 'import numpy as np\n'), ((16616, 16647), 'numpy.copy', 'np.copy', (['self.var[self.tempvar]'], {}), '(self.var[self.tempvar])\n', (16623, 16647), True, 'import numpy as np\n'), ((16804, 16837), 'numpy.copy', 'np.copy', (['self.mean[self.tempmean]'], {}), 
'(self.mean[self.tempmean])\n', (16811, 16837), True, 'import numpy as np\n'), ((16996, 17031), 'numpy.copy', 'np.copy', (['self.gamma[self.tempgamma]'], {}), '(self.gamma[self.tempgamma])\n', (17003, 17031), True, 'import numpy as np\n'), ((17192, 17225), 'numpy.copy', 'np.copy', (['self.beta[self.tempbeta]'], {}), '(self.beta[self.tempbeta])\n', (17199, 17225), True, 'import numpy as np\n'), ((28864, 28902), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (28872, 28902), False, 'import os, fnmatch\n'), ((29716, 29730), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (29724, 29730), False, 'import os, fnmatch\n'), ((29749, 29773), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (29757, 29773), False, 'import os, fnmatch\n'), ((11210, 11220), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (11218, 11220), False, 'from bitarray import bitarray\n'), ((11252, 11262), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (11260, 11262), False, 'from bitarray import bitarray\n'), ((16746, 16777), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (16753, 16777), True, 'import numpy as np\n'), ((16938, 16969), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (16945, 16969), True, 'import numpy as np\n'), ((17134, 17165), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (17141, 17165), True, 'import numpy as np\n'), ((17326, 17357), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (17333, 17357), True, 'import numpy as np\n'), ((20917, 20963), 'numpy.zeros', 'np.zeros', (['(rank[3], rank[2], rank[1], rank[0])'], {}), '((rank[3], rank[2], rank[1], rank[0]))\n', (20925, 20963), True, 'import numpy as np\n'), ((21000, 21014), 'numpy.load', 'np.load', (['name1'], {}), '(name1)\n', (21007, 21014), True, 'import numpy as np\n'), 
((39743, 39795), 'numpy.sqrt', 'np.sqrt', (['(self.var[self.batchn[i][1]][j] + epsilon[j])'], {}), '(self.var[self.batchn[i][1]][j] + epsilon[j])\n', (39750, 39795), True, 'import numpy as np\n'), ((10574, 10584), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (10582, 10584), False, 'from bitarray import bitarray\n'), ((10619, 10629), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (10627, 10629), False, 'from bitarray import bitarray\n'), ((10970, 10980), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (10978, 10980), False, 'from bitarray import bitarray\n'), ((11015, 11025), 'bitarray.bitarray', 'bitarray', ([], {}), '()\n', (11023, 11025), False, 'from bitarray import bitarray\n'), ((16011, 16054), 'numpy.copy', 'np.copy', (['tt[0:, 0:, 0:, permute_list[0, j]]'], {}), '(tt[0:, 0:, 0:, permute_list[0, j]])\n', (16018, 16054), True, 'import numpy as np\n'), ((16379, 16414), 'numpy.copy', 'np.copy', (['tt[0:, permute_list[0, i]]'], {}), '(tt[0:, permute_list[0, i]])\n', (16386, 16414), True, 'import numpy as np\n'), ((21642, 21666), 'numpy.copy', 'np.copy', (['self.tempweight'], {}), '(self.tempweight)\n', (21649, 21666), True, 'import numpy as np\n'), ((22112, 22143), 'numpy.copy', 'np.copy', (['self.var[self.tempvar]'], {}), '(self.var[self.tempvar])\n', (22119, 22143), True, 'import numpy as np\n'), ((22309, 22342), 'numpy.copy', 'np.copy', (['self.mean[self.tempmean]'], {}), '(self.mean[self.tempmean])\n', (22316, 22342), True, 'import numpy as np\n'), ((22510, 22545), 'numpy.copy', 'np.copy', (['self.gamma[self.tempgamma]'], {}), '(self.gamma[self.tempgamma])\n', (22517, 22545), True, 'import numpy as np\n'), ((22715, 22748), 'numpy.copy', 'np.copy', (['self.beta[self.tempbeta]'], {}), '(self.beta[self.tempbeta])\n', (22722, 22748), True, 'import numpy as np\n'), ((19153, 19178), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (19166, 19178), False, 'import struct\n'), ((20416, 20441), 'struct.unpack', 
'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (20429, 20441), False, 'import struct\n'), ((21307, 21357), 'numpy.copy', 'np.copy', (['temp_array[0:, 0:, permute_list[0, i], j]'], {}), '(temp_array[0:, 0:, permute_list[0, i], j])\n', (21314, 21357), True, 'import numpy as np\n'), ((21780, 21823), 'numpy.copy', 'np.copy', (['tt[0:, 0:, 0:, permute_list[0, j]]'], {}), '(tt[0:, 0:, 0:, permute_list[0, j]])\n', (21787, 21823), True, 'import numpy as np\n'), ((22248, 22279), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (22255, 22279), True, 'import numpy as np\n'), ((22449, 22480), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (22456, 22480), True, 'import numpy as np\n'), ((22654, 22685), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (22661, 22685), True, 'import numpy as np\n'), ((22855, 22886), 'numpy.copy', 'np.copy', (['tt[permute_list[0, i]]'], {}), '(tt[permute_list[0, i]])\n', (22862, 22886), True, 'import numpy as np\n'), ((25034, 25059), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (25047, 25059), False, 'import struct\n'), ((19028, 19053), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (19041, 19053), False, 'import struct\n'), ((20291, 20316), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (20304, 20316), False, 'import struct\n'), ((31582, 31620), 'os.chdir', 'os.chdir', (['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (31590, 31620), False, 'import os, fnmatch\n'), ((32298, 32312), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (32306, 32312), False, 'import os, fnmatch\n'), ((32337, 32361), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (32345, 32361), False, 'import os, fnmatch\n'), ((33198, 33236), 'os.chdir', 'os.chdir', 
(['"""../3pxnet-compiler/autogen"""'], {}), "('../3pxnet-compiler/autogen')\n", (33206, 33236), False, 'import os, fnmatch\n'), ((34841, 34855), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (34849, 34855), False, 'import os, fnmatch\n'), ((34880, 34904), 'os.chdir', 'os.chdir', (['self.input_dir'], {}), '(self.input_dir)\n', (34888, 34904), False, 'import os, fnmatch\n'), ((24835, 24860), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (24848, 24860), False, 'import struct\n'), ((24939, 24964), 'struct.unpack', 'struct.unpack', (['"""!f"""', 'temp'], {}), "('!f', temp)\n", (24952, 24964), False, 'import struct\n')] |
import algopy
import numpy
class Model:
# Function evaluations
def eval_f(self, x):
return (x[0]-10)**2 + 5*(x[1]-12)**2 + x[2]**4 + 3*(x[3]-11)**2 + 10*x[4]**6
+ 7*x[5]**2 + x[6]**4 - 4*x[5]*x[6] - 10*x[5] - 8*x[6]
def eval_g(self, x):
out = algopy.zeros(3, dtype=x)
out[0] = 2*x[0]**2 + 3*x[1]**4 + x[2] + 4*x[3]**2 + 5*x[4]
out[1] = 7*x[0] + 3*x[1] + 10*x[2]**2 + x[3] - x[4]
out[2] = 23*x[0] + x[1]**2 + 6*x[5]**2 - 8*x[6] -4*x[0]**2 - x[1]**2 + 3*x[0]*x[1] -2*x[2]**2 - 5*x[5]+11*x[6]
return out
def eval_Lagrangian(self, lam,x):
return self.eval_f(x) + algopy.dot(lam, self.eval_g(x))
# Forward Mode Derivative Evaluations
def eval_grad_f_forward(self, x):
x = algopy.UTPM.init_jacobian(x)
return algopy.UTPM.extract_jacobian(self.eval_f(x))
def eval_jac_g_forward(self, x):
x = algopy.UTPM.init_jacobian(x)
return algopy.UTPM.extract_jacobian(self.eval_g(x))
def eval_jac_vec_g_forward(self, x, v):
x = algopy.UTPM.init_jac_vec(x, v)
return algopy.UTPM.extract_jac_vec(self.eval_g(x))
def eval_grad_Lagrangian_forward(self, lam, x):
return self.eval_grad_f_forward(x) + algopy.dot(lam, self.eval_jac_g_forward(x))
def eval_hess_Lagrangian_forward(self, lam, x):
x = algopy.UTPM.init_hessian(x)
return algopy.UTPM.extract_hessian(x.size, self.eval_Lagrangian(lam, x))
def eval_vec_hess_g_forward(self, w, x):
x = algopy.UTPM.init_hessian(x)
tmp = algopy.dot(w, self.eval_g(x))
return algopy.UTPM.extract_hessian(x.size, tmp)
# Reverse Mode Derivative Evaluations
def trace_eval_f(self, x):
cg = algopy.CGraph()
x = algopy.Function(x)
y = self.eval_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]
self.cg = cg
def trace_eval_g(self, x):
cg2 = algopy.CGraph()
x = algopy.Function(x)
y = self.eval_g(x)
cg2.trace_off()
cg2.independentFunctionList = [x]
cg2.dependentFunctionList = [y]
self.cg2 = cg2
def eval_grad_f_reverse(self, x):
return self.cg.gradient(x)
def eval_jac_g_reverse(self, x):
return self.cg2.jacobian(x)
def eval_hess_f_reverse(self, x):
return self.cg.hessian(x)
def eval_hess_vec_f_reverse(self, x, v):
return self.cg.hess_vec(x,v)
def eval_vec_hess_g_reverse(self, w, x):
return self.cg2.vec_hess(w, x)
def eval_grad_Lagrangian_reverse(self, lam, x):
return self.cg.gradient(x) + self.cg2.vec_jac(lam, x)
def eval_hess_Lagrangian_reverse(self, lam, x):
return self.cg.hessian(x) + self.cg2.vec_hess(lam, x)
lam = numpy.array([1,1,1],dtype=float)
x = numpy.array([1,2,3,4,0,1,1],dtype=float)
v = numpy.array([1,1,1,1,1,1,1],dtype=float)
lagra = numpy.array([1,2,0],dtype=float)
V = numpy.eye(7)
m = Model()
print('normal function evaluation')
m.eval_f(x)
m.eval_g(x)
print('Forward Mode')
grad_f_forward = m.eval_grad_f_forward(x)
jac_g_forward = m.eval_jac_g_forward(x)
jac_vec_g_forward = m.eval_jac_vec_g_forward(x,[1,0,0,0,0,0,0])
grad_Lagrangian_forward = m.eval_grad_Lagrangian_forward(lam, x)
hess_Lagrangian_forward = m.eval_hess_Lagrangian_forward(lam, x)
vec_hess_g_forward = m.eval_vec_hess_g_forward(lagra, x)
print(grad_f_forward)
print(jac_g_forward)
print(jac_vec_g_forward)
print(grad_Lagrangian_forward)
print(hess_Lagrangian_forward)
print(vec_hess_g_forward)
print('Reverse Mode')
m.trace_eval_f(x)
m.trace_eval_g(x)
grad_f_reverse = m.eval_grad_f_reverse(x)
jac_g_reverse = m.eval_jac_g_reverse(x)
hess_f_reverse = m.eval_hess_f_reverse(x)
hess_vec_f_reverse = m.eval_hess_vec_f_reverse(x,v)
vec_hess_g_reverse = m.eval_vec_hess_g_reverse(lagra, x)
grad_Lagrangian_reverse = m.eval_grad_Lagrangian_reverse(lam, x)
hess_Lagrangian_reverse = m.eval_hess_Lagrangian_reverse(lam, x)
print(grad_f_reverse)
print(jac_g_reverse)
print(hess_f_reverse)
print(hess_vec_f_reverse)
print(vec_hess_g_reverse)
print(grad_Lagrangian_reverse)
from numpy.testing import assert_almost_equal
assert_almost_equal(grad_f_forward, grad_f_reverse)
assert_almost_equal(jac_g_forward, jac_g_reverse)
assert_almost_equal(grad_Lagrangian_forward, grad_Lagrangian_reverse)
assert_almost_equal(hess_Lagrangian_forward, hess_Lagrangian_reverse)
assert_almost_equal(vec_hess_g_forward, vec_hess_g_reverse) | [
"numpy.eye",
"algopy.UTPM.init_jacobian",
"algopy.zeros",
"algopy.UTPM.extract_hessian",
"algopy.Function",
"numpy.array",
"numpy.testing.assert_almost_equal",
"algopy.UTPM.init_hessian",
"algopy.CGraph",
"algopy.UTPM.init_jac_vec"
] | [((2810, 2845), 'numpy.array', 'numpy.array', (['[1, 1, 1]'], {'dtype': 'float'}), '([1, 1, 1], dtype=float)\n', (2821, 2845), False, 'import numpy\n'), ((2847, 2894), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 0, 1, 1]'], {'dtype': 'float'}), '([1, 2, 3, 4, 0, 1, 1], dtype=float)\n', (2858, 2894), False, 'import numpy\n'), ((2892, 2939), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1, 1, 1, 1]'], {'dtype': 'float'}), '([1, 1, 1, 1, 1, 1, 1], dtype=float)\n', (2903, 2939), False, 'import numpy\n'), ((2941, 2976), 'numpy.array', 'numpy.array', (['[1, 2, 0]'], {'dtype': 'float'}), '([1, 2, 0], dtype=float)\n', (2952, 2976), False, 'import numpy\n'), ((2978, 2990), 'numpy.eye', 'numpy.eye', (['(7)'], {}), '(7)\n', (2987, 2990), False, 'import numpy\n'), ((4272, 4323), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['grad_f_forward', 'grad_f_reverse'], {}), '(grad_f_forward, grad_f_reverse)\n', (4291, 4323), False, 'from numpy.testing import assert_almost_equal\n'), ((4324, 4373), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['jac_g_forward', 'jac_g_reverse'], {}), '(jac_g_forward, jac_g_reverse)\n', (4343, 4373), False, 'from numpy.testing import assert_almost_equal\n'), ((4374, 4443), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['grad_Lagrangian_forward', 'grad_Lagrangian_reverse'], {}), '(grad_Lagrangian_forward, grad_Lagrangian_reverse)\n', (4393, 4443), False, 'from numpy.testing import assert_almost_equal\n'), ((4444, 4513), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hess_Lagrangian_forward', 'hess_Lagrangian_reverse'], {}), '(hess_Lagrangian_forward, hess_Lagrangian_reverse)\n', (4463, 4513), False, 'from numpy.testing import assert_almost_equal\n'), ((4514, 4573), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vec_hess_g_forward', 'vec_hess_g_reverse'], {}), '(vec_hess_g_forward, vec_hess_g_reverse)\n', (4533, 4573), False, 'from numpy.testing import assert_almost_equal\n'), 
((284, 308), 'algopy.zeros', 'algopy.zeros', (['(3)'], {'dtype': 'x'}), '(3, dtype=x)\n', (296, 308), False, 'import algopy\n'), ((772, 800), 'algopy.UTPM.init_jacobian', 'algopy.UTPM.init_jacobian', (['x'], {}), '(x)\n', (797, 800), False, 'import algopy\n'), ((911, 939), 'algopy.UTPM.init_jacobian', 'algopy.UTPM.init_jacobian', (['x'], {}), '(x)\n', (936, 939), False, 'import algopy\n'), ((1057, 1087), 'algopy.UTPM.init_jac_vec', 'algopy.UTPM.init_jac_vec', (['x', 'v'], {}), '(x, v)\n', (1081, 1087), False, 'import algopy\n'), ((1354, 1381), 'algopy.UTPM.init_hessian', 'algopy.UTPM.init_hessian', (['x'], {}), '(x)\n', (1378, 1381), False, 'import algopy\n'), ((1521, 1548), 'algopy.UTPM.init_hessian', 'algopy.UTPM.init_hessian', (['x'], {}), '(x)\n', (1545, 1548), False, 'import algopy\n'), ((1608, 1648), 'algopy.UTPM.extract_hessian', 'algopy.UTPM.extract_hessian', (['x.size', 'tmp'], {}), '(x.size, tmp)\n', (1635, 1648), False, 'import algopy\n'), ((1736, 1751), 'algopy.CGraph', 'algopy.CGraph', ([], {}), '()\n', (1749, 1751), False, 'import algopy\n'), ((1764, 1782), 'algopy.Function', 'algopy.Function', (['x'], {}), '(x)\n', (1779, 1782), False, 'import algopy\n'), ((1980, 1995), 'algopy.CGraph', 'algopy.CGraph', ([], {}), '()\n', (1993, 1995), False, 'import algopy\n'), ((2008, 2026), 'algopy.Function', 'algopy.Function', (['x'], {}), '(x)\n', (2023, 2026), False, 'import algopy\n')] |
import copy
import matplotlib as mpl
import matplotlib.pylab as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from ..import extractroi
CM_PHASE = "viridis"
CM_PHASE_ERROR = copy.copy(plt.get_cmap("seismic"))
CM_PHASE_ERROR.set_over("#37FF32")
CM_PHASE_ERROR.set_under("#05C500")
CM_INTENSITY = "gray"
CM_REFRACTIVE_INDEX = "gnuplot2"
try:
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams["text.usetex"] = False
except TypeError: # building the docs
pass
def add_cbar(ax, mapper, fmt="%.2f", units="",
loc="right", size="5%", labelloc=None,
extend="neither"):
"""Add a colorbar to a plot"""
if labelloc is None:
labelloc = loc
divider = make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=0.05)
if units:
cax.set_title(units, ha="left", loc="center", fontsize=11)
acbar = plt.colorbar(mapper, cax=cax, format=fmt, extend=extend)
acbar.ax.yaxis.set_ticks_position(labelloc)
acbar.ax.yaxis.set_label_position(labelloc)
return acbar
def plot_image(data, ax=None, imtype="phase", cbar=True, px_um=None,
ret_cbar=False, cbformat=None, **kwargs):
"""Plot an image
Parameters
----------
data: 2d np.ndarray
Input image
ax: matplotlib.Axes
Axis to plot to
imtype: str
One of ["intensity", "phase", "phase error",
"refractive index"].
cbar: bool
Whether to add a colorbar.
px_um: float
Pixel size [µm]
ret_cbar: bool
Whether to return the colorbar.
kwargs: dict
Keyword arguments to `plt.imshow`.
Returns
-------
ax [, cbar]:
Axis and colorbar.
"""
if ax is None:
ax = plt.subplot(111)
cbkw = {}
if cbformat is None:
cbkw["fmt"] = "%.3f"
else:
cbkw["fmt"] = cbformat
if imtype == "phase":
cmap = CM_PHASE
gridcolor = "w"
if cbformat is None:
cbkw["fmt"] = "%.1f"
cbkw["units"] = "[rad]"
elif imtype == "intensity":
cmap = CM_INTENSITY
gridcolor = "k"
cbkw["units"] = "[a.u.]"
# Make sure gray is at 1 in the colormap
if "vmin" in kwargs and "vmax" in kwargs:
vmin = kwargs["vmin"]
vmax = kwargs["vmax"]
if vmin < 1 and vmax > 1:
diff = max(1 - vmin, vmax - 1)
kwargs["vmin"] = 1 - diff
kwargs["vmax"] = 1 + diff
elif imtype == "refractive index":
cmap = CM_REFRACTIVE_INDEX
gridcolor = "w"
cbkw["units"] = ""
elif imtype == "phase error":
cmap = CM_PHASE_ERROR
gridcolor = "k"
if "vmin" not in kwargs and "vmax" not in kwargs:
vmax = np.max(np.abs(data))
kwargs["vmax"] = vmax
kwargs["vmin"] = -vmax
cbkw["units"] = "[rad]"
cbkw["extend"] = "both"
else:
raise ValueError("Unknown image type: {}".format(imtype))
if px_um is None:
shx, shy = np.array(data.shape)
unit = "px"
else:
shx, shy = np.array(data.shape) * px_um
unit = "µm"
mapper = ax.imshow(data,
cmap=cmap,
extent=(0, shy, 0, shx),
interpolation="bilinear",
origin="lower",
**kwargs)
ax.set_xlabel("x [{}]".format(unit))
ax.set_ylabel("y [{}]".format(unit))
ax.grid(color=gridcolor, lw="1", alpha=.1)
retval = [ax]
if cbar:
acbar = add_cbar(ax=ax,
mapper=mapper,
**cbkw)
if ret_cbar:
retval.append(acbar)
if len(retval) == 1:
return retval[0]
else:
return retval
def plot_qpi_phase(qpi, rois=None, path=None, labels_excluded=[]):
"""Plot phase data"""
fig = plt.figure(figsize=(6, 4))
ax1 = plt.subplot(111)
px_um = qpi["pixel size"] * 1e6
plot_image(data=qpi.pha,
ax=ax1,
imtype="phase",
cbar=True,
px_um=px_um)
if rois:
for roi in rois:
slx, sly = roi.roi_slice
x0 = slx.start * px_um
x1 = slx.stop * px_um
y0 = sly.start * px_um
y1 = sly.stop * px_um
if extractroi.is_ignored_roi(roi=roi, ignore_data=labels_excluded):
color = "r"
ax1.text(y1, x1, "excluded",
horizontalalignment="right",
verticalalignment="top",
color=color)
else:
color = "w"
box = mpl.patches.Rectangle(xy=(y0, x0),
width=y1 - y0,
height=x1 - x0,
facecolor="none",
edgecolor=color,
)
ax1.add_patch(box)
ax1.text(y0, x0, roi.identifier,
horizontalalignment="left",
verticalalignment="bottom",
color=color)
plt.tight_layout(rect=(0, 0, 1, .93), pad=.1)
fig.text(x=.5, y=.99, s="sensor phase image",
verticalalignment="top",
horizontalalignment="center",
fontsize=14)
if path:
fig.savefig(path)
plt.close()
else:
return fig
def plot_qpi_sphere(qpi_real, qpi_sim, path=None, simtype="simulation"):
"""Plot QPI sphere analysis data"""
fig = plt.figure(figsize=(9, 5))
px_um = qpi_real["pixel size"] * 1e6
radius_um = qpi_sim["sim radius"] * 1e6
center = qpi_sim["sim center"]
index = qpi_sim["sim index"]
real_phase = qpi_real.pha
kw_phase = {"px_um": px_um,
"cbar": True,
"imtype": "phase",
"vmin": real_phase.min(),
"vmax": real_phase.max(),
}
real_inten = qpi_real.amp**2
kw_inten = {"px_um": px_um,
"cbar": True,
"imtype": "intensity",
"vmin": real_inten.min(),
"vmax": real_inten.max(),
}
# real phase
ax1 = plt.subplot(231, title="data (phase)")
plot_image(data=real_phase, ax=ax1, **kw_phase)
# simulated phase
ax2 = plt.subplot(232, title=simtype + " (phase)")
plot_image(data=qpi_sim.pha, ax=ax2, **kw_phase)
ax2.text(0.01, .99,
"index: {:.5f}\n".format(index)
+ "radius: {:.3f}µm".format(radius_um),
horizontalalignment="left",
verticalalignment="top",
color="w",
transform=ax2.transAxes,
)
# phase residuals
ax3 = plt.subplot(233, title="phase residuals")
errmax = qpi_sim.pha.max() * .2
plot_image(data=qpi_sim.pha - real_phase, ax=ax3,
imtype="phase error", vmax=errmax, vmin=-errmax,
px_um=px_um)
# real intensity
ax4 = plt.subplot(234, title="data (intensity)")
plot_image(data=real_inten, ax=ax4, **kw_inten)
# computed intensity
ax5 = plt.subplot(235)
if len(simtype) > 9:
# sometimes the title is too long and is printed on top of the units
kw5 = {"loc": "right",
"ha": "right"}
else:
kw5 = {}
ax5.set_title(simtype + " (intensity)", **kw5)
plot_image(data=qpi_sim.amp**2, ax=ax5, **kw_inten)
# plot detected radius
for ax in [ax1, ax2, ax4, ax5]:
circ = mpl.patches.Circle(xy=((center[1] + .5) * px_um,
(center[0] + .5) * px_um),
radius=radius_um,
facecolor="none",
edgecolor="w",
ls=(0, (3, 8)),
lw=.5,
)
ax.add_patch(circ)
# line plot through center
ax6 = plt.subplot(236, title="phase line plot")
if int(center[0]) >= 0 and int(center[0]) < qpi_sim.shape[0]:
x = np.arange(qpi_real.shape[1]) * px_um
ax6.plot(x, qpi_sim.pha[int(center[0])], label=simtype)
ax6.plot(x, qpi_real.pha[int(center[0])], label="data")
ax6.set_xlabel("[µm]")
ax6.legend(loc="center right")
# remove unused labels
for ax in [ax1, ax2, ax3]:
ax.set_xlabel("")
for ax in [ax2, ax3, ax5]:
ax.set_ylabel("")
plt.tight_layout(rect=(0, 0, 1, .93), pad=.1, h_pad=.6)
# add identifier
fig.text(x=.5, y=.99, s=qpi_sim["identifier"],
verticalalignment="top",
horizontalalignment="center",
fontsize=14)
if path:
fig.savefig(path)
plt.close()
else:
return fig
| [
"numpy.abs",
"matplotlib.patches.Rectangle",
"matplotlib.pylab.figure",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.colorbar",
"numpy.array",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pylab.subplot",
"matplotlib.pylab.get_cmap",
"matplotlib.pylab.close",
"matplotlib.patch... | [((220, 243), 'matplotlib.pylab.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (232, 243), True, 'import matplotlib.pylab as plt\n'), ((743, 766), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (762, 766), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((916, 972), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['mapper'], {'cax': 'cax', 'format': 'fmt', 'extend': 'extend'}), '(mapper, cax=cax, format=fmt, extend=extend)\n', (928, 972), True, 'import matplotlib.pylab as plt\n'), ((3949, 3975), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (3959, 3975), True, 'import matplotlib.pylab as plt\n'), ((3986, 4002), 'matplotlib.pylab.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3997, 4002), True, 'import matplotlib.pylab as plt\n'), ((5264, 5311), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'rect': '(0, 0, 1, 0.93)', 'pad': '(0.1)'}), '(rect=(0, 0, 1, 0.93), pad=0.1)\n', (5280, 5311), True, 'import matplotlib.pylab as plt\n'), ((5682, 5708), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (5692, 5708), True, 'import matplotlib.pylab as plt\n'), ((6356, 6394), 'matplotlib.pylab.subplot', 'plt.subplot', (['(231)'], {'title': '"""data (phase)"""'}), "(231, title='data (phase)')\n", (6367, 6394), True, 'import matplotlib.pylab as plt\n'), ((6480, 6524), 'matplotlib.pylab.subplot', 'plt.subplot', (['(232)'], {'title': "(simtype + ' (phase)')"}), "(232, title=simtype + ' (phase)')\n", (6491, 6524), True, 'import matplotlib.pylab as plt\n'), ((6889, 6930), 'matplotlib.pylab.subplot', 'plt.subplot', (['(233)'], {'title': '"""phase residuals"""'}), "(233, title='phase residuals')\n", (6900, 6930), True, 'import matplotlib.pylab as plt\n'), ((7145, 7187), 'matplotlib.pylab.subplot', 'plt.subplot', (['(234)'], {'title': '"""data (intensity)"""'}), "(234, 
title='data (intensity)')\n", (7156, 7187), True, 'import matplotlib.pylab as plt\n'), ((7276, 7292), 'matplotlib.pylab.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (7287, 7292), True, 'import matplotlib.pylab as plt\n'), ((8133, 8174), 'matplotlib.pylab.subplot', 'plt.subplot', (['(236)'], {'title': '"""phase line plot"""'}), "(236, title='phase line plot')\n", (8144, 8174), True, 'import matplotlib.pylab as plt\n'), ((8627, 8685), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'rect': '(0, 0, 1, 0.93)', 'pad': '(0.1)', 'h_pad': '(0.6)'}), '(rect=(0, 0, 1, 0.93), pad=0.1, h_pad=0.6)\n', (8643, 8685), True, 'import matplotlib.pylab as plt\n'), ((1775, 1791), 'matplotlib.pylab.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1786, 1791), True, 'import matplotlib.pylab as plt\n'), ((3086, 3106), 'numpy.array', 'np.array', (['data.shape'], {}), '(data.shape)\n', (3094, 3106), True, 'import numpy as np\n'), ((5516, 5527), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (5525, 5527), True, 'import matplotlib.pylab as plt\n'), ((7670, 7831), 'matplotlib.patches.Circle', 'mpl.patches.Circle', ([], {'xy': '((center[1] + 0.5) * px_um, (center[0] + 0.5) * px_um)', 'radius': 'radius_um', 'facecolor': '"""none"""', 'edgecolor': '"""w"""', 'ls': '(0, (3, 8))', 'lw': '(0.5)'}), "(xy=((center[1] + 0.5) * px_um, (center[0] + 0.5) * px_um\n ), radius=radius_um, facecolor='none', edgecolor='w', ls=(0, (3, 8)),\n lw=0.5)\n", (7688, 7831), True, 'import matplotlib as mpl\n'), ((8911, 8922), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (8920, 8922), True, 'import matplotlib.pylab as plt\n'), ((3156, 3176), 'numpy.array', 'np.array', (['data.shape'], {}), '(data.shape)\n', (3164, 3176), True, 'import numpy as np\n'), ((4749, 4854), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', ([], {'xy': '(y0, x0)', 'width': '(y1 - y0)', 'height': '(x1 - x0)', 'facecolor': '"""none"""', 'edgecolor': 'color'}), "(xy=(y0, x0), width=y1 - y0, 
height=x1 - x0, facecolor\n ='none', edgecolor=color)\n", (4770, 4854), True, 'import matplotlib as mpl\n'), ((8253, 8281), 'numpy.arange', 'np.arange', (['qpi_real.shape[1]'], {}), '(qpi_real.shape[1])\n', (8262, 8281), True, 'import numpy as np\n'), ((2821, 2833), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2827, 2833), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics import roc_auc_score
from src.model import VectorSpaceModel
from src.mongo_queries import db, create_user_profile, get_n_most_popular
import time
import os
n_users = 10
n_articles_2_rec = 10
threshold = 0.01
def get_articles_from_query_result(result):
articles = list(map(lambda x: list(db.articles.find().limit(1).skip(x))[0], result))
return articles
if __name__ == '__main__':
start = time.time()
model_path = os.getcwd() + '/model'
model = VectorSpaceModel()
model.load(path=model_path)
# profiles = create_article_profiles()
# model.build(profiles)
# model.save(path=model_path)
print("Building time:", time.time() - start)
user_profile_iterator = db.test_user_profiles.find()
n_articles = db.articles.find().count()
recall_list = []
precision_list = []
arhr_list = []
auc_list = []
for i in range(n_users):
print(i)
current_user = user_profile_iterator.next()["_id"]
user_profile = create_user_profile(current_user)
read = db.user_profiles.find_one()["events"]
read_ids = list(map(lambda x: x['articleId'], read))
test_read = db.test_user_profiles.find_one({"_id": current_user})["events"]
test_read_ids = list(map(lambda x: x['articleId'], test_read))
query_results = model.query(query=user_profile, threshold=threshold)
if len(query_results) > int(n_articles_2_rec):
query_results = query_results[:int(n_articles_2_rec)]
results = list(map(lambda x: x[0], query_results))
results = list(filter(lambda x: x not in read_ids, results)) # filters out already read articles
articles = get_articles_from_query_result(results)
if len(articles) < int(n_articles_2_rec):
n_missing = int(n_articles_2_rec) - len(articles) + len(read_ids)
most_popular = get_n_most_popular(n_missing)
tmp_recommended_ids = list(map(lambda x: x['_id'], articles))
for popular_article in most_popular:
if popular_article['_id'] not in read_ids and popular_article['_id'] not in tmp_recommended_ids:
articles.append(popular_article)
if len(articles) == int(n_articles_2_rec):
break
recommended_ids = list(map(lambda x: x['_id'], articles))
true_positives = len(set(recommended_ids).intersection(set(test_read_ids)))
false_positives = len(recommended_ids) - true_positives
false_negatives = len(test_read_ids) - true_positives
true_negatives = n_articles - true_positives - false_positives - false_negatives
y_true = []
y_score = []
for article in db.articles.find():
if article["_id"] in test_read_ids:
y_true.append(1)
else:
y_true.append(0)
if article["_id"] in recommended_ids:
y_score.append(1)
else:
y_score.append(0)
y_true = np.array(y_true)
y_score = np.array(y_score)
auc = roc_auc_score(y_true, y_score)
try:
precision = true_positives / (true_positives + false_positives)
except ZeroDivisionError:
precision = 0
try:
recall = true_positives / (true_positives + false_negatives)
except ZeroDivisionError:
recall = 0
try:
f_measure = 2 * (precision * recall / (precision + recall))
except ZeroDivisionError:
f_measure = 0
arhr = sum(
list(map(lambda x: 0 if x not in recommended_ids else 1 / (recommended_ids.index(x) + 1), test_read_ids)))
recall_list.append(recall)
precision_list.append(precision)
arhr_list.append(arhr)
auc_list.append(auc)
global_precision = sum(precision_list) / len(precision_list)
global_recall = sum(recall_list) / len(recall_list)
global_f_measure = 2 * (global_precision * global_recall / (global_precision + global_recall))
global_arhr = sum(arhr_list) / len(arhr_list)
global_auc = sum(auc_list) / len(auc_list)
print("Global Precision:", global_precision)
print("Global Recall:", global_recall)
print("Global F-Measure:", global_f_measure)
print("Global ARHR:", global_arhr)
print("Global AUC", global_auc)
print("Total time:", time.time() - start)
| [
"src.mongo_queries.db.test_user_profiles.find_one",
"src.mongo_queries.db.user_profiles.find_one",
"src.model.VectorSpaceModel",
"src.mongo_queries.get_n_most_popular",
"os.getcwd",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"src.mongo_queries.db.articles.find",
"time.time",
"src.mongo_querie... | [((445, 456), 'time.time', 'time.time', ([], {}), '()\n', (454, 456), False, 'import time\n'), ((509, 527), 'src.model.VectorSpaceModel', 'VectorSpaceModel', ([], {}), '()\n', (525, 527), False, 'from src.model import VectorSpaceModel\n'), ((743, 771), 'src.mongo_queries.db.test_user_profiles.find', 'db.test_user_profiles.find', ([], {}), '()\n', (769, 771), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((474, 485), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (483, 485), False, 'import os\n'), ((1029, 1062), 'src.mongo_queries.create_user_profile', 'create_user_profile', (['current_user'], {}), '(current_user)\n', (1048, 1062), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((2757, 2775), 'src.mongo_queries.db.articles.find', 'db.articles.find', ([], {}), '()\n', (2773, 2775), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((3062, 3078), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (3070, 3078), True, 'import numpy as np\n'), ((3097, 3114), 'numpy.array', 'np.array', (['y_score'], {}), '(y_score)\n', (3105, 3114), True, 'import numpy as np\n'), ((3129, 3159), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (3142, 3159), False, 'from sklearn.metrics import roc_auc_score\n'), ((693, 704), 'time.time', 'time.time', ([], {}), '()\n', (702, 704), False, 'import time\n'), ((789, 807), 'src.mongo_queries.db.articles.find', 'db.articles.find', ([], {}), '()\n', (805, 807), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((1078, 1105), 'src.mongo_queries.db.user_profiles.find_one', 'db.user_profiles.find_one', ([], {}), '()\n', (1103, 1105), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((1198, 1251), 'src.mongo_queries.db.test_user_profiles.find_one', 'db.test_user_profiles.find_one', 
(["{'_id': current_user}"], {}), "({'_id': current_user})\n", (1228, 1251), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((1913, 1942), 'src.mongo_queries.get_n_most_popular', 'get_n_most_popular', (['n_missing'], {}), '(n_missing)\n', (1931, 1942), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n'), ((4435, 4446), 'time.time', 'time.time', ([], {}), '()\n', (4444, 4446), False, 'import time\n'), ((334, 352), 'src.mongo_queries.db.articles.find', 'db.articles.find', ([], {}), '()\n', (350, 352), False, 'from src.mongo_queries import db, create_user_profile, get_n_most_popular\n')] |
# Small numpy feature tour: dot product, concatenation, indexing,
# scalar/vector arithmetic and basic statistics on random int matrices.
# Fixed here: every result used to be printed under the *following*
# section's label; each result is now shown under its own heading.
import numpy as np

a = np.random.randint(4, size=(2, 3))
b = np.random.randint(4, size=(3, 2))
print('-------------')
print(a)
print('-------------')
print(b)
print('-------------')
print('array dot product')
print(a.shape, b.shape)
c = a.dot(b)
print(c)
print(c.shape)
print('-------------')
print('array concat axis 0')
d = np.concatenate((b, c), axis=0)
print(d)
print(d.shape)
print('-------------')
print('array concat axis 1')
e = np.concatenate((a, c), axis=1)
print(e)
print(e.shape)
print('-------------')
print('indexing')
f = e[1, 1]
print(f)
print(f.shape)
print('-------------')
print('scalar math')
g = np.multiply(a, 22)
print(g)
print(g.shape)
print('-------------')
print('vector math')
h = np.add(a[:, :2], b[:2])
print(a[:, :2])
print(b[:2])
print(h)
print(h.shape)
print('-------------')
print('statistic')
print(np.mean(h))
print(a.T)
| [
"numpy.mean",
"numpy.multiply",
"numpy.add",
"numpy.random.randint",
"numpy.concatenate"
] | [((24, 57), 'numpy.random.randint', 'np.random.randint', (['(4)'], {'size': '(2, 3)'}), '(4, size=(2, 3))\n', (41, 57), True, 'import numpy as np\n'), ((62, 95), 'numpy.random.randint', 'np.random.randint', (['(4)'], {'size': '(3, 2)'}), '(4, size=(3, 2))\n', (79, 95), True, 'import numpy as np\n'), ((327, 357), 'numpy.concatenate', 'np.concatenate', (['(b, c)'], {'axis': '(0)'}), '((b, c), axis=0)\n', (341, 357), True, 'import numpy as np\n'), ((438, 468), 'numpy.concatenate', 'np.concatenate', (['(a, c)'], {'axis': '(1)'}), '((a, c), axis=1)\n', (452, 468), True, 'import numpy as np\n'), ((617, 635), 'numpy.multiply', 'np.multiply', (['a', '(22)'], {}), '(a, 22)\n', (628, 635), True, 'import numpy as np\n'), ((708, 731), 'numpy.add', 'np.add', (['a[:, :2]', 'b[:2]'], {}), '(a[:, :2], b[:2])\n', (714, 731), True, 'import numpy as np\n'), ((831, 841), 'numpy.mean', 'np.mean', (['h'], {}), '(h)\n', (838, 841), True, 'import numpy as np\n')] |
import numpy as np
import itertools
import sys
from datetime import datetime
# Output switches for the backtracking code search below.
PRINT_BENCHMARKS = True  # print timing of the distance-table pre-computation
PRINT_OUTPUT = True  # print detailed output; when False only critical lines print
FILE_OUTPUT = False  # additionally write results to output_backtracker.txt
"""
Version 2.1
Instead of writing the Hamming distance in the table, just a boolean which says it's >= d or not.
Benchmark:
n=6, d=4: 0:00:00.004000 - correct answer: 4
n=7, d=4: 0:00:00.012001 - correct answer: 8
n=8, d=4: 0:00:00.050291 - correct answer: 16
n=9, d=4: 0:01:07.503750 - correct answer: 20
"""
def hamming_distance(vector_1: np.array, vector_2: np.array) -> int:
    """Number of coordinates in which the two vectors differ."""
    differing = vector_1 != vector_2
    return np.count_nonzero(differing)
def generate_all_vectors(length: int, q: int=2) -> list:
    """All q-ary words of the given length, each as an np.array, in
    lexicographic order.

    :param length: word length n
    :param q: alphabet size of GF(q); 2 means binary words of 0/1 bits
    :return: list of np.arrays covering every possible word
    """
    alphabet = list(range(q))
    return [np.array(word) for word in itertools.product(alphabet, repeat=length)]
def generate_hamming_distance_table(vector_list: list, minimum_distance: int, print_result: bool=False) -> list:
    """Boolean table where entry [i][j] is True iff vectors i and j are at
    least ``minimum_distance`` apart (the diagonal is False).

    :param vector_list: list of vectors (integer indexes into this list are
        used everywhere else as vector ids)
    :param minimum_distance: pairs at least this far apart are flagged True
    :param print_result: dump the finished table row by row
    :return: symmetric list-of-lists of booleans
    """
    started_at = datetime.now()
    table = []
    for row_index, row_vector in enumerate(vector_list):
        row = []
        for col_index, col_vector in enumerate(vector_list):
            if row_index == col_index:
                row.append(False)
            elif row_index > col_index:
                # table is symmetric: reuse the mirrored entry computed earlier
                row.append(table[col_index][row_index])
            else:
                row.append(hamming_distance(row_vector, col_vector) >= minimum_distance)
        table.append(row)
    if PRINT_BENCHMARKS and PRINT_OUTPUT:
        print('--- distance table pre-computation time: ' + str(datetime.now() - started_at))
    if print_result:
        for row in table:
            print(row)
    return table
def lexi_sorter(vectors_list: list) -> list:
    """Return the vectors sorted by Hamming weight (non-zero count), ascending."""
    return sorted(vectors_list, key=lambda vector: np.count_nonzero(vector))
def is_word_satisfy_minimum_distance_of_code(code: list, hamming_distance_list_for_word: list) -> bool:
    """True iff the word keeps the minimum distance to every codeword in *code*.

    ``hamming_distance_list_for_word[i]`` is the pre-computed flag saying
    whether the word is far enough from vector ``i``.
    """
    return all(hamming_distance_list_for_word[codeword] for codeword in reversed(code))
def backtrack(level: int=0) -> (int, list):
    """Depth-first search that tries to fill ``code`` with ``promised_M``
    pairwise-distant codewords, choosing candidates in weight order.

    Relies on module-level state: ``code``, ``candidates``,
    ``hamming_distance_table``, ``promised_M``, ``leading_bit_non_zero``, ``q``.

    :param level: index in ``code`` currently being filled; ``code[0..level-1]``
        are already placed when this is called.
    :return: tuple (deepest successfully filled index, code list).
    """
    global code, candidates, hamming_distance_table, promised_M, leading_bit_non_zero, q
    for lexi_index, word in enumerate(candidates[level]):
        hamming_distance_list_for_word = hamming_distance_table[word]
        # place word at position `level`
        if len(code) <= level:
            code.append(word)
        else:
            code[level] = word
        # prune: a word with zero leading bit this deep cannot be extended
        if not leading_bit_non_zero[word] and level >= (promised_M / q):
            return level, code
        if level + 1 >= promised_M:
            return level, code
        if len(candidates) <= level + 1:
            candidates.append([])
        else:
            candidates[level + 1] = []
        # candidates for the next level: later words still far enough from `word`
        for candidate_for_word in candidates[level][lexi_index:]:
            if hamming_distance_list_for_word[candidate_for_word]:
                candidates[level + 1].append(candidate_for_word)
        # not enough remaining candidates to ever reach promised_M
        if level + 1 + len(candidates[level + 1]) < promised_M:
            return level, code
        found_level, found_code = backtrack(level + 1)
        if found_level + 1 >= promised_M:
            return found_level, found_code
    # BUG FIX: this used to be `return level` (a bare int), which crashed the
    # caller's two-value unpacking.  When every candidate at this level fails,
    # the deepest successfully filled index is level - 1.
    return level - 1, code
# ---------------------------------------------------------------------------
# Driver: searches for the maximum code size M of an (n, d) code over GF(q).
# Usage: script.py [n] [d] [promised_M]
# ---------------------------------------------------------------------------
timer = datetime.now()
q = 2  # field size (binary)
n = 7  # word length
d = 4  # minimum Hamming distance
promised_M = 4  # target code size the search tries to certify
# Optional command-line overrides; missing or malformed arguments keep the
# defaults.  (Bare `except:` narrowed to the two exceptions that can occur.)
try:
    n = int(sys.argv[1])
except (IndexError, ValueError):
    pass
try:
    d = int(sys.argv[2])
except (IndexError, ValueError):
    pass
try:
    promised_M = int(sys.argv[3])
except (IndexError, ValueError):
    pass
# All q-ary vectors of length n, sorted by Hamming weight.
vectors = sorted(generate_all_vectors(n, q), key=np.count_nonzero)
leading_bit_non_zero = {lexi_index: (vector[0] != 0) for lexi_index, vector in enumerate(vectors)}
detailed_outputs = []
critical_outputs = []
if PRINT_OUTPUT:
    print(str([str(i) + ': ' + ''.join(map(str, vector)) for i, vector in enumerate(vectors)]))
# Pre-compute, for every pair of vectors, whether they are at least d apart.
hamming_distance_table = generate_hamming_distance_table(vectors, d)
init_candidates = list(range(len(vectors)))  # indexes into the weight-sorted 'vectors' list
code = []
candidates = [init_candidates]
max_found_M, best_code_vector_indexes = backtrack()
critical_outputs.append('=== For n=' + str(n) + ' and d=' + str(d) + ' in GF(' + str(q) + '):')
detailed_outputs.append(critical_outputs[-1])
critical_outputs.append('max found M is: ' + str(max_found_M + 1))
detailed_outputs.append(critical_outputs[-1])
detailed_outputs.append('code is: ' + str([''.join(map(str, vectors[i])) for i in best_code_vector_indexes]))
if PRINT_BENCHMARKS:
    critical_outputs.append('----------------------- process took: ' + str(datetime.now() - timer) + ' time ----')
    detailed_outputs.append(critical_outputs[-1])
# `output_file` renamed from `file` to stop shadowing the builtin.
output_file = None
if FILE_OUTPUT:
    output_file = open("output_backtracker.txt", "w")
for line in detailed_outputs:
    if PRINT_OUTPUT:
        print(line)
    if FILE_OUTPUT:
        output_file.write(line)
        output_file.write('\n')
if not PRINT_OUTPUT:
    for line in critical_outputs:
        print(line)
if FILE_OUTPUT:
    output_file.close()
| [
"numpy.count_nonzero",
"datetime.datetime.now"
] | [((3970, 3984), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3982, 3984), False, 'from datetime import datetime\n'), ((540, 578), 'numpy.count_nonzero', 'np.count_nonzero', (['(vector_1 != vector_2)'], {}), '(vector_1 != vector_2)\n', (556, 578), True, 'import numpy as np\n'), ((1641, 1655), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1653, 1655), False, 'from datetime import datetime\n'), ((2344, 2358), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2356, 2358), False, 'from datetime import datetime\n'), ((5499, 5513), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5511, 5513), False, 'from datetime import datetime\n')] |
# ported from MATLAB/Sandbox/GSpline/getExtraOrdCornerIndexMask.m
import numpy as np
from helper_functions import get_num_edges_meeting
from checkB1B2OrientationReversal import checkB1B2OrientationReversal
from checkB1B2Reversal_opt import checkB1B2Reversal_opt
def getExtraOrdCornerIndexMask(quad_list,AVertexList,B1VertexList,B2VertexList,CVertexList,quad_control_point_indices,quad_index,whichCorner):
    """Gather, for one corner of one quad, the control-point indices of all
    quads meeting at that (extraordinary) vertex, ordered around the vertex.

    Returns a (4, numberOfEdges) int array (after squeeze) whose rows are the
    A, B1, B2 and C control-point indices; the B1/B2 rows are swapped and the
    traversal direction reversed depending on the two check* orientation
    tests below.
    """
    # TODO: Understand and change code
    # helpers for index arithmetic modulo the number of edges around the vertex
    mod_index = lambda i, modul: (i)%modul
    shifted_indices = lambda ind, modul: mod_index(np.array(range(modul)) + ind,modul)
    reverse_shifted_indices = lambda ind, modul: mod_index(np.arange(modul,0,-1) + ind,modul)
    cornerVertexIndex = quad_list[quad_index,whichCorner]
    numberOfEdges = get_num_edges_meeting(AVertexList, cornerVertexIndex)
    # local slot of this quad in the vertex's fan
    # (assumes AVertexList[vertex, :, 1] stores quad ids — TODO confirm)
    quadLocalIndex = np.where(AVertexList[cornerVertexIndex,:,1] == quad_index)
    if checkB1B2Reversal_opt(B1VertexList,quad_list,quad_index,cornerVertexIndex,quad_control_point_indices):
        # B1/B2 roles are swapped for this corner
        if checkB1B2OrientationReversal(B2VertexList,B1VertexList,quad_list,quad_index,cornerVertexIndex):
            aroundcorner_indices = reverse_shifted_indices(quadLocalIndex,numberOfEdges)
        else:
            aroundcorner_indices = shifted_indices(quadLocalIndex,numberOfEdges)
        B1Indices = B2VertexList[cornerVertexIndex,aroundcorner_indices,0]
        B2Indices = B1VertexList[cornerVertexIndex,aroundcorner_indices,0]
    else:
        if checkB1B2OrientationReversal(B1VertexList,B2VertexList,quad_list,quad_index,cornerVertexIndex):
            aroundcorner_indices = reverse_shifted_indices(quadLocalIndex,numberOfEdges)
        else:
            aroundcorner_indices = shifted_indices(quadLocalIndex,numberOfEdges)
        B1Indices = B1VertexList[cornerVertexIndex,aroundcorner_indices,0]
        B2Indices = B2VertexList[cornerVertexIndex,aroundcorner_indices,0]
    AIndices = AVertexList[cornerVertexIndex,aroundcorner_indices,0]
    CIndices = CVertexList[cornerVertexIndex,aroundcorner_indices,0]
    # stack the four index rows and drop the singleton axis from np.where
    indexMask = np.array([AIndices[:].reshape([1, numberOfEdges]),
                          B1Indices[:].reshape([1, numberOfEdges]),
                          B2Indices[:].reshape([1, numberOfEdges]),
                          CIndices[:].reshape([1, numberOfEdges])], dtype=int)
    return np.squeeze(indexMask)
| [
"checkB1B2OrientationReversal.checkB1B2OrientationReversal",
"numpy.where",
"numpy.squeeze",
"checkB1B2Reversal_opt.checkB1B2Reversal_opt",
"helper_functions.get_num_edges_meeting",
"numpy.arange"
] | [((750, 803), 'helper_functions.get_num_edges_meeting', 'get_num_edges_meeting', (['AVertexList', 'cornerVertexIndex'], {}), '(AVertexList, cornerVertexIndex)\n', (771, 803), False, 'from helper_functions import get_num_edges_meeting\n'), ((876, 936), 'numpy.where', 'np.where', (['(AVertexList[cornerVertexIndex, :, 1] == quad_index)'], {}), '(AVertexList[cornerVertexIndex, :, 1] == quad_index)\n', (884, 936), True, 'import numpy as np\n'), ((942, 1051), 'checkB1B2Reversal_opt.checkB1B2Reversal_opt', 'checkB1B2Reversal_opt', (['B1VertexList', 'quad_list', 'quad_index', 'cornerVertexIndex', 'quad_control_point_indices'], {}), '(B1VertexList, quad_list, quad_index,\n cornerVertexIndex, quad_control_point_indices)\n', (963, 1051), False, 'from checkB1B2Reversal_opt import checkB1B2Reversal_opt\n'), ((2374, 2395), 'numpy.squeeze', 'np.squeeze', (['indexMask'], {}), '(indexMask)\n', (2384, 2395), True, 'import numpy as np\n'), ((1056, 1158), 'checkB1B2OrientationReversal.checkB1B2OrientationReversal', 'checkB1B2OrientationReversal', (['B2VertexList', 'B1VertexList', 'quad_list', 'quad_index', 'cornerVertexIndex'], {}), '(B2VertexList, B1VertexList, quad_list,\n quad_index, cornerVertexIndex)\n', (1084, 1158), False, 'from checkB1B2OrientationReversal import checkB1B2OrientationReversal\n'), ((1509, 1611), 'checkB1B2OrientationReversal.checkB1B2OrientationReversal', 'checkB1B2OrientationReversal', (['B1VertexList', 'B2VertexList', 'quad_list', 'quad_index', 'cornerVertexIndex'], {}), '(B1VertexList, B2VertexList, quad_list,\n quad_index, cornerVertexIndex)\n', (1537, 1611), False, 'from checkB1B2OrientationReversal import checkB1B2OrientationReversal\n'), ((635, 658), 'numpy.arange', 'np.arange', (['modul', '(0)', '(-1)'], {}), '(modul, 0, -1)\n', (644, 658), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
from __future__ import division
import numpy
import sympy
from ..helpers import untangle
class Felippa(object):
    """
    <NAME>,
    A compendium of FEM integration formulas for symbolic work,
    Engineering Computation,
    Volume 21, Number 8, 2004, pages 867-890.
    <https://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_wedge/quadrature_rules_wedge.html>

    Wedge quadrature rules.  ``index`` (1..6) selects the rule; the rule's
    polynomial degree is stored in ``self.degree`` and the untangled
    quadrature data in ``self.points`` / ``self.weights``.
    """

    def __init__(self, index, symbolic=False):
        # exact rational/sqrt arithmetic with sympy when symbolic, floats otherwise
        frac = sympy.Rational if symbolic else lambda x, y: x / y
        sqrt = numpy.vectorize(sympy.sqrt) if symbolic else numpy.sqrt
        if index == 1:
            # single centroid point
            self.degree = 1
            data = [(1, _s3(symbolic))]
        elif index == 2:
            self.degree = 2
            data = [(frac(1, 6), _s21_z(frac(1, 6), sqrt(frac(1, 3))))]
        elif index == 3:
            self.degree = 2
            data = [(frac(1, 6), _s21_z(frac(1, 2), sqrt(frac(1, 3))))]
        elif index == 4:
            self.degree = 4
            # roots of 135 x^4 - 240 x^3 + 120 x^2 - 20 x + 1
            a1, a2 = [
                (40 - 5 * sqrt(10) - i * sqrt(950 - 220 * sqrt(10))) / 90
                for i in [+1, -1]
            ]
            data = [
                (0.6205044157722541E-01, _s21_z(a2, sqrt(frac(3, 5)))),
                (0.3054215101536719E-01, _s21_z(a1, sqrt(frac(3, 5)))),
                (0.9928070652356065E-01, _s21(a2)),
                (0.4886744162458750E-01, _s21(a1)),
            ]
        elif index == 5:
            self.degree = 5
            a1, a2 = [(6 - i * sqrt(15)) / 21 for i in [+1, -1]]
            data = [
                (0.3498310570689643E-01, _s21_z(a1, sqrt(frac(3, 5)))),
                (0.3677615355236283E-01, _s21_z(a2, sqrt(frac(3, 5)))),
                (frac(1, 16), _s3_z(sqrt(frac(3, 5)), symbolic)),
                (0.5597296913103428E-01, _s21(a1)),
                (0.5884184568378053E-01, _s21(a2)),
                (frac(1, 10), _s3(symbolic)),
            ]
        else:
            # only six rules are tabulated
            assert index == 6
            self.degree = 6
            data = [
                (
                    0.8843323515718317E-02,
                    _s21_z(0.6308901449150223E-01, -0.8611363115940526),
                ),
                (
                    0.2031233592848984E-01,
                    _s21_z(0.2492867451709104, -0.8611363115940526),
                ),
                (
                    0.1441007403935041E-01,
                    _s111_z(
                        0.5314504984481695E-01, 0.3103524510337844, 0.8611363115940526
                    ),
                ),
                (
                    0.1657912966938509E-01,
                    _s21_z(0.6308901449150223E-01, 0.3399810435848563),
                ),
                (
                    0.3808080193469984E-01,
                    _s21_z(0.2492867451709104, 0.3399810435848563),
                ),
                (
                    0.2701546376983638E-01,
                    _s111_z(
                        0.5314504984481695E-01, 0.3103524510337844, 0.3399810435848563
                    ),
                ),
            ]
        self.points, self.weights = untangle(data)
        return
def _s3(symbolic):
frac = sympy.Rational if symbolic else lambda x, y: x / y
return [[frac(1, 3), frac(1, 3), 0]]
def _s3_z(z, symbolic):
frac = sympy.Rational if symbolic else lambda x, y: x / y
return [[frac(1, 3), frac(1, 3), +z], [frac(1, 3), frac(1, 3), -z]]
def _s21(a):
b = 1 - 2 * a
return [[a, b, 0], [b, a, 0], [a, a, 0]]
def _s21_z(a, z):
b = 1 - 2 * a
return [[a, b, +z], [b, a, +z], [a, a, +z], [a, b, -z], [b, a, -z], [a, a, -z]]
def _s111_z(a, b, z):
c = 1 - a - b
return [
[b, c, +z],
[a, b, +z],
[c, a, +z],
[c, b, +z],
[a, c, +z],
[b, a, +z],
[b, c, -z],
[a, b, -z],
[c, a, -z],
[c, b, -z],
[a, c, -z],
[b, a, -z],
]
| [
"numpy.vectorize"
] | [((542, 569), 'numpy.vectorize', 'numpy.vectorize', (['sympy.sqrt'], {}), '(sympy.sqrt)\n', (557, 569), False, 'import numpy\n')] |
import sys
import numpy as np
import pickle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model
# Number of units in the single hidden layer, taken from the command line.
n_hidden = int(sys.argv[1])
# Pre-computed training/validation features and training targets
# (4950 input features, 99 output targets — see the layer sizes below).
quad_t = np.load('./numpy_files/quad_t.npy')
y_t = np.load('./numpy_files/y_t99.npy')
quad_v = np.load('./numpy_files/quad_v.npy')
# Two-layer linear MLP trained with SGD on mean absolute error.
model = Sequential()
model.add(Dense(n_hidden, activation='linear', input_dim=4950))
model.add(Dense(99, activation='linear'))
model.compile(optimizer='SGD', loss='mean_absolute_error')
# Stop after 200 epochs without val_loss improvement; checkpoint keeps only
# the best model seen so far.
es = EarlyStopping(monitor='val_loss', min_delta=0, mode='min', verbose=0, patience=200)
mc = ModelCheckpoint(('./savedmodels/hidden_%s.h5' % str(n_hidden)), monitor='val_loss', mode='min', verbose=0, save_best_only=True)
hist = model.fit(quad_t, y_t, batch_size=32, epochs=5000, validation_split=0.1, verbose=0, callbacks=[es,mc])
# Reload the checkpointed best model and predict on the validation features.
saved_model = load_model('./savedmodels/hidden_%s.h5' % str(n_hidden))
yp = saved_model.predict(quad_v)
# Persist predictions and the full training history for later analysis.
res = {'pred': yp,
       'hist': hist.history
      }
with open('./pickles/keras/hidden/%s.pkl' % str(n_hidden), 'wb') as f:
    pickle.dump(res,f)
| [
"pickle.dump",
"tensorflow.keras.Sequential",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"numpy.load"
] | [((347, 382), 'numpy.load', 'np.load', (['"""./numpy_files/quad_t.npy"""'], {}), "('./numpy_files/quad_t.npy')\n", (354, 382), True, 'import numpy as np\n'), ((389, 423), 'numpy.load', 'np.load', (['"""./numpy_files/y_t99.npy"""'], {}), "('./numpy_files/y_t99.npy')\n", (396, 423), True, 'import numpy as np\n'), ((434, 469), 'numpy.load', 'np.load', (['"""./numpy_files/quad_v.npy"""'], {}), "('./numpy_files/quad_v.npy')\n", (441, 469), True, 'import numpy as np\n'), ((479, 491), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (489, 491), False, 'from tensorflow.keras import Sequential\n'), ((663, 750), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'mode': '"""min"""', 'verbose': '(0)', 'patience': '(200)'}), "(monitor='val_loss', min_delta=0, mode='min', verbose=0,\n patience=200)\n", (676, 750), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((502, 554), 'tensorflow.keras.layers.Dense', 'Dense', (['n_hidden'], {'activation': '"""linear"""', 'input_dim': '(4950)'}), "(n_hidden, activation='linear', input_dim=4950)\n", (507, 554), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((566, 596), 'tensorflow.keras.layers.Dense', 'Dense', (['(99)'], {'activation': '"""linear"""'}), "(99, activation='linear')\n", (571, 596), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((1230, 1249), 'pickle.dump', 'pickle.dump', (['res', 'f'], {}), '(res, f)\n', (1241, 1249), False, 'import pickle\n')] |
import vedo
import trimesh
import torch
import time
import numpy as np
from scipy.spatial.transform import Rotation as R
from scipy import linalg
import os
def eye(n, batch_shape):
    """Return a batch of n x n identity matrices.

    Args:
        n: size of each identity matrix.
        batch_shape: leading (batch) dimensions, e.g. ``det.shape``; may be
            empty for a single matrix.

    Returns:
        Float array of shape ``(*batch_shape, n, n)``.
    """
    # Build the shape as a plain int tuple instead of np.concatenate: the
    # old concatenation produced a float shape (rejected by np.zeros) when
    # batch_shape was empty.
    iden = np.zeros(tuple(batch_shape) + (n, n))
    # Fill the whole diagonal rather than hard-coding the first three
    # entries, so the helper is correct for any n (not just n == 3).
    diag = np.arange(n)
    iden[..., diag, diag] = 1.0
    return iden
def get_closest_rotmat(rotmats):
    """
    Project each input matrix onto the closest rotation matrix in the
    Frobenius-norm sense: with the SVD R = USV', the projection is UV',
    and the sign of the last column of U is flipped wherever det(UV') == -1
    so the result is a proper rotation (det == +1).
    Args:
        rotmats: np array of shape (..., 3, 3).
    Returns:
        A numpy array of the same shape as the inputs.
    """
    u, _, vh = np.linalg.svd(rotmats)
    candidate = np.matmul(u, vh)
    dets = np.linalg.det(candidate)  # (..., )
    # batch of identities with the bottom-right entry set to sign(det)
    sign_fix = eye(3, dets.shape)
    sign_fix[..., 2, 2] = np.sign(dets)
    return np.matmul(np.matmul(u, sign_fix), vh)
def recover_to_axis_angles(motion):
    """Split a flat motion tensor into per-joint axis-angles and translation.

    Args:
        motion: array of shape (batch, seq_len, 75); per frame the first 3
            values are the root translation, the remaining 72 values are 24
            joints with 3 axis-angle components each.

    Returns:
        (axis_angles, transl) with shapes (batch, seq_len, 24, 3) and
        (batch, seq_len, 3).
    """
    # (leftover debug `print(motion.shape)` removed)
    batch_size, seq_len, dim = motion.shape
    assert dim == 75
    transl = motion[:, :, 0:3]
    axis_angles = motion[:, :, 3:]
    axis_angles = axis_angles.reshape(batch_size, seq_len, 24, 3)
    return axis_angles, transl
def visualize(motion, smpl_model):
    """Play back a motion sequence frame by frame with vedo.

    Runs the SMPL model on the recovered poses/translation, then shows the
    resulting body mesh inside a wireframe bounding box; pressing ESC in the
    vedo window stops the playback early.
    """
    smpl_poses, smpl_trans = recover_to_axis_angles(motion)
    smpl_poses = np.squeeze(smpl_poses, axis=0)  # (seq_len, 24, 3)
    smpl_trans = np.squeeze(smpl_trans, axis=0)  # (seq_len, 3)
    # joint 0 is the global orientation; joints 1..23 form the body pose
    smpl_output = smpl_model.forward(
        global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(),
        body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(),
        transl=torch.from_numpy(smpl_trans).float(),
    )
    keypoints3d = smpl_output.joints.detach().numpy()  # (seq_len, 24, 3)
    vertices = smpl_output.vertices.detach().numpy()
    # axis-aligned bounding box over every keypoint of the whole sequence
    bbox_center = (
        keypoints3d.reshape(-1, 3).max(axis=0)
        + keypoints3d.reshape(-1, 3).min(axis=0)
    ) / 2.0
    bbox_size = (
        keypoints3d.reshape(-1, 3).max(axis=0)
        - keypoints3d.reshape(-1, 3).min(axis=0)
    )
    world = vedo.Box(bbox_center, bbox_size[0], bbox_size[1], bbox_size[2]).wireframe()
    vedo.show(world, axes=True, viewup="y", interactive=0)
    i = 0
    while i < keypoints3d.shape[0]:
        keypoints3d_i = keypoints3d[i]
        vertices_i = vertices[i]
        i += 1
        mesh = trimesh.Trimesh(vertices_i, smpl_model.faces)
        mesh.visual.face_colors = [200, 200, 250, 100]
        # NOTE(review): pts is never passed to vedo.show below — looks dead;
        # verify before removing.
        pts = vedo.Points(keypoints3d_i, r=20)
        plotter = vedo.show(world, mesh, interactive=0)
        if plotter.escaped:
            break  # if ESC
        time.sleep(0.005)
    vedo.interactive().close()
if __name__ == "__main__":
    import glob
    import tqdm
    from smplx import SMPL
    # Build the SMPL body model once and reuse it for every motion file.
    smpl = SMPL(model_path="../others/smpl/", gender='MALE', batch_size=1)
    # Motion files, oldest first (sorted by modification time).
    result_files = sorted(glob.glob("../data/original_motion_data/*.npy"), key=os.path.getmtime) #glob.glob("samples/*.npy")
    for result_file in tqdm.tqdm(result_files):
        print("Visual %s" % result_file)
        # add a leading batch axis: (1, num_frames, 75)
        result_motion = np.load(result_file)[None, ...]
        visualize(result_motion, smpl)
| [
"tqdm.tqdm",
"vedo.interactive",
"vedo.Points",
"numpy.squeeze",
"numpy.linalg.det",
"time.sleep",
"smplx.SMPL",
"torch.from_numpy",
"numpy.matmul",
"numpy.sign",
"numpy.concatenate",
"trimesh.Trimesh",
"numpy.linalg.svd",
"vedo.Box",
"numpy.load",
"vedo.show",
"glob.glob"
] | [((760, 782), 'numpy.linalg.svd', 'np.linalg.svd', (['rotmats'], {}), '(rotmats)\n', (773, 782), True, 'import numpy as np\n'), ((799, 815), 'numpy.matmul', 'np.matmul', (['u', 'vh'], {}), '(u, vh)\n', (808, 815), True, 'import numpy as np\n'), ((912, 936), 'numpy.linalg.det', 'np.linalg.det', (['r_closest'], {}), '(r_closest)\n', (925, 936), True, 'import numpy as np\n'), ((999, 1011), 'numpy.sign', 'np.sign', (['det'], {}), '(det)\n', (1006, 1011), True, 'import numpy as np\n'), ((1516, 1546), 'numpy.squeeze', 'np.squeeze', (['smpl_poses'], {'axis': '(0)'}), '(smpl_poses, axis=0)\n', (1526, 1546), True, 'import numpy as np\n'), ((1584, 1614), 'numpy.squeeze', 'np.squeeze', (['smpl_trans'], {'axis': '(0)'}), '(smpl_trans, axis=0)\n', (1594, 1614), True, 'import numpy as np\n'), ((2329, 2383), 'vedo.show', 'vedo.show', (['world'], {'axes': '(True)', 'viewup': '"""y"""', 'interactive': '(0)'}), "(world, axes=True, viewup='y', interactive=0)\n", (2338, 2383), False, 'import vedo\n'), ((2951, 3014), 'smplx.SMPL', 'SMPL', ([], {'model_path': '"""../others/smpl/"""', 'gender': '"""MALE"""', 'batch_size': '(1)'}), "(model_path='../others/smpl/', gender='MALE', batch_size=1)\n", (2955, 3014), False, 'from smplx import SMPL\n'), ((3165, 3188), 'tqdm.tqdm', 'tqdm.tqdm', (['result_files'], {}), '(result_files)\n', (3174, 3188), False, 'import tqdm\n'), ((203, 240), 'numpy.concatenate', 'np.concatenate', (['[batch_shape, [n, n]]'], {}), '([batch_shape, [n, n]])\n', (217, 240), True, 'import numpy as np\n'), ((1038, 1056), 'numpy.matmul', 'np.matmul', (['u', 'iden'], {}), '(u, iden)\n', (1047, 1056), True, 'import numpy as np\n'), ((2533, 2578), 'trimesh.Trimesh', 'trimesh.Trimesh', (['vertices_i', 'smpl_model.faces'], {}), '(vertices_i, smpl_model.faces)\n', (2548, 2578), False, 'import trimesh\n'), ((2648, 2680), 'vedo.Points', 'vedo.Points', (['keypoints3d_i'], {'r': '(20)'}), '(keypoints3d_i, r=20)\n', (2659, 2680), False, 'import vedo\n'), ((2699, 2736), 'vedo.show', 
'vedo.show', (['world', 'mesh'], {'interactive': '(0)'}), '(world, mesh, interactive=0)\n', (2708, 2736), False, 'import vedo\n'), ((2801, 2818), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (2811, 2818), False, 'import time\n'), ((3042, 3089), 'glob.glob', 'glob.glob', (['"""../data/original_motion_data/*.npy"""'], {}), "('../data/original_motion_data/*.npy')\n", (3051, 3089), False, 'import glob\n'), ((2249, 2312), 'vedo.Box', 'vedo.Box', (['bbox_center', 'bbox_size[0]', 'bbox_size[1]', 'bbox_size[2]'], {}), '(bbox_center, bbox_size[0], bbox_size[1], bbox_size[2])\n', (2257, 2312), False, 'import vedo\n'), ((2824, 2842), 'vedo.interactive', 'vedo.interactive', ([], {}), '()\n', (2840, 2842), False, 'import vedo\n'), ((3255, 3275), 'numpy.load', 'np.load', (['result_file'], {}), '(result_file)\n', (3262, 3275), True, 'import numpy as np\n'), ((1692, 1728), 'torch.from_numpy', 'torch.from_numpy', (['smpl_poses[:, 0:1]'], {}), '(smpl_poses[:, 0:1])\n', (1708, 1728), False, 'import torch\n'), ((1756, 1791), 'torch.from_numpy', 'torch.from_numpy', (['smpl_poses[:, 1:]'], {}), '(smpl_poses[:, 1:])\n', (1772, 1791), False, 'import torch\n'), ((1816, 1844), 'torch.from_numpy', 'torch.from_numpy', (['smpl_trans'], {}), '(smpl_trans)\n', (1832, 1844), False, 'import torch\n')] |
# Copyright (C) 2019 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Author: <NAME>
import numpy as np
from mocasin.design_centering.volume import *
import mocasin.design_centering.sample as sample
import mocasin.util.random_distributions.discrete_random as rd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def random_s_set_gen(
    n, procs, mu, Q, r, num_samples, threshold=0.05, num_points=10
):
    """Draw ``num_samples`` discrete-Gaussian samples around ``mu`` (with
    covariance shape ``Q`` and radius ``r``) and collect them, all marked
    feasible, into a fresh SampleSet.

    ``threshold`` and ``num_points`` are accepted for interface
    compatibility but unused here.
    """
    ns = [procs] * n
    collected = sample.SampleSet()
    for _ in range(num_samples):
        drawn_vec = rd.discrete_gauss(ns, mu.astype(int), r, np.array(Q))
        new_sample = sample.Sample(sample=drawn_vec)
        new_sample.setFeasibility(True)
        collected.add_sample(new_sample)
    return collected
def random_s_set_test_center(dim, num_procs, mu, num_samples, Q, r):
    """Generate a sample set where a point is feasible iff it lies strictly
    'above' mu along one randomly chosen coordinate (of the first two) and
    exactly on mu along the other one.
    """
    s_set = random_s_set_gen(dim, num_procs, mu, Q, r, num_samples)
    axis = np.random.randint(2)
    other_axis = 1 - axis
    for current in s_set.get_samples():
        point = current.sample2simpleTuple()
        # skew upward: feasible only strictly above mu on the chosen axis
        feasible = point[axis] > mu[axis] and point[other_axis] == mu[other_axis]
        current.setFeasibility(feasible)
    return s_set
def random_s_set_test_radius(r, point, dim, num_procs, num_samples, Q, r_set):
    """Generate samples around ``point`` and mark every sample infeasible
    whose Q-metric distance from ``point`` is at least ``r_set`` — i.e. the
    feasible region is a ball of radius ``r_set``.
    """
    test_set = random_s_set_gen(dim, num_procs, point, Q, r, num_samples)
    Qinv = np.linalg.inv(Q)
    center = np.array(point)
    for current in test_set.get_feasible():
        offset = np.array(current.sample2tuple()) - center
        transformed = np.dot(offset, Qinv)
        if np.sqrt(np.dot(transformed, transformed.transpose())) >= r_set:
            current.setFeasibility(False)
    return test_set
def random_s_set_test_covariance(
    Q, Q_target, dim, num_procs, r, mu, num_samples
):
    """Generate samples with shape ``Q`` around ``mu`` and keep feasible only
    those inside the radius-``r`` ellipsoid defined by ``Q_target`` — the
    center and radius themselves should not really change under adaptation.
    """
    test_set = random_s_set_gen(dim, num_procs, mu, Q, r, num_samples)
    Q_target_inv = np.linalg.inv(Q_target)
    center = np.array(mu)
    for current in test_set.get_feasible():
        offset = np.array(current.sample2tuple()) - center
        transformed = np.dot(offset, Q_target_inv)
        if np.sqrt(np.dot(transformed, transformed.transpose())) >= r:
            current.setFeasibility(False)
    return test_set
def parse_s_set(s_set, center, coordinates):
    """Flatten a sample set into x/y/color lists for scatter plotting.

    Color codes: 0 = feasible, 1 = infeasible, 2 = the center point, which
    is always appended last.
    """
    xs = []
    ys = []
    colors = []
    for group, color in ((s_set.get_feasible(), 0), (s_set.get_infeasible(), 1)):
        for current in group:
            point = current.sample2tuple()
            xs.append(point[coordinates[0]])
            ys.append(point[coordinates[1]])
            colors.append(color)
    xs.append(center[coordinates[0]])
    ys.append(center[coordinates[1]])
    colors.append(2)
    return xs, ys, colors
def test_center_adaptation(
    vol_mu, num_iter, num_procs, num_samples, Q, r, seed
):
    """The design center must drift in the feasible direction: samples are
    feasible only strictly 'above' the current center along one random axis
    (see random_s_set_test_center), so adapted center components should never
    drop below the initial ones, and the center must actually move.
    """
    points = []  # per-iteration scatter data (kept for optional visualization)
    centers = [vol_mu.center]
    dim = len(vol_mu.center)
    np.random.seed(seed)  # fixed seed: sample draws must be reproducible
    for _ in range(num_iter):
        sample_set = random_s_set_test_center(
            dim, num_procs, vol_mu.center, num_samples, Q, r
        )
        vol_mu.adapt_center(sample_set)
        x, y, colors = parse_s_set(
            sample_set, vol_mu.center, coordinates=[0, 1]
        )
        points.append((x, y, colors))
        centers.append(vol_mu.center)
    itercenters = iter(centers)
    old_center = next(itercenters)
    # NOTE(review): old_center is never advanced inside the loop, so every
    # center is compared against the *initial* one; a pairwise comparison
    # (old_center = center at the end of each iteration) may have been
    # intended — confirm before changing.
    for center in itercenters:
        for new, old in zip(center, old_center):
            assert new >= old
    # the center must have moved at least once
    num_centers = len(set(map(tuple, centers)))
    assert num_centers > 1
# Note on the radius adaptation: if the p value hits perfectly the target_p,
# the radius will still vary a bit (albeit not much).
# This follows directly from the formulas in the original paper.
# We can see this with the following Sage code, for example:
# var('N', 'mu', 'lambd', 'Beta')
# Beta = 0.6/((2+1.3)**2 + mu)
# f = (1+Beta*(1-mu/lambd))^mu * (1-Beta * mu/lambd)^(lambd-mu)
# plot(f(lambd=1000),(mu,0,1000))
def test_radius_adaptation(
    vol_mu,
    num_samples,
    seed,
    num_iter,
    num_procs,
    dim,
    Q,
    r_set,
    target_p,
    mocker,
):
    """The adapted radius must move towards the radius of the feasible ball
    (samples are feasible only inside a ball of radius r_set — see
    random_s_set_test_radius), and the final hit ratio p must be close to
    target_p.
    """
    np.random.seed(seed)  # reproducible sample draws
    # points = []
    radii = [vol_mu.radius]
    for _ in range(num_iter):
        sample_set = random_s_set_test_radius(
            vol_mu.radius, vol_mu.center, dim, num_procs, num_samples, Q, r_set
        )  # (r,point,dim,num_procs,num_samples,Q,r_set)
        vol_mu.adapt_volume(sample_set, target_p, mocker.Mock())
        radii.append(vol_mu.radius)
        # x,y,colors = parse_s_set(sample_set,vol_mu.center,coordinates=[0,1])
        # points.append((x,y,colors))
    # distances to target p should improve:
    target_r = (
        target_p * r_set
    )  # I don't know why this is. I feel it should be: r_set * target_p ** (-1./dim)
    iterradii = iter(radii)
    old_radius = next(iterradii)
    # each step must not move the radius away from the target by more than
    # the allowed slack
    for new_radius in iterradii:
        dist_old = np.abs(target_r - old_radius)
        dist_new = np.abs(target_r - new_radius)
        assert (
            dist_old + 0.5 >= dist_new
        )  # allow a (generous) 0.5 margin of error
        old_radius = new_radius
    # in the end (actual) p should be close to target_p
    # rel_vol_feasible = r_set**dim #ignoring pi and such factors
    # rel_vol_found = vol_mu.radius**dim #ignoring pi and such factors
    p = (
        new_radius / r_set
    )  # again, no idea why it is this, I feel it should be: rel_vol_feasible / rel_vol_found
    assert np.isclose(p, target_p, atol=0.1)
def test_covariance_adaptation(
    seed,
    num_iter,
    num_samples,
    r_small,
    num_procs,
    vol_mu,
    Q,
    Q_not_rotated,
    mocker,
):
    """Drive the volume with samples that are feasible only inside the
    ellipsoid defined by Q_not_rotated (see random_s_set_test_covariance).
    The target p is flip-flopped around radius r to keep the radius roughly
    constant while the covariance adapts.

    NOTE(review): this test only prints the final covariance — it asserts
    nothing about it.
    """
    np.random.seed(seed)  # reproducible sample draws
    points = []
    mu = vol_mu.center
    vol_mu.covariance = Q
    r = r_small
    vol_mu.radius = r
    dim = len(mu)
    p_target = 0.65
    for _ in range(num_iter):
        # print(f"\n radius: {vol_mu.radius} ;covariance (det: {np.linalg.det(vol_mu.covariance)}): \n {vol_mu.covariance}")
        sample_set = random_s_set_test_covariance(
            vol_mu.covariance,
            Q_not_rotated,
            dim,
            num_procs,
            r,
            vol_mu.center,
            num_samples,
        )
        vol_mu.adapt_volume(sample_set, p_target, mocker.Mock())
        # steer the radius back towards r by alternating the target p
        if p_target and vol_mu.radius > r:
            p_target = 0.9
        else:
            p_target = 0.1
        # assert(np.isclose(np.linalg.det(vol_mu.covariance),1,rtol=0.1))
        # print(vol_mu.covariance)
        # x,y,colors = parse_s_set(sample_set,vol_mu.center,coordinates=[0,1])
        # points.append((x,y,colors))
    print(vol_mu.covariance)
    # visualize_s_sets(points)
def test_all_infeasible(lp_vol, num_samples, mocker):
    """Adapting with an empty sample set must leave the volume untouched
    (and, importantly, must not crash the algorithm)."""
    empty_set = sample.SampleSet()
    cov_before = lp_vol.covariance
    radius_before = lp_vol.radius
    center_before = lp_vol.center
    lp_vol.adapt_volume(empty_set, 0.65, mocker.Mock())
    # nothing should change
    # (np.alltrue was removed in NumPy 2.0; np.all is the same check)
    assert np.all(cov_before == lp_vol.covariance)
    assert radius_before == lp_vol.radius
    assert np.all(center_before == lp_vol.center)
def visualize_s_sets(points, num_procs):
    """Animate the per-iteration scatter data produced by parse_s_set.

    Args:
        points: list of (x, y, colors) tuples, one per iteration.
        num_procs: axis limit — both axes span [0, num_procs - 1].
    """
    ns = [num_procs, num_procs]
    fig, ax = plt.subplots()
    (x, y, colors) = points[0]
    plot = plt.scatter(x, y, c=colors, alpha=0.5)
    axes = plt.gca()
    axes.set_xlim([0, ns[0] - 1])
    axes.set_ylim([0, ns[0] - 1])

    def animate(i):
        # FuncAnimation keeps counting past the data; clamp to the LAST
        # frame.  The old code indexed points[len(points)], which raised
        # IndexError (one past the end) as soon as i reached len(points).
        if i < len(points):
            (x, y, colors) = points[i]
        else:
            (x, y, colors) = points[-1]
        plot.set_offsets(np.c_[x, y])
        return (plot,)

    myAnimation = animation.FuncAnimation(
        fig, animate, interval=1000, blit=True, repeat=True
    )
    plt.show()
| [
"numpy.abs",
"numpy.alltrue",
"numpy.isclose",
"mocasin.design_centering.sample.setFeasibility",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.gca",
"numpy.array",
"numpy.random.randint",
"numpy.linalg.inv",
"mocasin.design_centering.sample.sample2simpleTuple",
"numpy.random.seed",
... | [((685, 703), 'mocasin.design_centering.sample.SampleSet', 'sample.SampleSet', ([], {}), '()\n', (701, 703), True, 'import mocasin.design_centering.sample as sample\n'), ((1108, 1128), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1125, 1128), True, 'import numpy as np\n'), ((1738, 1754), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (1751, 1754), True, 'import numpy as np\n'), ((2258, 2281), 'numpy.linalg.inv', 'np.linalg.inv', (['Q_target'], {}), '(Q_target)\n', (2271, 2281), True, 'import numpy as np\n'), ((3281, 3301), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3295, 3301), True, 'import numpy as np\n'), ((4504, 4524), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4518, 4524), True, 'import numpy as np\n'), ((5845, 5878), 'numpy.isclose', 'np.isclose', (['p', 'target_p'], {'atol': '(0.1)'}), '(p, target_p, atol=0.1)\n', (5855, 5878), True, 'import numpy as np\n'), ((6039, 6059), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6053, 6059), True, 'import numpy as np\n'), ((7106, 7124), 'mocasin.design_centering.sample.SampleSet', 'sample.SampleSet', ([], {}), '()\n', (7122, 7124), True, 'import mocasin.design_centering.sample as sample\n'), ((7335, 7371), 'numpy.alltrue', 'np.alltrue', (['(cov == lp_vol.covariance)'], {}), '(cov == lp_vol.covariance)\n', (7345, 7371), True, 'import numpy as np\n'), ((7418, 7453), 'numpy.alltrue', 'np.alltrue', (['(center == lp_vol.center)'], {}), '(center == lp_vol.center)\n', (7428, 7453), True, 'import numpy as np\n'), ((7543, 7557), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7555, 7557), True, 'import matplotlib.pyplot as plt\n'), ((7600, 7638), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'colors', 'alpha': '(0.5)'}), '(x, y, c=colors, alpha=0.5)\n', (7611, 7638), True, 'import matplotlib.pyplot as plt\n'), ((7650, 7659), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', 
(7657, 7659), True, 'import matplotlib.pyplot as plt\n'), ((7960, 8036), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'interval': '(1000)', 'blit': '(True)', 'repeat': '(True)'}), '(fig, animate, interval=1000, blit=True, repeat=True)\n', (7983, 8036), True, 'import matplotlib.animation as animation\n'), ((8056, 8066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8064, 8066), True, 'import matplotlib.pyplot as plt\n'), ((824, 856), 'mocasin.design_centering.sample.Sample', 'sample.Sample', ([], {'sample': 'sample_vec'}), '(sample=sample_vec)\n', (837, 856), True, 'import mocasin.design_centering.sample as sample\n'), ((1218, 1245), 'mocasin.design_centering.sample.sample2simpleTuple', 'sample.sample2simpleTuple', ([], {}), '()\n', (1243, 1245), True, 'import mocasin.design_centering.sample as sample\n'), ((1876, 1903), 'numpy.dot', 'np.dot', (['vecNotShifted', 'Qinv'], {}), '(vecNotShifted, Qinv)\n', (1882, 1903), True, 'import numpy as np\n'), ((2400, 2427), 'numpy.dot', 'np.dot', (['vecNotShifted', 'Qinv'], {}), '(vecNotShifted, Qinv)\n', (2406, 2427), True, 'import numpy as np\n'), ((5288, 5317), 'numpy.abs', 'np.abs', (['(target_r - old_radius)'], {}), '(target_r - old_radius)\n', (5294, 5317), True, 'import numpy as np\n'), ((5337, 5366), 'numpy.abs', 'np.abs', (['(target_r - new_radius)'], {}), '(target_r - new_radius)\n', (5343, 5366), True, 'import numpy as np\n'), ((799, 810), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (807, 810), True, 'import numpy as np\n'), ((1395, 1422), 'mocasin.design_centering.sample.setFeasibility', 'sample.setFeasibility', (['(True)'], {}), '(True)\n', (1416, 1422), True, 'import mocasin.design_centering.sample as sample\n'), ((1449, 1477), 'mocasin.design_centering.sample.setFeasibility', 'sample.setFeasibility', (['(False)'], {}), '(False)\n', (1470, 1477), True, 'import mocasin.design_centering.sample as sample\n'), ((1846, 1861), 'numpy.array', 'np.array', 
(['point'], {}), '(point)\n', (1854, 1861), True, 'import numpy as np\n'), ((2373, 2385), 'numpy.array', 'np.array', (['mu'], {}), '(mu)\n', (2381, 2385), True, 'import numpy as np\n')] |
import csv
import datetime
import random
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
from gym import Env
from gym.spaces import Discrete, Box
# https://github.com/jmpf2018/ShipAI
## Creating Environment
class BeerGameEnv(Env):
    """Gym environment simulating the classic MIT 'Beer Game' supply chain.

    A single RL agent occupies one of the four supply-chain positions
    (0: Retailer, 1: Wholesaler, 2: Distributor, 3: Factory). The other
    three positions order according to a Sterman-style heuristic whose
    parameters are loaded from "JS Parameter Table.csv".
    """
    def __init__(self):
        # --- Action space ----------------------------------------------------
        # Definition of the limits on orders the AI agent can make.
        self.AI_Entity = True
        self.AI_Order_Plus = True  # If True, the agent's order is interpreted relative to the
        # incoming order, so the action space spans both negative and
        # positive adjustments.
        # FIX: the original called spaces.Discrete / spaces.Box here, but
        # 'spaces' is only imported inside the __main__ guard at the bottom of
        # the file, so importing this module and instantiating BeerGameEnv from
        # anywhere else raised NameError. The module-level imports
        # (Discrete, Box) are used instead.
        if not self.AI_Order_Plus:
            MaxOrder = 50
            # Discrete choices 0..MaxOrder (absolute order quantities).
            self.action_space = Discrete(MaxOrder + 1)
        else:
            Relative_Min_Order = -20  # smallest adjustment relative to the incoming order
            Relative_Max_Order = +20  # largest adjustment relative to the incoming order
            # Force sign conventions on Relative_Max_/Min_Order just in case.
            self.Relative_Min_Order = (-1) * np.sign(Relative_Min_Order) * Relative_Min_Order
            self.Relative_Max_Order = np.sign(Relative_Max_Order) * Relative_Max_Order
            # Full span of the action space (including 0).
            Action_Space_Span = (-1) * self.Relative_Min_Order + self.Relative_Max_Order + 1
            self.action_space = Discrete(Action_Space_Span)
        # --- Global game configuration ---------------------------------------
        self.Random_Teams = True
        self.Fixed_Team_Nb = 0  # Team number to place the AI on; ignored when Random_Teams is True.
        self.Random_AI_Position = True
        self.AI_Position = 3  # Supply-chain slot (0-3) for the AI; ignored when Random_AI_Position is True.
        self.Random_Horizon = True
        self.min_horizon = 24  # lower bound on a random horizon length
        self.max_horizon = 200  # upper bound on a random horizon length
        self.fixed_horizon = 104  # used only when Random_Horizon is False
        self.Integer_Ordering = True
        self.Noisey_Ordering = True
        # --- Customer order stream (classic beer game step demand) -----------
        Step_Round = 4
        self.Orders = ([4] * Step_Round) + ([9] * (1000 - Step_Round))
        Second_Step = 150
        self.Orders = self.Orders[0:Second_Step] + ([9] * (1000 - Second_Step))
        # --- Financial and physical layout of the game -----------------------
        self.Holding_Cost = 0.50
        self.Backorder_Cost = 1.00
        self.Initial_OrderFlows = self.Orders[0]
        self.Initial_Inventory = 12
        self.Information_Delay = 2
        self.Shipping_Delay = 2
        # Documentation of the agent's observation components (kept for
        # reference; the flat Box below is what the model actually consumes).
        Agent_Sees = {'Agent_Order_Received': Box(0, np.inf, shape=(1,)),
                      'Agent_OH_Inventory': Box(0, np.inf, shape=(1,)),
                      'Agent_Backorder': Box(0, np.inf, shape=(1,)),
                      'Agent_Recent_Order': Box(0, np.inf, shape=(1,)),
                      'period': Box(0, 1000, shape=(1,)),
                      # NOTE: t=1000 is an order of magnitude larger than the expected maximum horizon
                      'AI_Entity_Index': Box(0, 3, shape=(1,))
                      # NOTE: upper bounded by the largest possible entity index (Python indexes at 0)
                      }
        # Observation space coerced into a flat Box to better work with Keras
        # syntax; some components are really discrete (e.g. AI_Entity_Index).
        obs_space = Box(low=np.array([0, 0, 0, 0, 0, 0]),
                        high=np.array([np.inf, np.inf, np.inf, np.inf, 1000, 3]))
        self.observation_space = obs_space
        # --- Load human-team ordering parameters from CSV --------------------
        Team_Parameter_Filename = "JS Parameter Table.csv"
        with open(Team_Parameter_Filename, newline='') as csvfile:
            Team_Parameter_Data = list(csv.reader(csvfile))
        All_Team_Parameters = np.asarray(Team_Parameter_Data)
        # Remove header row (kept in a local for reference/debugging).
        Team_Parameter_Header = All_Team_Parameters[0, :]
        All_Team_Parameters = np.delete(All_Team_Parameters, (0), axis=0)
        # Replace all blanks with '0' so the numeric conversions below succeed.
        All_Team_Parameters = np.asarray([[x or '0' for x in xs] for xs in All_Team_Parameters])
        # Extract the team columns, converting strings to int/float as appropriate.
        self.Team_Index = [int(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 1])]
        self.Team_Name = np.ndarray.tolist(All_Team_Parameters[:, 0])
        self.Entity_Code = np.ndarray.tolist(All_Team_Parameters[:, 2])
        self.Entity_Index = [int(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 3])]
        self.thetas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 4])]
        self.alphas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 5])]
        self.betas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 6])]
        self.S_primes = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 7])]
        if min(self.Team_Index) <= self.Fixed_Team_Nb <= max(self.Team_Index):
            Match_Team = self.Fixed_Team_Nb
            # Mask of the rows that correspond to that team number.
            Match_Team_Mask = np.asarray(self.Team_Index) == Match_Team
            # Filter, using the mask, the arrays holding that team's data.
            Match_Team_Theta = np.asarray(self.thetas)[Match_Team_Mask]
            Match_Team_Alpha = np.asarray(self.alphas)[Match_Team_Mask]
            Match_Team_Beta = np.asarray(self.betas)[Match_Team_Mask]
            Match_Team_S_prime = np.asarray(self.S_primes)[Match_Team_Mask]
            # Assemble the team parameters for the main beer game function.
            self.Parameter_df = {"theta": np.ndarray.tolist(Match_Team_Theta),
                                 "alpha_s": np.ndarray.tolist(Match_Team_Alpha),
                                 "beta": np.ndarray.tolist(Match_Team_Beta),
                                 "S_prime": np.ndarray.tolist(Match_Team_S_prime)}
        else:
            # Default ordering parameters (used when the fixed team number does
            # not match any team in the CSV, or Random_Teams re-draws later).
            self.Parameter_df = {"theta": [0.36] * 4,
                                 "alpha_s": [0.26] * 4,
                                 "beta": [0.34] * 4,
                                 "S_prime": [17] * 4}
# Main function that runs the beer game for a single step
def PirateBeerGame_funct(self, AI_Entity_Index, AI_Order, Orders, Order_flows, Shipping_flows, OH_Inventory,
Backorder, L_hat, Production_Request, AI_Entity=False, AI_parameter_df=False,
Integer_Ordering=False, Noisey_Ordering=False, Noise_Mean=0, Noise_SD=1,
Parameter_df=False, Shipping_Delay=2, Information_Delay=2) -> object:
Relative_Ordering = self.AI_Order_Plus
#If relative ordering is true, translate the agent action into a relative number
if Relative_Ordering == True:
AI_Relative_Order = AI_Order + self.Relative_Min_Order # + 1
Final_Orders = np.empty(4, dtype=float)
OH_Inventory = np.array(OH_Inventory)
Shipping_flows = np.array(Shipping_flows)
Order_flows = np.array(Order_flows)
# Ensure that the order flow facing the retailer is the actual customer order
Order_flows[0, 0] = Orders
# Read in the ordering paramters
if Parameter_df != False:
theta = Parameter_df['theta']
alpha_s = Parameter_df['alpha_s']
beta = Parameter_df['beta']
S_prime = Parameter_df['S_prime']
else:
theta = [0.36] * 4
alpha_s = [0.26] * 4
beta = [0.34] * 4
S_prime = [17] * 4
TeamName = "Default Average Agents"
# Read in AI Ordering Parameters if present
if AI_parameter_df != False:
theta[AI_Entity_Index] = AI_parameter_df['theta']
alpha_s[AI_Entity_Index] = AI_parameter_df['alpha_s']
beta[AI_Entity_Index] = AI_parameter_df['beta']
S_prime[AI_Entity_Index] = AI_parameter_df['S_prime']
#####Recieve Inventory and Advance Shipping Delays#####
# Recieve shipments
New_OH_Inventory = OH_Inventory + Shipping_flows[:, 0]
# Advance shippping delays
Shipping_flows[:, 0] = Shipping_flows[:, (Shipping_Delay - 1)]
# Shipping_flows[:, (Shipping_Delay - 1)] = np.nan
#####Fill Orders######
# View Orders
Order_Received = Order_flows[:, 0]
# Calculate net order that needs to be fullfilled
Incoming_Order = Order_flows[:, 0] + Backorder
# Ship what you can
Outbound_shipments = np.maximum(0, np.minimum(New_OH_Inventory, Incoming_Order))
# Put shipments into lefthand shipping slot
Shipping_flows[0:3, 1] = Outbound_shipments[1:]
# Send shipments from retailer to the final customer
Final_Customer_Orders_Filled = Outbound_shipments[0]
# Update the On-Hand Inventory to account for outflows
OH_Inventory = New_OH_Inventory - Outbound_shipments
# Determine Backlog, if any
Inventory_Shortage = Order_flows[:, 0] - New_OH_Inventory
New_Backorder = np.maximum(0, Backorder + Inventory_Shortage)
Backorder = np.copy(New_Backorder)
# Remember observed order but then Overwrite processed order flow to NaN for debuging if needed
Observed_Order = np.copy(Order_flows[:, 0])
# Order_flows[:, 0] = np.nan ## ORIGINAL LINE OF CODE!!!! REPLACED TO AVOID NAN ISSUES !!!!!
# Order_flows[:, 0] = 0
#####Advance Order Slips and Brewers Brew######
# Advance order slips
Order_flows[:, 0] = Order_flows[:, (Information_Delay - 1)]
# Order_flows[:, (Information_Delay - 1)] = np.nan
# Brewers Brew
Shipping_flows[3, (Shipping_Delay - 1)] = Production_Request
#####PLACE ORDERS######
for i in range(0, 4):
Entity_Index = i
# Obsrve the total supply line and the previous demand
SL = sum(Shipping_flows[Entity_Index, :])
L = Observed_Order[Entity_Index]
# L hat is smoothing of observed demand from previous 2 periods
# if t == 0:
# L_hat[Entity_Index] = np.copy(Observed_Order[Entity_Index])
# Update L_hat (expected future orders) based on observed order
L_hat_new = theta[Entity_Index] * L + (1 - theta[Entity_Index]) * L_hat[Entity_Index]
L_hat[Entity_Index] = L_hat_new
# Note stock of current inventory
S = OH_Inventory[Entity_Index]
#! Note stock of current inventory inclusive of backorder position
S = OH_Inventory[Entity_Index] - Backorder[Entity_Index]
# Add noise to the order if needed
if (Noisey_Ordering == True):
eps = np.random.normal(Noise_Mean, Noise_SD)
else:
eps = 0
# AI Decision
if (AI_Entity == True) and (Entity_Index == AI_Entity_Index):
if (AI_Order != False):
#Check if agent decision is absolute or relative to the last order received
if (Relative_Ordering != True):
# here, agent action is absolute
Order_Placed = max(0,AI_Order)
else:
# here, the agent action is relative to the order received
Order_Placed = max(0,Order_Received[AI_Entity_Index] + AI_Relative_Order)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
##TURN ON FOR INTEGER ONLY ORDERING
if Integer_Ordering:
Order_Placed = np.round(Order_Placed, 0)
if Entity_Index == 3:
Production_Request = Order_Placed
else:
Order_flows[Entity_Index + 1, (Information_Delay - 1)] = Order_Placed
# End of loop
# Make orders placed by each entity explict
Final_Orders[0:3] = Order_flows[1:, (Information_Delay - 1)]
Final_Orders[3] = Production_Request
fnt_output = {"Order_flows": Order_flows, "Shipping_flows": Shipping_flows, "OH_Inventory": OH_Inventory,
"Backorder": Backorder, "L_hat": L_hat, "Production_Request": Production_Request,
"Entity_Orders": Final_Orders, "Order_Received": Order_Received}
return fnt_output
# Resets the state space to the initial conditions for anothe repisode run
# Note that the output format for this function MUST match the output for the step function
# Any additional clean up or resetting of helper variables should occur eslewhere
    def reset(self):
        """Reset the environment for a new episode.

        Re-draws (as configured by the Random_* flags) the heuristic
        teammates, the episode horizon, and the AI's supply-chain position,
        then re-initializes all pipelines, inventories and history buffers.

        :returns: np.ndarray of shape (6,) -- the agent's initial observation
            [order received, on-hand inventory, backorder, most recent
            downstream order, period, AI entity index]. Must match the shape
            returned by step().
        """
        ##################
        # Assign and reset random game parameters
        ##################
        #### Randomly Draw new teammates if applicable
        if self.Random_Teams:
            # Randomly draw a team number
            Rand_Team = random.randint(min(self.Team_Index), max(self.Team_Index))
            # Create a mask of the rows that correspond to that team number
            Rand_Team_Mask = np.asarray(self.Team_Index) == Rand_Team
            # Filter, using the mask, the arrays that have the data for the team that was drawn
            Rand_Team_Theta = np.asarray(self.thetas)[Rand_Team_Mask]
            Rand_Team_Alpha = np.asarray(self.alphas)[Rand_Team_Mask]
            Rand_Team_Beta = np.asarray(self.betas)[Rand_Team_Mask]
            Rand_Team_S_prime = np.asarray(self.S_primes)[Rand_Team_Mask]
            # Assemble the team parameters into a named list for later use in the main Beer Game function
            Rand_Team_Parameter_df = {"theta": np.ndarray.tolist(Rand_Team_Theta),
                                      "alpha_s": np.ndarray.tolist(Rand_Team_Alpha),
                                      "beta": np.ndarray.tolist(Rand_Team_Beta),
                                      "S_prime": np.ndarray.tolist(Rand_Team_S_prime)}
            self.Parameter_df = Rand_Team_Parameter_df
        #### Randomly set game horizon if applicable
        if self.Random_Horizon == True:
            self.horizon = random.randint(self.min_horizon, self.max_horizon)
        else:
            self.horizon = self.fixed_horizon
        #### Randomly set the agent's position on the team
        if self.Random_AI_Position:
            self.AI_Entity_Index = random.randint(0, 3)
        else:
            self.AI_Entity_Index = self.AI_Position
        ##################
        # Resetting the global game parameters
        ##################
        # Reset the time period to t=0 for the beginning of the game
        self.period = 0
        # Reset the various stocks of material both within and without each player's position
        self.Order_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
        self.Shipping_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
        self.OH_Inventory = [self.Initial_Inventory] * 4
        self.Backorder = [0] * 4
        self.Order_Received = [self.Initial_OrderFlows] * 4
        self.L_hat = [self.Initial_OrderFlows] * 4
        self.Order_History = np.full([4, self.horizon], 0, dtype=float)
        self.Service_rate = [0] * self.horizon
        self.OH_Inventory_History = np.full([4, self.horizon], 0, dtype=float)
        self.Backlog_History = np.full([4, self.horizon], 0, dtype=float)
        self.Production_Request = self.Initial_OrderFlows
        self.Final_Orders = [0] * 4  # delete?
        self.Amp_Vector = [0] * self.horizon  # delete?
        self.Reward_Vector = [0] * self.horizon  # delete?
        # (A legacy dict bundling this global state for debugging was removed;
        # the attributes above are the authoritative episode state.)
        ##################
        # Subset the global parameters to just those the agent is able to observe
        ##################
        # Subset the full global state to just the part the agent has access to
        Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
        Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
        Agent_Backorder = self.Backorder[self.AI_Entity_Index]
        if self.AI_Entity_Index == 3:
            # The factory's "recent order" is its production request.
            Agent_Recent_Order = self.Production_Request
        else:
            Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
        AI_Entity_Index = self.AI_Entity_Index
        period = self.period
        # Note: The observed state outputted by the reset function MUST match the shape as that from the step function
        # and must ONLY consist of the parts of the global state the agent can actually observe
        Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
                                   Agent_Recent_Order, period, AI_Entity_Index])
        return (Observed_State)
# Takes the action by the agent, along with some simulation specific parameters, and updates the state
# Note that the output format fo this function MUST match the output for the reset function
# def step(self, action, Integer_Ordering, Noisey_Ordering, Parameter_df, AI_Entity_Index, CustOrders, horizon, period, OH_Inventory_History, Backlog_History):
    def step(self, action):
        """Advance the game one period given the agent's action.

        :param action: index into the Discrete action space; interpretation
            (absolute vs. relative order) is handled by PirateBeerGame_funct.
        :returns: (observation, reward, done, info). Per-period reward is the
            negative of the AI's own holding/backorder cost; in the final
            period it is replaced by the negative total team cost divided by
            the horizon.
        """
        # Module-level mirrors of these values are assigned below; they are not
        # read by this class itself (kept for external inspection -- verify).
        global period, AI_Entity_Index
        # Check if the current period is the final one (the Horizon) and return dn=True for 'done' state
        # Recall that Python indexes starting at 0! So if Horizon is t=52, need to stop at period = 51
        if self.period == (self.horizon - 1):
            dn = True
        else:
            dn = False
        # Run the beer game function for a single step
        BeerGame_output = self.PirateBeerGame_funct(AI_Entity_Index=self.AI_Entity_Index, AI_Order=action,
                                                    Orders=self.Orders[self.period], Order_flows=self.Order_flows,
                                                    Shipping_flows=self.Shipping_flows, OH_Inventory=self.OH_Inventory,
                                                    Backorder=self.Backorder, L_hat=self.L_hat,
                                                    Production_Request=self.Production_Request,
                                                    AI_Entity=self.AI_Entity,
                                                    Noisey_Ordering=self.Noisey_Ordering,
                                                    Integer_Ordering=self.Integer_Ordering,
                                                    Parameter_df=self.Parameter_df)
        # Fold the per-step output back into the environment's episode state.
        # ('Entity_Orders' is intentionally not consumed here.)
        self.Order_flows = BeerGame_output['Order_flows']
        self.Shipping_flows = BeerGame_output['Shipping_flows']
        self.OH_Inventory = BeerGame_output['OH_Inventory']
        self.Backorder = BeerGame_output['Backorder']
        self.L_hat = BeerGame_output['L_hat']
        self.Production_Request = BeerGame_output['Production_Request']
        self.Order_Received = BeerGame_output['Order_Received']
        info = dict()
        # Reward in any time other than the final time is the cost incurred by the AI that round.
        # But in the final state, it's the total cost incurred by the entire team!
        # Record this period's inventory/backlog for the team-cost calculation.
        self.OH_Inventory_History[:, self.period] = BeerGame_output['OH_Inventory']
        self.Backlog_History[:, self.period] = BeerGame_output['Backorder']
        # Calculation of the cost incurred by the AI for just this one period...
        Period_OH_Inventory = BeerGame_output['OH_Inventory']
        Period_Backorder = BeerGame_output['Backorder']
        AI_period_OH_Inventory = Period_OH_Inventory[self.AI_Entity_Index]
        AI_period_Backorder = Period_Backorder[self.AI_Entity_Index]
        AI_period_cost = AI_period_OH_Inventory * self.Holding_Cost + AI_period_Backorder * self.Backorder_Cost
        AI_Reward = -AI_period_cost
        reward = AI_Reward
        # In the final round, reward is total team cost instead, so that the
        # whole episode's return reflects the standard team cost.
        if dn == True:
            Costs_Per_Period = self.OH_Inventory_History * self.Holding_Cost + self.Backlog_History * self.Backorder_Cost
            Total_Costs_Per_Entity = np.sum(Costs_Per_Period, 1)
            Total_Team_Costs = sum(Total_Costs_Per_Entity)
            Team_Reward = -Total_Team_Costs
            reward = Team_Reward #+ Total_Costs_Per_Entity[self.AI_Entity_Index]
            # Normalize the final reward by the (possibly random) horizon.
            reward = reward / self.horizon
        #### Subset the global state to just the parts the agent has access to
        Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
        Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
        Agent_Backorder = self.Backorder[self.AI_Entity_Index]
        if self.AI_Entity_Index == 3:
            # The factory's "recent order" is its production request.
            Agent_Recent_Order = self.Production_Request
        else:
            Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
        AI_Entity_Index = self.AI_Entity_Index
        # Add to the period number
        self.period += 1
        period = self.period
        Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
                                   Agent_Recent_Order, period, AI_Entity_Index])
        return Observed_State, reward, dn, info
## Main Code
if __name__ == '__main__':
    from gym import Env, spaces
    # Import methods to build DQN agent
    from keras.models import Sequential, Model
    from keras.layers import Dense, Activation, Flatten, Input, Concatenate
    from keras.optimizers import Adam
    from rl.agents.dqn import DQNAgent
    from rl.policy import BoltzmannQPolicy,MaxBoltzmannQPolicy, EpsGreedyQPolicy
    from rl.memory import SequentialMemory
    # Get environment and set seed for reproducibility
    env = BeerGameEnv()
    Set_Random_Seed = True
    if Set_Random_Seed:
        Random_Seed = 11111111
        # NOTE(review): setting PYTHONHASHSEED at runtime does not affect the
        # already-running interpreter's hash randomization; it must be set in
        # the environment before Python starts to have any effect.
        os.environ['PYTHONHASHSEED'] = '0'
        np.random.seed(Random_Seed)
        random.seed(Random_Seed)
        tf.random.set_seed(Random_Seed)
        env.action_space.seed(Random_Seed)
    # Count number of actions
    nb_actions = env.action_space.n
# Build build simple model.
WINDOW_LENGTH = 4
input_shape = env.observation_space.shape
model = Sequential()
model.add(Flatten(input_shape = (WINDOW_LENGTH,) + env.observation_space.shape))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Configure and compile the DQN agent
memory = SequentialMemory(limit=2000, window_length=WINDOW_LENGTH)
policy = BoltzmannQPolicy()
policy = MaxBoltzmannQPolicy()
#policy = EpsGreedyQPolicy()
#Note, Boltzman policy and DQN is overestimating Q values, causing probabilitiies to explode...
#Double DQN helps mitigate this Q-value overestimation a bit
#Dueling networks appear to allow for a full run
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
target_model_update=1e-2, policy=policy, enable_dueling_network=True, dueling_type='avg')
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
    mode = "Train"
    #mode = "Test"
    if mode == "Train":
        now = datetime.datetime.now()
        dt_string = now.strftime("%Y%m%d_%H%M%S")
        ENV_NAME = "Beer_Game_Stocastic_DQN"
        print('Training model....')
        Full_Hist = dqn.fit(env, nb_steps=1e5, visualize=False, verbose=2)
        Training_History = Full_Hist.history
        #wieght_filename = f'dqn_{ENV_NAME}_{dt_string}_weights.h5f'
        # NOTE(review): 'wieght' is a misspelling of 'weight' but is used
        # consistently as a variable name, so it is left unchanged here.
        wieght_filename = 'dqn_test_fit.weights'
        model_filename='dqn_test_fit_wide'
        model_filename = 'dqn_test_fit'  # overrides the line above; '_wide' variant kept from experimentation
        #model_filename = 'Entity3Test'
        dqn.save_weights(wieght_filename, overwrite=True)
        dqn.model.save(model_filename, overwrite=True)
        print('Training completed! Testing and printing Average Episodic Rewards')
        dqn.test(env, nb_episodes=10, visualize=False)
        # Plot the per-episode training reward curve.
        avg_reward_list = Training_History['episode_reward']
        plt.plot(avg_reward_list)
        plt.xlabel("Episode")
        plt.title("Avg. Episodic Reward")
        plt.show()
    if mode == "Test":
        ### Load a saved and trained model
        #wieght_filename = "ddpg_Beer_Game_Stocastic_DQN_20210614_115704_weights.h5f"
        wieght_filename = 'dqn_test_fit.weights'
        model_filename = 'dqn_test_fit.model'
        #Trained_DQN = tf.saved_model.load(model_filename)
        #dqn.load_weights(wieght_filename)
        #test_out = dqn.test(env,nb_episodes=10, visualize=False)
        agent = tf.keras.models.load_model(model_filename)
        # Get the implied window size used when originally training the loaded model:
        model_input_shape = (agent.get_layer(index=0).output_shape)[0] #Get the shape attribute from the input layer
        Original_Batch_Size = model_input_shape[0] #First number is the number of items looked at simultaneously
        Original_Window_Size = model_input_shape[1] #Second number is the window used for any sequential memory
        Original_Observation_Size = model_input_shape[2] #Third number (and onwards for multi dimensional inputs) is the actual observed space
        sub_mode = "Full"
        sub_mode = "Single"  # overrides the line above
        if sub_mode == "Single":
            ### Test for a single hand-built observation. Layout (see reset/step):
            #Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
            #                           Agent_Recent_Order, period, AI_Entity_Index])
            #Note: need to multiply by window length!
            obs = np.array([4, 0, 5,
                            4, 10, 2])
            #Extract the order received from the observations set for use in relative ordering
            # NOTE(review): per the layout above, Agent_Order_Received is obs[0];
            # obs[1] is the on-hand inventory -- verify which index was intended.
            Agent_Order_Received = obs[1]
            #Expand initial observation out to fill history or window length
            obs = np.tile(obs,(Original_Window_Size,1))
            #Coerce the 1-D observation input into a 3-D array that TensorFlow will flatten and accept
            resized_obs = obs[np.newaxis, ...]
            qmatrix = agent.predict(resized_obs)
            flattened_q = np.ndarray.flatten(qmatrix)
            BestChoice = np.argmax(flattened_q)
            Relative_Order = BestChoice + env.Relative_Min_Order # + 1 double check this plus one here...
            Agent_Order = max(0, Agent_Order_Received + Relative_Order)
            print("Agent Order:")
            print(Agent_Order)
        if sub_mode == "Full":
            # NOTE(review): this branch appears to be legacy pre-class code: it
            # calls reset(horizon=...) and PirateBeerGame_funct(...) as free
            # functions and reads names (Orders, Order_flows, Order_History,
            # Final_Customer_Orders_Filled, ...) that are never defined at
            # module scope, so it would raise NameError if executed as-is.
            # It is also unreachable in practice because sub_mode is set to
            # "Single" above. Confirm before reviving or deleting.
            horizon = 120
            reset_list = reset(horizon=horizon)
            locals().update(reset_list)
            Parameter_df = {"theta": [0.36] * 4,
                            "alpha_s": [0.26] * 4,
                            "beta": [0.34] * 4,
                            "S_prime": [17] * 4}
            Holding_Cost = 0.50
            Backorder_Cost = 1.00
            AI_Entity = False
            AI_Entity_Index = False
            AI_Order = False
            Integer_Ordering = True
            Noisey_Ordering = False
            for t in range(0, (horizon)):
                if t >= 20:
                    t = t
                # NESTED FUNCTION TO RUN THE GAME FOR ONE TIME STEP AND RETURN THE NEW STATE
                BeerGame_output = PirateBeerGame_funct(AI_Entity_Index=AI_Entity_Index, AI_Order=AI_Order,
                                                       Orders=Orders[t],
                                                       Order_flows=Order_flows,
                                                       Shipping_flows=Shipping_flows, OH_Inventory=OH_Inventory,
                                                       Backorder=Backorder, L_hat=L_hat,
                                                       Production_Request=Production_Request, AI_Entity=AI_Entity,
                                                       Noisey_Ordering=Noisey_Ordering,
                                                       Integer_Ordering=Integer_Ordering,
                                                       Parameter_df=Parameter_df)
                locals().update(BeerGame_output)
                # Write values for analysis/plotting later
                Order_History[:, t] = Entity_Orders
                OH_Inventory_History[:, t] = OH_Inventory
                Backlog_History[:, t] = Backorder
                Service_rate[t] = Final_Customer_Orders_Filled / Orders[t]
            # Calculate costs
            Net_Inventory = OH_Inventory_History - Backlog_History
            Costs_Per_Period = OH_Inventory_History * Holding_Cost + Backlog_History * Backorder_Cost
            Total_Costs_Per_Entity = np.sum(Costs_Per_Period, 1)
            Total_Team_Costs = sum(Total_Costs_Per_Entity)
            print(Order_History)
            print(Total_Team_Costs)
            ###GRAPHS###
            import matplotlib.pyplot as plt
            x = range(0, horizon)
            plt.figure(1)
            PlotObj = plt.plot(Order_History.T)
            plt.title('Orders per Period')
            plt.xlabel('Time')
            plt.ylabel('Orders')
            # showing legend
            plt.legend(iter(PlotObj), ('0: Retailer', '1: Wholesaler', '2: Distributor', '3: Factory'))
            plt.figure(2)
            PlotObj = plt.plot(Net_Inventory.T)
            plt.title('Net Inventory per Period')
            plt.xlabel('Time')
            plt.ylabel('Net Inventory (On-Hand less Backlog)')
            # showing legend
            plt.legend(iter(PlotObj), ('0: Retailer', '1: Wholesaler', '2: Distributor', '3: Factory'))
            plt.show()
| [
"matplotlib.pyplot.ylabel",
"rl.agents.dqn.DQNAgent",
"numpy.array",
"tensorflow.keras.models.load_model",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.ndarray.tolist",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"rl.memory.SequentialMemory... | [((25696, 25708), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (25706, 25708), False, 'from keras.models import Sequential, Model\n'), ((26125, 26182), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(2000)', 'window_length': 'WINDOW_LENGTH'}), '(limit=2000, window_length=WINDOW_LENGTH)\n', (26141, 26182), False, 'from rl.memory import SequentialMemory\n'), ((26196, 26214), 'rl.policy.BoltzmannQPolicy', 'BoltzmannQPolicy', ([], {}), '()\n', (26212, 26214), False, 'from rl.policy import BoltzmannQPolicy, MaxBoltzmannQPolicy, EpsGreedyQPolicy\n'), ((26228, 26249), 'rl.policy.MaxBoltzmannQPolicy', 'MaxBoltzmannQPolicy', ([], {}), '()\n', (26247, 26249), False, 'from rl.policy import BoltzmannQPolicy, MaxBoltzmannQPolicy, EpsGreedyQPolicy\n'), ((26511, 26692), 'rl.agents.dqn.DQNAgent', 'DQNAgent', ([], {'model': 'model', 'nb_actions': 'nb_actions', 'memory': 'memory', 'nb_steps_warmup': '(1000)', 'target_model_update': '(0.01)', 'policy': 'policy', 'enable_dueling_network': '(True)', 'dueling_type': '"""avg"""'}), "(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup\n =1000, target_model_update=0.01, policy=policy, enable_dueling_network=\n True, dueling_type='avg')\n", (26519, 26692), False, 'from rl.agents.dqn import DQNAgent\n'), ((5051, 5092), 'numpy.delete', 'np.delete', (['All_Team_Parameters', '(0)'], {'axis': '(0)'}), '(All_Team_Parameters, 0, axis=0)\n', (5060, 5092), True, 'import numpy as np\n'), ((5164, 5232), 'numpy.asarray', 'np.asarray', (["[[(x or '0') for x in xs] for xs in All_Team_Parameters]"], {}), "([[(x or '0') for x in xs] for xs in All_Team_Parameters])\n", (5174, 5232), True, 'import numpy as np\n'), ((5450, 5494), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 0]'], {}), '(All_Team_Parameters[:, 0])\n', (5467, 5494), True, 'import numpy as np\n'), ((5522, 5566), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 2]'], 
{}), '(All_Team_Parameters[:, 2])\n', (5539, 5566), True, 'import numpy as np\n'), ((8352, 8376), 'numpy.empty', 'np.empty', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (8360, 8376), True, 'import numpy as np\n'), ((8400, 8422), 'numpy.array', 'np.array', (['OH_Inventory'], {}), '(OH_Inventory)\n', (8408, 8422), True, 'import numpy as np\n'), ((8448, 8472), 'numpy.array', 'np.array', (['Shipping_flows'], {}), '(Shipping_flows)\n', (8456, 8472), True, 'import numpy as np\n'), ((8495, 8516), 'numpy.array', 'np.array', (['Order_flows'], {}), '(Order_flows)\n', (8503, 8516), True, 'import numpy as np\n'), ((10556, 10601), 'numpy.maximum', 'np.maximum', (['(0)', '(Backorder + Inventory_Shortage)'], {}), '(0, Backorder + Inventory_Shortage)\n', (10566, 10601), True, 'import numpy as np\n'), ((10622, 10644), 'numpy.copy', 'np.copy', (['New_Backorder'], {}), '(New_Backorder)\n', (10629, 10644), True, 'import numpy as np\n'), ((10776, 10802), 'numpy.copy', 'np.copy', (['Order_flows[:, 0]'], {}), '(Order_flows[:, 0])\n', (10783, 10802), True, 'import numpy as np\n'), ((16581, 16634), 'numpy.full', 'np.full', (['[4, 2]', 'self.Initial_OrderFlows'], {'dtype': 'float'}), '([4, 2], self.Initial_OrderFlows, dtype=float)\n', (16588, 16634), True, 'import numpy as np\n'), ((16665, 16718), 'numpy.full', 'np.full', (['[4, 2]', 'self.Initial_OrderFlows'], {'dtype': 'float'}), '([4, 2], self.Initial_OrderFlows, dtype=float)\n', (16672, 16718), True, 'import numpy as np\n'), ((16949, 16991), 'numpy.full', 'np.full', (['[4, self.horizon]', '(0)'], {'dtype': 'float'}), '([4, self.horizon], 0, dtype=float)\n', (16956, 16991), True, 'import numpy as np\n'), ((17075, 17117), 'numpy.full', 'np.full', (['[4, self.horizon]', '(0)'], {'dtype': 'float'}), '([4, self.horizon], 0, dtype=float)\n', (17082, 17117), True, 'import numpy as np\n'), ((17149, 17191), 'numpy.full', 'np.full', (['[4, self.horizon]', '(0)'], {'dtype': 'float'}), '([4, self.horizon], 0, dtype=float)\n', (17156, 
17191), True, 'import numpy as np\n'), ((19219, 19337), 'numpy.array', 'np.array', (['[Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,\n Agent_Recent_Order, period, AI_Entity_Index]'], {}), '([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,\n Agent_Recent_Order, period, AI_Entity_Index])\n', (19227, 19337), True, 'import numpy as np\n'), ((24511, 24629), 'numpy.array', 'np.array', (['[Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,\n Agent_Recent_Order, period, AI_Entity_Index]'], {}), '([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,\n Agent_Recent_Order, period, AI_Entity_Index])\n', (24519, 24629), True, 'import numpy as np\n'), ((25368, 25395), 'numpy.random.seed', 'np.random.seed', (['Random_Seed'], {}), '(Random_Seed)\n', (25382, 25395), True, 'import numpy as np\n'), ((25404, 25428), 'random.seed', 'random.seed', (['Random_Seed'], {}), '(Random_Seed)\n', (25415, 25428), False, 'import random\n'), ((25437, 25468), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['Random_Seed'], {}), '(Random_Seed)\n', (25455, 25468), True, 'import tensorflow as tf\n'), ((25723, 25790), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '((WINDOW_LENGTH,) + env.observation_space.shape)'}), '(input_shape=(WINDOW_LENGTH,) + env.observation_space.shape)\n', (25730, 25790), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25808, 25818), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (25813, 25818), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25834, 25852), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25844, 25852), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25868, 25878), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (25873, 25878), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25894, 25912), 
'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25904, 25912), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25928, 25937), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (25933, 25937), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25953, 25971), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25963, 25971), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((25987, 26004), 'keras.layers.Dense', 'Dense', (['nb_actions'], {}), '(nb_actions)\n', (25992, 26004), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((26020, 26040), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (26030, 26040), False, 'from keras.layers import Dense, Activation, Flatten, Input, Concatenate\n'), ((26718, 26733), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (26722, 26733), False, 'from keras.optimizers import Adam\n'), ((26828, 26851), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26849, 26851), False, 'import datetime\n'), ((27668, 27693), 'matplotlib.pyplot.plot', 'plt.plot', (['avg_reward_list'], {}), '(avg_reward_list)\n', (27676, 27693), True, 'import matplotlib.pyplot as plt\n'), ((27702, 27723), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (27712, 27723), True, 'import matplotlib.pyplot as plt\n'), ((27732, 27765), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg. Episodic Reward"""'], {}), "('Avg. 
Episodic Reward')\n", (27741, 27765), True, 'import matplotlib.pyplot as plt\n'), ((27774, 27784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27782, 27784), True, 'import matplotlib.pyplot as plt\n'), ((28221, 28263), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_filename'], {}), '(model_filename)\n', (28247, 28263), True, 'import tensorflow as tf\n'), ((865, 894), 'gym.spaces.Discrete', 'spaces.Discrete', (['(MaxOrder + 1)'], {}), '(MaxOrder + 1)\n', (880, 894), False, 'from gym import Env, spaces\n'), ((1689, 1723), 'gym.spaces.Discrete', 'spaces.Discrete', (['Action_Space_Span'], {}), '(Action_Space_Span)\n', (1704, 1723), False, 'from gym import Env, spaces\n'), ((3310, 3336), 'gym.spaces.Box', 'Box', (['(0)', 'np.inf'], {'shape': '(1,)'}), '(0, np.inf, shape=(1,))\n', (3313, 3336), False, 'from gym.spaces import Discrete, Box\n'), ((3382, 3408), 'gym.spaces.Box', 'Box', (['(0)', 'np.inf'], {'shape': '(1,)'}), '(0, np.inf, shape=(1,))\n', (3385, 3408), False, 'from gym.spaces import Discrete, Box\n'), ((3451, 3477), 'gym.spaces.Box', 'Box', (['(0)', 'np.inf'], {'shape': '(1,)'}), '(0, np.inf, shape=(1,))\n', (3454, 3477), False, 'from gym.spaces import Discrete, Box\n'), ((3523, 3549), 'gym.spaces.Box', 'Box', (['(0)', 'np.inf'], {'shape': '(1,)'}), '(0, np.inf, shape=(1,))\n', (3526, 3549), False, 'from gym.spaces import Discrete, Box\n'), ((3583, 3607), 'gym.spaces.Box', 'Box', (['(0)', '(1000)'], {'shape': '(1,)'}), '(0, 1000, shape=(1,))\n', (3586, 3607), False, 'from gym.spaces import Discrete, Box\n'), ((3808, 3829), 'gym.spaces.Box', 'Box', (['(0)', '(3)'], {'shape': '(1,)'}), '(0, 3, shape=(1,))\n', (3811, 3829), False, 'from gym.spaces import Discrete, Box\n'), ((4902, 4933), 'numpy.asarray', 'np.asarray', (['Team_Parameter_Data'], {}), '(Team_Parameter_Data)\n', (4912, 4933), True, 'import numpy as np\n'), ((10026, 10070), 'numpy.minimum', 'np.minimum', (['New_OH_Inventory', 'Incoming_Order'], {}), 
'(New_OH_Inventory, Incoming_Order)\n', (10036, 10070), True, 'import numpy as np\n'), ((15934, 15984), 'random.randint', 'random.randint', (['self.min_horizon', 'self.max_horizon'], {}), '(self.min_horizon, self.max_horizon)\n', (15948, 15984), False, 'import random\n'), ((16176, 16196), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (16190, 16196), False, 'import random\n'), ((23295, 23322), 'numpy.sum', 'np.sum', (['Costs_Per_Period', '(1)'], {}), '(Costs_Per_Period, 1)\n', (23301, 23322), True, 'import numpy as np\n'), ((29285, 29314), 'numpy.array', 'np.array', (['[4, 0, 5, 4, 10, 2]'], {}), '([4, 0, 5, 4, 10, 2])\n', (29293, 29314), True, 'import numpy as np\n'), ((29577, 29616), 'numpy.tile', 'np.tile', (['obs', '(Original_Window_Size, 1)'], {}), '(obs, (Original_Window_Size, 1))\n', (29584, 29616), True, 'import numpy as np\n'), ((29843, 29870), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['qmatrix'], {}), '(qmatrix)\n', (29861, 29870), True, 'import numpy as np\n'), ((29896, 29918), 'numpy.argmax', 'np.argmax', (['flattened_q'], {}), '(flattened_q)\n', (29905, 29918), True, 'import numpy as np\n'), ((32356, 32383), 'numpy.sum', 'np.sum', (['Costs_Per_Period', '(1)'], {}), '(Costs_Per_Period, 1)\n', (32362, 32383), True, 'import numpy as np\n'), ((32631, 32644), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (32641, 32644), True, 'import matplotlib.pyplot as plt\n'), ((32667, 32692), 'matplotlib.pyplot.plot', 'plt.plot', (['Order_History.T'], {}), '(Order_History.T)\n', (32675, 32692), True, 'import matplotlib.pyplot as plt\n'), ((32705, 32735), 'matplotlib.pyplot.title', 'plt.title', (['"""Orders per Period"""'], {}), "('Orders per Period')\n", (32714, 32735), True, 'import matplotlib.pyplot as plt\n'), ((32748, 32766), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (32758, 32766), True, 'import matplotlib.pyplot as plt\n'), ((32779, 32799), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Orders"""'], {}), "('Orders')\n", (32789, 32799), True, 'import matplotlib.pyplot as plt\n'), ((32946, 32959), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (32956, 32959), True, 'import matplotlib.pyplot as plt\n'), ((32982, 33007), 'matplotlib.pyplot.plot', 'plt.plot', (['Net_Inventory.T'], {}), '(Net_Inventory.T)\n', (32990, 33007), True, 'import matplotlib.pyplot as plt\n'), ((33020, 33057), 'matplotlib.pyplot.title', 'plt.title', (['"""Net Inventory per Period"""'], {}), "('Net Inventory per Period')\n", (33029, 33057), True, 'import matplotlib.pyplot as plt\n'), ((33070, 33088), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (33080, 33088), True, 'import matplotlib.pyplot as plt\n'), ((33101, 33151), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Net Inventory (On-Hand less Backlog)"""'], {}), "('Net Inventory (On-Hand less Backlog)')\n", (33111, 33151), True, 'import matplotlib.pyplot as plt\n'), ((33298, 33308), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33306, 33308), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1488), 'numpy.sign', 'np.sign', (['Relative_Max_Order'], {}), '(Relative_Max_Order)\n', (1468, 1488), True, 'import numpy as np\n'), ((4418, 4446), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (4426, 4446), True, 'import numpy as np\n'), ((4484, 4535), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf, np.inf, 1000, 3]'], {}), '([np.inf, np.inf, np.inf, np.inf, 1000, 3])\n', (4492, 4535), True, 'import numpy as np\n'), ((4847, 4866), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4857, 4866), False, 'import csv\n'), ((5379, 5423), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 1]'], {}), '(All_Team_Parameters[:, 1])\n', (5396, 5423), True, 'import numpy as np\n'), ((5618, 5662), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 3]'], {}), 
'(All_Team_Parameters[:, 3])\n', (5635, 5662), True, 'import numpy as np\n'), ((5711, 5755), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 4]'], {}), '(All_Team_Parameters[:, 4])\n', (5728, 5755), True, 'import numpy as np\n'), ((5804, 5848), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 5]'], {}), '(All_Team_Parameters[:, 5])\n', (5821, 5848), True, 'import numpy as np\n'), ((5896, 5940), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 6]'], {}), '(All_Team_Parameters[:, 6])\n', (5913, 5940), True, 'import numpy as np\n'), ((5991, 6035), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['All_Team_Parameters[:, 7]'], {}), '(All_Team_Parameters[:, 7])\n', (6008, 6035), True, 'import numpy as np\n'), ((6292, 6319), 'numpy.asarray', 'np.asarray', (['self.Team_Index'], {}), '(self.Team_Index)\n', (6302, 6319), True, 'import numpy as np\n'), ((6462, 6485), 'numpy.asarray', 'np.asarray', (['self.thetas'], {}), '(self.thetas)\n', (6472, 6485), True, 'import numpy as np\n'), ((6534, 6557), 'numpy.asarray', 'np.asarray', (['self.alphas'], {}), '(self.alphas)\n', (6544, 6557), True, 'import numpy as np\n'), ((6605, 6627), 'numpy.asarray', 'np.asarray', (['self.betas'], {}), '(self.betas)\n', (6615, 6627), True, 'import numpy as np\n'), ((6678, 6703), 'numpy.asarray', 'np.asarray', (['self.S_primes'], {}), '(self.S_primes)\n', (6688, 6703), True, 'import numpy as np\n'), ((6876, 6911), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Match_Team_Theta'], {}), '(Match_Team_Theta)\n', (6893, 6911), True, 'import numpy as np\n'), ((6963, 6998), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Match_Team_Alpha'], {}), '(Match_Team_Alpha)\n', (6980, 6998), True, 'import numpy as np\n'), ((7047, 7081), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Match_Team_Beta'], {}), '(Match_Team_Beta)\n', (7064, 7081), True, 'import numpy as np\n'), ((7133, 7170), 'numpy.ndarray.tolist', 'np.ndarray.tolist', 
(['Match_Team_S_prime'], {}), '(Match_Team_S_prime)\n', (7150, 7170), True, 'import numpy as np\n'), ((12255, 12293), 'numpy.random.normal', 'np.random.normal', (['Noise_Mean', 'Noise_SD'], {}), '(Noise_Mean, Noise_SD)\n', (12271, 12293), True, 'import numpy as np\n'), ((13449, 13474), 'numpy.round', 'np.round', (['Order_Placed', '(0)'], {}), '(Order_Placed, 0)\n', (13457, 13474), True, 'import numpy as np\n'), ((14893, 14920), 'numpy.asarray', 'np.asarray', (['self.Team_Index'], {}), '(self.Team_Index)\n', (14903, 14920), True, 'import numpy as np\n'), ((15061, 15084), 'numpy.asarray', 'np.asarray', (['self.thetas'], {}), '(self.thetas)\n', (15071, 15084), True, 'import numpy as np\n'), ((15131, 15154), 'numpy.asarray', 'np.asarray', (['self.alphas'], {}), '(self.alphas)\n', (15141, 15154), True, 'import numpy as np\n'), ((15200, 15222), 'numpy.asarray', 'np.asarray', (['self.betas'], {}), '(self.betas)\n', (15210, 15222), True, 'import numpy as np\n'), ((15271, 15296), 'numpy.asarray', 'np.asarray', (['self.S_primes'], {}), '(self.S_primes)\n', (15281, 15296), True, 'import numpy as np\n'), ((15467, 15501), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Rand_Team_Theta'], {}), '(Rand_Team_Theta)\n', (15484, 15501), True, 'import numpy as np\n'), ((15552, 15586), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Rand_Team_Alpha'], {}), '(Rand_Team_Alpha)\n', (15569, 15586), True, 'import numpy as np\n'), ((15634, 15667), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Rand_Team_Beta'], {}), '(Rand_Team_Beta)\n', (15651, 15667), True, 'import numpy as np\n'), ((15718, 15754), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['Rand_Team_S_prime'], {}), '(Rand_Team_S_prime)\n', (15735, 15754), True, 'import numpy as np\n'), ((1376, 1403), 'numpy.sign', 'np.sign', (['Relative_Min_Order'], {}), '(Relative_Min_Order)\n', (1383, 1403), True, 'import numpy as np\n')] |
import numpy as np
import scipy
import matcompat
# if available import pylab (from matlibplot)
try:
import matplotlib.pylab as plt
except ImportError:
pass
def aveknt(varargin):
    # NOTE(review): this function was machine-translated from MATLAB (hence
    # the matcompat import). Names such as `nargin`, `isfield`, `iscell`,
    # `cellarray`, `numel`, `cell`, `matdiv`, `false` and `true` are MATLAB
    # built-ins with no Python definition in scope -- as written, calling
    # this function raises NameError. Treat the body as a translation
    # sketch pending a manual port; the original MATLAB doc block below
    # describes the intended contract.
    # Local Variables: ndim, nrb, onedim, idim, varargin, knt_aux, knt, pts, order
    # Function calls: false, nargin, reshape, sum, isfield, cell, aveknt, zeros, numel, error, iscell, repmat, true
    #% AVEKNT: compute the knot averages (Greville points) of a knot vector
    #%
    #% Calling Sequence:
    #%
    #%   pts = aveknt (knt, p)
    #%   pts = aveknt (nrb)
    #%
    #% INPUT:
    #%
    #%  knt - knot sequence
    #%  p   - spline order (degree + 1)
    #%  nrb - NURBS structure (see nrbmak)
    #%
    #% OUTPUT:
    #%
    #%  pts - average knots. If the input is a NURBS, it gives a cell-array,
    #%    with the average knots in each direction
    #%
    #% See also:
    #%
    #% Copyright (C) 2016 <NAME>
    #%
    #%    This program is free software: you can redistribute it and/or modify
    #%    it under the terms of the GNU General Public License as published by
    #%    the Free Software Foundation, either version 3 of the License, or
    #%    (at your option) any later version.
    #%    This program is distributed in the hope that it will be useful,
    #%    but WITHOUT ANY WARRANTY; without even the implied warranty of
    #%    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    #%    GNU General Public License for more details.
    #%
    #%    You should have received a copy of the GNU General Public License
    #%    along with this program.  If not, see <http://www.gnu.org/licenses/>.
    # Dispatch on argument count, mirroring MATLAB's nargin convention:
    # one argument = NURBS structure, two = (knot vector, order).
    if nargin == 1.:
        if isfield(varargin.cell[0], 'form'):
            nrb = varargin.cell[0]
            knt = nrb.knots
            order = nrb.order
        else:
            matcompat.error('The input should be a NURBS structure, or a knot vector and the order. See the help for details')
    elif nargin == 2.:
        knt = varargin.cell[0]
        order = varargin.cell[1]
    else:
        matcompat.error('The input should be a NURBS structure, or a knot vector and the order. See the help for details')
    # A plain knot vector is wrapped into a one-element cell array so the
    # per-dimension loop below handles both the 1-D and tensor-product cases.
    onedim = false
    if not iscell(knt):
        knt = cellarray(np.hstack((knt)))
        onedim = true
    ndim = numel(knt)
    pts = cell(ndim, 1.)
    for idim in np.arange(1., (ndim)+1):
        if numel(knt.cell[int(idim)-1])<order[int(idim)-1]+1.:
            matcompat.error('The knot vector must contain at least p+2 knots, with p the degree')
        # Greville points: averages of (order-1) consecutive interior knots.
        knt_aux = matcompat.repmat(knt.cell[int(idim)-1,1:0-1.](), 1., (order[int(idim)-1]-1.))
        knt_aux = np.array(np.vstack((np.hstack((knt_aux.flatten(1))), np.hstack((np.zeros((order[int(idim)-1]-1.), 1.))))))
        knt_aux = np.reshape(knt_aux, np.array([]), (order[int(idim)-1]-1.))
        pts.cell[int(idim)-1] = matdiv(np.sum(knt_aux.T, 1.), order[int(idim)-1]-1.)
        pts.cell[int(idim)-1] = pts.cell[int(idim)-1,0:0-order[int(idim)-1]+1.]()
    # Unwrap the single-dimension case back to a plain vector.
    if onedim:
        pts = pts.cell[0]
    #%!test
    #%! knt = [0 0 0 0.5 1 1 1];
    #%! pts = aveknt (knt, 3);
    #%! assert (pts - [0 1/4 3/4 1] < 1e-14)
    #%!
    #%!test
    #%! knt = {[0 0 0 0.5 1 1 1] [0 0 0 0 1/3 2/3 1 1 1 1]};
    #%! pts = aveknt (knt, [3 4]);
    #%! assert (pts{1} - [0 1/4 3/4 1] < 1e-14);
    #%! assert (pts{2} - [0 1/9 1/3 2/3 8/9 1] < 1e-14);
    #%!
    #%!test
    #%! nrb = nrb4surf([0 0], [1 0], [0 1], [1 1]);
    #%! nrb = nrbkntins (nrbdegelev (nrb, [1 2]), {[1/2] [1/3 2/3]});
    #%! pts = aveknt (nrb);
    #%! assert (pts{1} - [0 1/4 3/4 1] < 1e-14);
    #%! assert (pts{2} - [0 1/9 1/3 2/3 8/9 1] < 1e-14);
return [pts] | [
"matcompat.error",
"numpy.hstack",
"numpy.array",
"numpy.sum",
"numpy.arange"
] | [((2437, 2461), 'numpy.arange', 'np.arange', (['(1.0)', '(ndim + 1)'], {}), '(1.0, ndim + 1)\n', (2446, 2461), True, 'import numpy as np\n'), ((1877, 2001), 'matcompat.error', 'matcompat.error', (['"""The input should be a NURBS structure, or a knot vector and the order. See the help for details"""'], {}), "(\n 'The input should be a NURBS structure, or a knot vector and the order. See the help for details'\n )\n", (1892, 2001), False, 'import matcompat\n'), ((2128, 2252), 'matcompat.error', 'matcompat.error', (['"""The input should be a NURBS structure, or a knot vector and the order. See the help for details"""'], {}), "(\n 'The input should be a NURBS structure, or a knot vector and the order. See the help for details'\n )\n", (2143, 2252), False, 'import matcompat\n'), ((2324, 2338), 'numpy.hstack', 'np.hstack', (['knt'], {}), '(knt)\n', (2333, 2338), True, 'import numpy as np\n'), ((2537, 2627), 'matcompat.error', 'matcompat.error', (['"""The knot vector must contain at least p+2 knots, with p the degree"""'], {}), "(\n 'The knot vector must contain at least p+2 knots, with p the degree')\n", (2552, 2627), False, 'import matcompat\n'), ((2900, 2912), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2908, 2912), True, 'import numpy as np\n'), ((2978, 3000), 'numpy.sum', 'np.sum', (['knt_aux.T', '(1.0)'], {}), '(knt_aux.T, 1.0)\n', (2984, 3000), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import gym
import copy
import numpy as np
import random
import torch
from torch import optim
from torch import nn
from torch.distributions import Normal, Distribution
import cherry as ch
from cherry import envs
# --- Hyperparameters -------------------------------------------------------
# NOTE(review): this script implements SAC, yet the list also includes
# TRPO/PPO/TD3-style settings (BACKTRACK_*, CONJUGATE_GRADIENT_ITERS,
# PPO_*, POLICY_DELAY, TARGET_ACTION_NOISE*, ...) that are never read
# below -- presumably shared with sibling scripts; confirm before pruning.
ACTION_DISCRETISATION = 5
ACTION_NOISE = 0.1
BACKTRACK_COEFF = 0.8
BACKTRACK_ITERS = 10
CONJUGATE_GRADIENT_ITERS = 10
DAMPING_COEFF = 0.1
DISCOUNT = 0.99  # reward discount factor (gamma)
EPSILON = 0.05
ENTROPY_WEIGHT = 0.2  # SAC temperature (alpha): scale of the entropy bonus
HIDDEN_SIZE = 32  # width of every hidden layer in actor and critics
KL_LIMIT = 0.05
LEARNING_RATE = 0.001
MAX_STEPS = 100000  # total environment steps
ON_POLICY_BATCH_SIZE = 2048
BATCH_SIZE = 128  # replay minibatch size
POLICY_DELAY = 2
POLYAK_FACTOR = 0.995  # target-network smoothing coefficient
PPO_CLIP_RATIO = 0.2
PPO_EPOCHS = 20
REPLAY_SIZE = 100000  # max transitions retained in the replay buffer
TARGET_ACTION_NOISE = 0.2
TARGET_ACTION_NOISE_CLIP = 0.5
TARGET_UPDATE_INTERVAL = 2500
TRACE_DECAY = 0.97
UPDATE_INTERVAL = 1  # gradient update every N env steps (after warm-up)
UPDATE_START = 10000  # env steps of pure random exploration before learning
TEST_INTERVAL = 1000
SEED = 42
# Seed every RNG in play for reproducibility.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
def create_target_network(network):
    """Return a frozen deep copy of ``network`` for use as a target network.

    The copy shares no parameters with the source, and gradient tracking
    is disabled on every parameter so the optimiser never touches it.
    """
    frozen = copy.deepcopy(network)
    for parameter in frozen.parameters():
        parameter.requires_grad_(False)
    return frozen
class TanhNormal(Distribution):
    """Normal distribution squashed through tanh, as used in SAC policies.

    Samples are drawn from ``Normal(loc, scale)`` and mapped through tanh,
    constraining them to (-1, 1). ``log_prob`` applies the
    change-of-variables correction for the squashing.
    """

    def __init__(self, loc, scale):
        super().__init__()
        self.normal = Normal(loc, scale)

    def sample(self):
        # Non-differentiable draw, squashed into (-1, 1).
        return torch.tanh(self.normal.sample())

    def rsample(self):
        # Reparameterised (differentiable) draw, squashed into (-1, 1).
        return torch.tanh(self.normal.rsample())

    def log_prob(self, value):
        # Invert the squashing: atanh(v) = (log1p(v) - log1p(-v)) / 2.
        pre_tanh = (torch.log1p(value) - torch.log1p(-value)) / 2.0
        # Base-Normal density minus the log|d tanh / dx| Jacobian term;
        # the 1e-6 keeps the log finite at the (-1, 1) boundary.
        return self.normal.log_prob(pre_tanh) - torch.log1p(-value.pow(2) + 1e-6)

    @property
    def mean(self):
        return torch.tanh(self.normal.mean)
class SoftActor(nn.Module):
    """Gaussian policy network for SAC.

    Maps a 3-dimensional state to the mean and (clamped) log-std of a
    tanh-squashed Normal over a 1-dimensional action.
    """

    def __init__(self, hidden_size):
        super().__init__()
        # Clamping the log-std keeps the exploration noise numerically sane.
        self.log_std_min, self.log_std_max = -20, 2
        self.policy = nn.Sequential(
            nn.Linear(3, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 2),
        )

    def forward(self, state):
        # The last layer emits [mean, log_std] side by side; split them.
        mean, log_std = self.policy(state).chunk(2, dim=1)
        log_std = torch.clamp(log_std,
                                min=self.log_std_min,
                                max=self.log_std_max)
        return TanhNormal(mean, log_std.exp())
class Critic(nn.Module):
    """Value network: V(s) by default, Q(s, a) when ``state_action=True``.

    The input is a 3-dimensional state, optionally concatenated with a
    1-dimensional action; the scalar output is squeezed to shape (batch,).
    """

    def __init__(self, hidden_size, state_action=False, layer_norm=False):
        super().__init__()
        self.state_action = state_action
        in_features = 3 + (1 if state_action else 0)
        layers = [
            nn.Linear(in_features, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1),
        ]
        if layer_norm:
            # Insert a LayerNorm directly after each hidden Linear layer.
            layers.insert(1, nn.LayerNorm(hidden_size))
            layers.insert(4, nn.LayerNorm(hidden_size))
        self.value = nn.Sequential(*layers)

    def forward(self, state, action=None):
        inputs = torch.cat([state, action], dim=1) if self.state_action else state
        return self.value(inputs).squeeze(dim=1)
def get_random_action(state):
    """Return a single action drawn uniformly from (-1, 1) as a (1, 1) tensor.

    ``state`` is ignored; the parameter exists only so this function
    matches the signature the environment runner expects of a policy.
    """
    draw = 2 * random.random() - 1
    return torch.tensor([[draw]])
def main(env='Pendulum-v0'):
    """Train a Soft Actor-Critic (SAC) agent on a Gym environment.

    The network sizes are hard-coded for Pendulum-v0 (3-dim observation,
    1-dim action). Environment interaction alternates with off-policy
    gradient updates sampled from a replay buffer once UPDATE_START steps
    have been collected.

    Parameters
    ----------
    env : str
        Gym environment id to train on.
    """
    env = gym.make(env)
    env.seed(SEED)
    # cherry wrappers: tensor conversion, episode logging, step runner.
    env = envs.Torch(env)
    env = envs.Logger(env)
    env = envs.Runner(env)
    replay = ch.ExperienceReplay()
    actor = SoftActor(HIDDEN_SIZE)
    # Twin Q-networks; the state-value critic gets a frozen, slowly-tracking
    # target copy used to bootstrap the Q-function targets.
    critic_1 = Critic(HIDDEN_SIZE, state_action=True)
    critic_2 = Critic(HIDDEN_SIZE, state_action=True)
    value_critic = Critic(HIDDEN_SIZE)
    target_value_critic = create_target_network(value_critic)
    actor_optimiser = optim.Adam(actor.parameters(), lr=LEARNING_RATE)
    # Both Q-networks share one optimiser over their joint parameter list.
    critics_optimiser = optim.Adam((list(critic_1.parameters())
                                    + list(critic_2.parameters())),
                                   lr=LEARNING_RATE)
    value_critic_optimiser = optim.Adam(value_critic.parameters(),
                                        lr=LEARNING_RATE)
    get_action = lambda state: actor(state).sample()
    for step in range(1, MAX_STEPS + 1):
        with torch.no_grad():
            # Pure random exploration until the buffer is warmed up.
            if step < UPDATE_START:
                replay += env.run(get_random_action, steps=1)
            else:
                replay += env.run(get_action, steps=1)
            # Retain only the most recent REPLAY_SIZE transitions.
            replay = replay[-REPLAY_SIZE:]
        if step > UPDATE_START and step % UPDATE_INTERVAL == 0:
            sample = random.sample(replay, BATCH_SIZE)
            batch = ch.ExperienceReplay(sample)
            # Pre-compute some quantities
            states = batch.state()
            rewards = batch.reward()
            old_actions = batch.action()
            dones = batch.done()
            masses = actor(states)
            # Fresh reparameterised actions from the current policy; the
            # stored `old_actions` feed only the Q-function regression.
            actions = masses.rsample()
            log_probs = masses.log_prob(actions)
            # Clipped double-Q: pointwise minimum of the two critics.
            q_values = torch.min(critic_1(states, actions.detach()),
                                 critic_2(states, actions.detach())
                                 ).view(-1, 1)
            # Compute Q losses
            v_next = target_value_critic(batch.next_state()).view(-1, 1)
            q_old_pred1 = critic_1(states,
                                   old_actions.detach()
                                   ).view(-1, 1)
            q_old_pred2 = critic_2(states,
                                   old_actions.detach()
                                   ).view(-1, 1)
            qloss1 = ch.algorithms.sac.action_value_loss(q_old_pred1,
                                                         v_next.detach(),
                                                         rewards,
                                                         dones,
                                                         DISCOUNT)
            qloss2 = ch.algorithms.sac.action_value_loss(q_old_pred2,
                                                         v_next.detach(),
                                                         rewards,
                                                         dones,
                                                         DISCOUNT)
            # Update Q-functions by one step of gradient descent
            qloss = qloss1 + qloss2
            critics_optimiser.zero_grad()
            qloss.backward()
            critics_optimiser.step()
            # Update V-function by one step of gradient descent
            v_pred = value_critic(batch.state()).view(-1, 1)
            vloss = ch.algorithms.sac.state_value_loss(v_pred,
                                                       log_probs.detach(),
                                                       q_values.detach(),
                                                       alpha=ENTROPY_WEIGHT)
            value_critic_optimiser.zero_grad()
            vloss.backward()
            value_critic_optimiser.step()
            # Update policy by one step of gradient ascent
            q_actions = critic_1(batch.state(), actions).view(-1, 1)
            policy_loss = ch.algorithms.sac.policy_loss(log_probs,
                                                        q_actions,
                                                        alpha=ENTROPY_WEIGHT)
            actor_optimiser.zero_grad()
            policy_loss.backward()
            actor_optimiser.step()
            # Update target value network
            ch.models.polyak_average(target_value_critic,
                                     value_critic,
                                     POLYAK_FACTOR)
if __name__ == '__main__':
    # Script entry point: train on Pendulum-v0 with the defaults above.
    main()
| [
"torch.nn.Tanh",
"torch.nn.Sequential",
"copy.deepcopy",
"gym.make",
"cherry.algorithms.sac.policy_loss",
"torch.tanh",
"cherry.models.polyak_average",
"torch.nn.LayerNorm",
"cherry.envs.Logger",
"numpy.random.seed",
"random.sample",
"torch.distributions.Normal",
"cherry.envs.Runner",
"che... | [((822, 839), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (833, 839), False, 'import random\n'), ((840, 860), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (854, 860), True, 'import numpy as np\n'), ((861, 884), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (878, 884), False, 'import torch\n'), ((944, 966), 'copy.deepcopy', 'copy.deepcopy', (['network'], {}), '(network)\n', (957, 966), False, 'import copy\n'), ((3594, 3607), 'gym.make', 'gym.make', (['env'], {}), '(env)\n', (3602, 3607), False, 'import gym\n'), ((3637, 3652), 'cherry.envs.Torch', 'envs.Torch', (['env'], {}), '(env)\n', (3647, 3652), False, 'from cherry import envs\n'), ((3663, 3679), 'cherry.envs.Logger', 'envs.Logger', (['env'], {}), '(env)\n', (3674, 3679), False, 'from cherry import envs\n'), ((3690, 3706), 'cherry.envs.Runner', 'envs.Runner', (['env'], {}), '(env)\n', (3701, 3706), False, 'from cherry import envs\n'), ((3720, 3741), 'cherry.ExperienceReplay', 'ch.ExperienceReplay', ([], {}), '()\n', (3739, 3741), True, 'import cherry as ch\n'), ((1194, 1212), 'torch.distributions.Normal', 'Normal', (['loc', 'scale'], {}), '(loc, scale)\n', (1200, 1212), False, 'from torch.distributions import Normal, Distribution\n'), ((1591, 1619), 'torch.tanh', 'torch.tanh', (['self.normal.mean'], {}), '(self.normal.mean)\n', (1601, 1619), False, 'import torch\n'), ((1991, 2013), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2004, 2013), False, 'from torch import nn\n'), ((2143, 2214), 'torch.clamp', 'torch.clamp', (['policy_log_std'], {'min': 'self.log_std_min', 'max': 'self.log_std_max'}), '(policy_log_std, min=self.log_std_min, max=self.log_std_max)\n', (2154, 2214), False, 'import torch\n'), ((3165, 3187), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3178, 3187), False, 'from torch import nn\n'), ((1784, 1809), 'torch.nn.Linear', 'nn.Linear', (['(3)', 'hidden_size'], {}), '(3, 
hidden_size)\n', (1793, 1809), False, 'from torch import nn\n'), ((1829, 1838), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1836, 1838), False, 'from torch import nn\n'), ((1858, 1893), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1867, 1893), False, 'from torch import nn\n'), ((1913, 1922), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1920, 1922), False, 'from torch import nn\n'), ((1942, 1967), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(2)'], {}), '(hidden_size, 2)\n', (1951, 1967), False, 'from torch import nn\n'), ((2590, 2644), 'torch.nn.Linear', 'nn.Linear', (['(3 + (1 if state_action else 0))', 'hidden_size'], {}), '(3 + (1 if state_action else 0), hidden_size)\n', (2599, 2644), False, 'from torch import nn\n'), ((2672, 2681), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2679, 2681), False, 'from torch import nn\n'), ((2709, 2744), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2718, 2744), False, 'from torch import nn\n'), ((2772, 2781), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2779, 2781), False, 'from torch import nn\n'), ((2809, 2834), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2818, 2834), False, 'from torch import nn\n'), ((4476, 4491), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4489, 4491), False, 'import torch\n'), ((4789, 4822), 'random.sample', 'random.sample', (['replay', 'BATCH_SIZE'], {}), '(replay, BATCH_SIZE)\n', (4802, 4822), False, 'import random\n'), ((4843, 4870), 'cherry.ExperienceReplay', 'ch.ExperienceReplay', (['sample'], {}), '(sample)\n', (4862, 4870), True, 'import cherry as ch\n'), ((7348, 7421), 'cherry.algorithms.sac.policy_loss', 'ch.algorithms.sac.policy_loss', (['log_probs', 'q_actions'], {'alpha': 'ENTROPY_WEIGHT'}), '(log_probs, q_actions, alpha=ENTROPY_WEIGHT)\n', (7377, 7421), True, 'import cherry as ch\n'), ((7699, 7773), 
'cherry.models.polyak_average', 'ch.models.polyak_average', (['target_value_critic', 'value_critic', 'POLYAK_FACTOR'], {}), '(target_value_critic, value_critic, POLYAK_FACTOR)\n', (7723, 7773), True, 'import cherry as ch\n'), ((1410, 1428), 'torch.log1p', 'torch.log1p', (['value'], {}), '(value)\n', (1421, 1428), False, 'import torch\n'), ((1431, 1450), 'torch.log1p', 'torch.log1p', (['(-value)'], {}), '(-value)\n', (1442, 1450), False, 'import torch\n'), ((3317, 3350), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(1)'}), '([state, action], dim=1)\n', (3326, 3350), False, 'import torch\n'), ((3061, 3086), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (3073, 3086), False, 'from torch import nn\n'), ((3530, 3545), 'random.random', 'random.random', ([], {}), '()\n', (3543, 3545), False, 'import random\n'), ((2949, 2974), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (2961, 2974), False, 'from torch import nn\n')] |
import collections
import collections.abc

import numpy as np
import pandas as pd

from . import arrops
from .region import parse_region, regions_add_name_column
# Module-level runtime configuration: the default column names used for
# genomic-interval dataframes. Mutated by `update_default_colnames` and
# read through `_get_default_colnames()`.
_rc = {
    'colnames':{
        'chrom':'chrom',
        'start':'start',
        'end':'end'
    }
}
def _get_default_colnames():
    """Return the current default (chrom, start, end) column names."""
    names = _rc['colnames']
    return names['chrom'], names['start'], names['end']
class update_default_colnames:
    """Context manager that temporarily overrides the default column names.

    ``new_colnames`` may be either an iterable of exactly three strings
    (the new names for chrom, start and end, in that order) or a mapping
    with any subset of the keys 'chrom', 'start', 'end'. On exit, the
    previous column names are restored.

    Raises
    ------
    ValueError
        If ``new_colnames`` is neither a 3-element iterable nor a mapping.
    """

    def __init__(self, new_colnames):
        self._old_colnames = dict(_rc['colnames'])
        # BUG FIX: test Mapping *before* Iterable. Every Mapping is also
        # Iterable, so the old order unpacked a 3-key dict into its keys
        # instead of reading its values, and made the dict branch
        # unreachable. Also use collections.abc -- the bare aliases
        # (collections.Iterable/Mapping) were removed in Python 3.10.
        if isinstance(new_colnames, collections.abc.Mapping):
            _rc['colnames'].update(
                {k: v for k, v in new_colnames.items()
                 if k in ['chrom', 'start', 'end']})
        elif isinstance(new_colnames, collections.abc.Iterable):
            if len(new_colnames) != 3:
                raise ValueError(
                    'Please, specify new columns using a list of '
                    '3 strings or a dict!')
            (_rc['colnames']['chrom'],
             _rc['colnames']['start'],
             _rc['colnames']['end']) = new_colnames
        else:
            raise ValueError(
                'Please, specify new columns using a list of '
                '3 strings or a dict!')

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Restore the snapshot taken in __init__.
        _rc['colnames'] = self._old_colnames
def _verify_columns(df, colnames):
"""
df: pandas.DataFrame
colnames: list of columns
"""
if not set(colnames).issubset(df.columns):
raise ValueError(
", ".join(set(colnames).difference(set(df.columns)))
+ " not in keys of df.columns"
)
def select(df, region, cols=None):
    """
    Return all genomic intervals in a dataframe that overlap
    a genomic region.

    Parameters
    ----------
    df : pandas.DataFrame

    region : UCSC str
        The genomic region to select from the dataframe.

    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df : pandas.DataFrame

    """
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    # BUG FIX: the column names unpacked above were previously ignored --
    # the filter hardcoded df.chrom / df.start / df.end, silently breaking
    # any non-default `cols`.
    _verify_columns(df, [ck, sk, ek])
    chrom, start, end = parse_region(region)
    if chrom is None:
        raise ValueError("no chromosome detected, check region input")
    if (start is not None) and (end is not None):
        # Overlap test against the query interval (strict inequalities,
        # so zero-width touching does not count).
        inds = (
            (df[ck].values == chrom)
            & (df[sk].values < end)
            & (df[ek].values > start)
        )
    else:
        # Region given as a bare chromosome name: take the whole chromosome.
        inds = df[ck].values == chrom
    return df.iloc[np.where(inds)[0]]
def expand(df, pad, limits=None, side="both", limits_region_col=None, cols=None):
    """
    Expand each interval by a given amount.

    Parameters
    ----------
    df : pandas.DataFrame

    pad : int
        The amount by which the intervals are expanded *on each side*.

    limits : {str: int} or {str: (int, int)}, optional
        The limits of interval expansion. If a single number X is provided,
        the expanded intervals are trimmed to fit into (0, X); if a tuple
        (X, Y) is provided, they are trimmed to (X, Y). Regions absent from
        `limits` are untrimmed (lower bound 0, upper bound int64 max).

    side : str
        Which side to expand, possible values are "left", "right" and "both".

    limits_region_col : str, optional
        The column used to look up the expansion limits of each interval.
        If None, the chromosome column is used.

    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df : pandas.DataFrame
        The input dataframe, modified in place (and also returned).
    """
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    limits_region_col = ck if limits_region_col is None else limits_region_col
    if limits:
        lower_limits = {}
        upper_limits = {}
        for k, v in dict(limits).items():
            if isinstance(v, (tuple, list, np.ndarray)):
                lower_limits[k] = v[0]
                upper_limits[k] = v[1]
            elif np.isscalar(v):
                upper_limits[k] = v
                lower_limits[k] = 0
            else:
                # BUG FIX: the original message lacked the f-prefix and
                # printed the literal text "{type(v)}".
                raise ValueError(f"Unknown limit type: {type(v)}")
    if side == "both" or side == "left":
        if limits:
            # BUG FIX: the defaults were previously passed as the second
            # positional argument of Series.apply (i.e. `convert_dtype`),
            # where they had no effect; use dict.get with a default instead.
            # Regions missing from `limits` fall back to a lower bound of 0.
            df[sk] = np.maximum(
                df[limits_region_col].map(lambda k: lower_limits.get(k, 0)),
                df[sk].values - pad,
            )
        else:
            df[sk] = df[sk].values - pad
    if side == "both" or side == "right":
        if limits:
            # Regions missing from `limits` are unbounded above (int64 max).
            df[ek] = np.minimum(
                df[limits_region_col].map(
                    lambda k: upper_limits.get(k, np.iinfo(np.int64).max)
                ),
                df[ek] + pad,
            )
        else:
            df[ek] = df[ek] + pad
    return df
def _overlap_intidxs(
    df1, df2, how="left", keep_order=False, cols1=None, cols2=None, on=None
):
    """
    Find pairs of overlapping genomic intervals and return the integer
    indices of the overlapping intervals.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        Rows without a partner are reported paired with the sentinel
        index -1 (df1 rows for 'left'/'outer', df2 rows for 'right'/'outer').
    keep_order : bool
        If True, sort the output pairs lexicographically by (df1 idx, df2 idx).
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list or None
        Additional shared columns to consider as separate groups

    Returns
    -------
    overlap_ids : numpy.ndarray
        The indices of the overlapping genomic intervals in the original
        dataframes. The 1st column contains the indices of intervals
        from the 1st set, the 2nd column - the indicies from the 2nd set.
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    _verify_columns(df1, [ck1, sk1, ek1])
    _verify_columns(df2, [ck2, sk2, ek2])
    # Switch to integer indices.
    df1 = df1.reset_index(drop=True)
    df2 = df2.reset_index(drop=True)
    # Find overlapping intervals per chromosome.
    group_list1 = [ck1]
    group_list2 = [ck2]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if (ck1 in on) or (ck2 in on):
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df1, on)
        _verify_columns(df2, on)
        group_list1 += on
        group_list2 += on
    df1_groups = df1.groupby(group_list1).groups
    df2_groups = df2.groupby(group_list2).groups
    all_groups = sorted(
        set.union(set(df1_groups), set(df2_groups))
    )  ### breaks if any of the groupby elements are pd.NA...
    # all_groups = list(set.union(set(df1_groups), set(df2_groups))) ### disagrees with pyranges order so a test fails...
    overlap_intidxs = []
    for group_keys in all_groups:
        df1_group_idxs = (
            df1_groups[group_keys].values
            if (group_keys in df1_groups)
            else np.array([])
        )
        df2_group_idxs = (
            df2_groups[group_keys].values
            if (group_keys in df2_groups)
            else np.array([])
        )
        overlap_intidxs_sub = []
        both_groups_nonempty = (df1_group_idxs.size > 0) and (df2_group_idxs.size > 0)
        if both_groups_nonempty:
            overlap_idxs_loc = arrops.overlap_intervals(
                df1[sk1].values[df1_group_idxs],
                df1[ek1].values[df1_group_idxs],
                df2[sk2].values[df2_group_idxs],
                df2[ek2].values[df2_group_idxs],
            )
            # Convert local per-chromosome indices into the
            # indices of the original table.
            overlap_intidxs_sub += [
                [
                    df1_group_idxs[overlap_idxs_loc[:, 0]],
                    df2_group_idxs[overlap_idxs_loc[:, 1]],
                ]
            ]
        if how in ["outer", "left"] and df1_group_idxs.size > 0:
            if both_groups_nonempty:
                # df1 rows that appear in no overlap pair get the -1 sentinel.
                no_overlap_ids1 = df1_group_idxs[
                    np.where(
                        np.bincount(
                            overlap_idxs_loc[:, 0], minlength=len(df1_group_idxs)
                        )
                        == 0
                    )[0]
                ]
            else:
                no_overlap_ids1 = df1_group_idxs
            overlap_intidxs_sub += [
                [no_overlap_ids1, -1 * np.ones_like(no_overlap_ids1),]
            ]
        if how in ["outer", "right"] and df2_group_idxs.size > 0:
            if both_groups_nonempty:
                no_overlap_ids2 = df2_group_idxs[
                    np.where(
                        np.bincount(
                            overlap_idxs_loc[:, 1], minlength=len(df2_group_idxs)
                        )
                        == 0
                    )[0]
                ]
            else:
                no_overlap_ids2 = df2_group_idxs
            overlap_intidxs_sub += [
                [-1 * np.ones_like(no_overlap_ids2), no_overlap_ids2,]
            ]
        if overlap_intidxs_sub:
            overlap_intidxs.append(
                np.block(
                    [
                        [idxs[:, None] for idxs in idxs_pair]
                        for idxs_pair in overlap_intidxs_sub
                    ]
                )
            )
    if len(overlap_intidxs) == 0:
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # it was always just an alias of the builtin `int`.
        return np.ndarray(shape=(0, 2), dtype=int)
    overlap_intidxs = np.vstack(overlap_intidxs)
    if keep_order:
        order = np.lexsort([overlap_intidxs[:, 1], overlap_intidxs[:, 0]])
        overlap_intidxs = overlap_intidxs[order]
    return overlap_intidxs
def overlap(
    df1,
    df2,
    how="left",
    return_input=True,
    return_index=False,
    return_overlap=False,
    suffixes=("_1", "_2"),
    keep_order=False,
    cols1=None,
    cols2=None,
    on=None,
):
    """
    Find pairs of overlapping genomic intervals.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        How to handle rows without an overlap partner; unmatched rows are
        kept ('left'/'right'/'outer') and their columns filled with pd.NA.
    return_input : bool
        If True, return columns from input dfs. Default True.
        May also be "1"/"left" or "2"/"right" to return only one side.
    return_index : bool
        If True, return indicies of overlapping pairs. Default False.
        If a str, it is used instead of "index" to name the index columns.
    return_overlap
        If True, return overlapping intervals for the overlapping pairs. Default False.
        If a str, it is used instead of "overlap" as the column-name prefix.
    suffixes : (str, str)
        The suffixes for the columns of the two overlapped sets.
    keep_order : bool
        If True, preserve the order of df1 rows in the output.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list
        List of column names to perform clustering on indepdendently, passed as an argument
        to df.groupby when considering overlaps. Default is ['chrom'], which must match the first name
        from cols. Examples for additional columns include 'strand'.

    Returns
    -------
    df_overlap : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Integer index pairs; -1 marks "no partner" for non-inner joins.
    overlap_df_idxs = _overlap_intidxs(
        df1, df2, how=how, cols1=cols1, cols2=cols2, keep_order=keep_order, on=on,
    )
    # Generate output tables.
    df_index_1 = None
    df_index_2 = None
    if return_index:
        index_col = return_index if isinstance(return_index, str) else "index"
        df_index_1 = pd.DataFrame(
            {index_col + suffixes[0]: df1.index[overlap_df_idxs[:, 0]]}
        )
        df_index_2 = pd.DataFrame(
            {index_col + suffixes[1]: df2.index[overlap_df_idxs[:, 1]]}
        )
    df_overlap = None
    if return_overlap:
        overlap_col = return_overlap if isinstance(return_overlap, str) else "overlap"
        # Intersection of each pair: [max(starts), min(ends)).
        overlap_start = np.maximum(
            df1[sk1].values[overlap_df_idxs[:, 0]],
            df2[sk2].values[overlap_df_idxs[:, 1]],
        )
        overlap_end = np.minimum(
            df1[ek1].values[overlap_df_idxs[:, 0]],
            df2[ek2].values[overlap_df_idxs[:, 1]],
        )
        df_overlap = pd.DataFrame(
            {
                overlap_col + "_" + sk1: overlap_start,
                overlap_col + "_" + ek1: overlap_end,
            }
        )
    df_input_1 = None
    df_input_2 = None
    if return_input is True or str(return_input) == "1" or return_input == "left":
        df_input_1 = df1.iloc[overlap_df_idxs[:, 0]].reset_index(drop=True)
        df_input_1.columns = [c + suffixes[0] for c in df_input_1.columns]
    if return_input is True or str(return_input) == "2" or return_input == "right":
        df_input_2 = df2.iloc[overlap_df_idxs[:, 1]].reset_index(drop=True)
        df_input_2.columns = [c + suffixes[1] for c in df_input_2.columns]
    # Masking non-overlapping regions if using non-inner joins.
    # Rows whose index is the -1 sentinel were paired with nothing, so every
    # output table gets pd.NA in those positions.
    if how != "inner":
        if df_input_1 is not None:
            df_input_1[overlap_df_idxs[:, 0] == -1] = pd.NA
        if df_input_2 is not None:
            df_input_2[overlap_df_idxs[:, 1] == -1] = pd.NA
        if df_index_1 is not None:
            df_index_1[overlap_df_idxs[:, 0] == -1] = pd.NA
        if df_index_2 is not None:
            df_index_2[overlap_df_idxs[:, 1] == -1] = pd.NA
        if df_overlap is not None:
            df_overlap[
                (overlap_df_idxs[:, 0] == -1) | (overlap_df_idxs[:, 1] == -1)
            ] = pd.NA
    out_df = pd.concat(
        [df_index_1, df_input_1, df_index_2, df_input_2, df_overlap], axis="columns"
    )
    return out_df
def cluster(
    df,
    min_dist=0,
    cols=None,
    on=None,
    return_input=True,
    return_cluster_ids=True,
    return_cluster_intervals=True,
):
    """
    Cluster overlapping intervals.

    Parameters
    ----------
    df : pandas.DataFrame
    min_dist : float or None
        If provided, cluster intervals separated by this distance or less.
        If None, do not cluster non-overlapping intervals. Using
        min_dist=0 and min_dist=None will bring different results.
        bioframe uses semi-open intervals, so interval pairs [0,1) and [1,2)
        do not overlap, but are separated by a distance of 0. Adjacent intervals
        are not clustered when min_dist=None, but are clustered when min_dist=0.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.
    on : None or list
        List of column names to perform clustering on indepdendently, passed as an argument
        to df.groupby before clustering. Default is None. An example use would be on=['strand'].
    return_input : bool
        If True, return input
    return_cluster_ids : bool
        If True, return ids for clusters
    return_cluster_intervals : bool
        If True, return clustered interval the original interval belongs to

    Returns
    -------
    df_clustered : pd.DataFrame
    """
    if min_dist is not None:
        if min_dist < 0:
            raise ValueError("min_dist>=0 currently required")
    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    _verify_columns(df, [ck, sk, ek])
    # Switch to integer indices; remember the original index to restore it later.
    df_index = df.index
    df = df.reset_index(drop=True)
    # Find overlapping intervals for groups specified by ck1 and on=[] (default on=None)
    group_list = [ck]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if ck in on:
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df, on)
        group_list += on
    df_groups = df.groupby(group_list).groups
    cluster_ids = np.full(df.shape[0], -1)
    clusters = []
    max_cluster_id = -1
    for group_keys, df_group_idxs in df_groups.items():
        if df_group_idxs.empty:
            continue
        df_group = df.loc[df_group_idxs]
        (
            cluster_ids_group,
            cluster_starts_group,
            cluster_ends_group,
        ) = arrops.merge_intervals(
            df_group[sk].values, df_group[ek].values, min_dist=min_dist
        )
        interval_counts = np.bincount(cluster_ids_group)
        # Shift per-group ids so that cluster ids are globally unique.
        cluster_ids_group += max_cluster_id + 1
        n_clusters = cluster_starts_group.shape[0]
        max_cluster_id += n_clusters
        cluster_ids[df_group_idxs.values] = cluster_ids_group
        ## Storing chromosome names causes a 2x slowdown. :(
        if type(group_keys) is str:
            group_keys = tuple((group_keys,))
        clusters_group = {}
        for col in group_list:
            clusters_group[col] = pd.Series(
                data=np.full(n_clusters, group_keys[group_list.index(col)]),
                dtype=df[col].dtype,
            )
        clusters_group[sk] = cluster_starts_group
        clusters_group[ek] = cluster_ends_group
        clusters_group["n_intervals"] = interval_counts
        clusters_group = pd.DataFrame(clusters_group)
        clusters.append(clusters_group)
    assert np.all(cluster_ids >= 0)
    clusters = pd.concat(clusters).reset_index(drop=True)
    # reorder cluster columns to have chrom,start,end first
    clusters_names = list(clusters.keys())
    clusters = clusters[
        [ck, sk, ek] + [col for col in clusters_names if col not in [ck, sk, ek]]
    ]
    out_df = {}
    if return_cluster_ids:
        out_df["cluster"] = cluster_ids
    if return_cluster_intervals:
        out_df["cluster_start"] = clusters[sk].values[cluster_ids]
        out_df["cluster_end"] = clusters[ek].values[cluster_ids]
    out_df = pd.DataFrame(out_df)
    if return_input:
        out_df = pd.concat([df, out_df], axis="columns")
    # BUGFIX: the original called `out_df.set_index(df_index)` and discarded
    # the returned frame, so the input's index was never actually restored.
    out_df.index = df_index
    return out_df
def merge(df, min_dist=0, cols=None, on=None):
    """
    Merge overlapping intervals.

    Parameters
    ----------
    df : pandas.DataFrame
    min_dist : float or None
        If provided, merge intervals separated by this distance or less.
        If None, do not merge non-overlapping intervals. Using
        min_dist=0 and min_dist=None will bring different results.
        bioframe uses semi-open intervals, so interval pairs [0,1) and [1,2)
        do not overlap, but are separated by a distance of 0. Adjacent intervals
        are not merged when min_dist=None, but are merged when min_dist=0.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.
    on : list
        List of column names to consider separately for merging, passed as an argument
        to df.groupby before merging. Default is ['chrom'], which must match the first name
        from cols. Examples for additional columns include 'strand'.

    Returns
    -------
    df_merged : pandas.DataFrame
        A pandas dataframe with coordinates of merged clusters.
    """
    if min_dist is not None and min_dist < 0:
        raise ValueError("min_dist>=0 currently required")

    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    _verify_columns(df, [ck, sk, ek])

    # Merge within groups specified by the chromosome column plus any `on` columns.
    group_cols = [ck]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if ck in on:
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df, on)
        group_cols = group_cols + on

    merged_parts = []
    for keys, row_idxs in df.groupby(group_cols).groups.items():
        if row_idxs.empty:
            continue
        sub = df.loc[row_idxs]
        ids, merged_starts, merged_ends = arrops.merge_intervals(
            sub[sk].values, sub[ek].values, min_dist=min_dist
        )
        counts = np.bincount(ids)
        n_merged = merged_starts.shape[0]
        ## Storing chromosome names causes a 2x slowdown. :(
        if type(keys) is str:
            keys = (keys,)
        part = {}
        for pos, col in enumerate(group_cols):
            part[col] = pd.Series(
                data=np.full(n_merged, keys[pos]),
                dtype=df[col].dtype,
            )
        part[sk] = merged_starts
        part[ek] = merged_ends
        part["n_intervals"] = counts
        merged_parts.append(pd.DataFrame(part))

    out = pd.concat(merged_parts).reset_index(drop=True)
    # Reorder the output columns so chrom, start, end come first.
    remaining = [col for col in list(out.keys()) if col not in [ck, sk, ek]]
    return out[[ck, sk, ek] + remaining]
def complement(df, chromsizes=None, cols=None):
    """
    Find genomic regions that are not covered by any interval.

    Parameters
    ----------
    df : pandas.DataFrame
    chromsizes : dict or None
        Mapping of chromosome name to length. If None, the right bound of
        the last complement interval on each chromosome is np.iinfo(np.int64).max.
    cols : (str, str, str)
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_complement : pandas.DataFrame

    Raises
    ------
    ValueError
        If chromsizes is provided but misses a chromosome present in df,
        or if any interval extends beyond its provided chromsize.
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    infer_chromsizes = (chromsizes is None)
    # Find overlapping intervals per chromosome.
    df_groups = df.groupby(ck).groups
    if infer_chromsizes:
        all_groups = sorted(set(df_groups))
    else:
        if not set(df_groups).issubset(set(chromsizes.keys())):
            raise ValueError(
                'Chromsizes are missing some chromosomes from the input interval table.')
        # With explicit chromsizes, iterate ALL chromosomes, even ones
        # absent from df (they produce one full-chromosome complement).
        all_groups = sorted(set(chromsizes.keys()))
    complements = []
    for group_keys in all_groups:
        # this is a stub for potential on argument
        chrom = group_keys
        if group_keys not in df_groups:
            # Chromosome has no intervals: its complement is the whole chromosome.
            # (Only reachable when chromsizes were provided, so the lookup is safe.)
            complement_group = {
                ck: pd.Series(
                    data=[chrom],
                    dtype=df[ck].dtype,
                ),
                sk: 0,
                ek: chromsizes[chrom],
            }
            complements.append(pd.DataFrame(complement_group))
            continue
        df_group_idxs = df_groups[group_keys].values
        df_group = df.loc[df_group_idxs]
        if infer_chromsizes:
            chromsize = np.iinfo(np.int64).max
        else:
            chromsize = chromsizes[chrom]
        if chromsize < np.max(df_group[ek].values):
            raise ValueError("one or more intervals exceed provided chromsize")
        (
            complement_starts_group,
            complement_ends_group,
        ) = arrops.complement_intervals(
            df_group[sk].values, df_group[ek].values, bounds=(0, chromsize),
        )
        # Storing chromosome names causes a 2x slowdown. :(
        complement_group = {
            ck: pd.Series(
                data=np.full(complement_starts_group.shape[0], chrom),
                dtype=df[ck].dtype,
            ),
            sk: complement_starts_group,
            ek: complement_ends_group,
        }
        complement_group = pd.DataFrame(complement_group)
        complements.append(complement_group)
    complements = pd.concat(complements).reset_index(drop=True)
    return complements
def coverage(df1, df2, return_input=True, cols1=None, cols2=None):
    """
    Quantify the coverage of intervals from set 1 by intervals from set2. For every interval
    in set 1 find the number of base pairs covered by intervals in set 2.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    return_input : bool
        If True, return input as well as computed coverage
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_coverage : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2

    # Merge df2 first so overlapping covering intervals are not double-counted.
    merged2 = merge(df2, cols=cols2)
    pairs = overlap(
        df1,
        merged2,
        how="left",
        return_index=True,
        return_overlap=True,
        cols1=cols1,
        cols2=cols2,
    )
    # Length of each overlap segment; summed per df1 row below.
    pairs["overlap"] = pairs["overlap_end"] - pairs["overlap_start"]
    covered_bp = pairs.groupby("index_1").agg({"overlap": "sum"})

    # Rows of df1 with no overlap are absent from `covered_bp`;
    # adding onto a zero Series fills them in with 0.
    coverage_series = (
        pd.Series(np.zeros_like(df1[sk1]), index=df1.index)
        .add(covered_bp["overlap"], fill_value=0)
        .astype(df1[sk1].dtype)
    )
    result = pd.DataFrame({"coverage": coverage_series})
    if return_input:
        result = pd.concat([df1, result], axis="columns")
    return result
def _closest_intidxs(
    df1,
    df2=None,
    k=1,
    ignore_overlaps=False,
    ignore_upstream=False,
    ignore_downstream=False,
    tie_breaking_col=None,
    cols1=None,
    cols2=None,
):
    """
    For every interval in set 1 find k closest genomic intervals in set2 and
    return their integer indices.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
        If df2 is None or same object as df1, find closest intervals within the same set.
    k : int
        The number of closest intervals to report.
    ignore_overlaps, ignore_upstream, ignore_downstream : bool
        Passed through to arrops.closest_intervals to exclude candidates.
    tie_breaking_col : str, callable or None
        Column label in df2 (or f(DataFrame) -> Series) used to break
        distance ties; lower values win. If None, no tie-breaking array
        is used.
    cols1, cols2 : (str, str, str)
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    closest_ids : numpy.ndarray
        The indices of the overlapping genomic intervals in the original
        dataframes. The 1st column contains the indices of intervals
        from the 1st set, the 2nd column - the indicies from the 2nd set.
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    self_closest = False
    if (df2 is None) or (df2 is df1):
        df2 = df1
        self_closest = True
    # Switch to integer indices.
    df1 = df1.reset_index(drop=True)
    df2 = df2.reset_index(drop=True)
    # Find closest intervals per chromosome.
    df1_groups = df1.groupby(ck1).groups
    df2_groups = df2.groupby(ck2).groups
    closest_intidxs = []
    for group_keys, df1_group_idxs in df1_groups.items():
        if group_keys not in df2_groups:
            continue
        df2_group_idxs = df2_groups[group_keys]
        df1_group = df1.loc[df1_group_idxs]
        df2_group = df2.loc[df2_group_idxs]
        tie_arr = None
        if tie_breaking_col is None:
            pass  # no tie-breaking requested
        elif isinstance(tie_breaking_col, str):
            tie_arr = df2_group[tie_breaking_col].values
        elif callable(tie_breaking_col):
            tie_arr = tie_breaking_col(df2_group).values
        else:
            # BUGFIX: the original constructed this ValueError but never
            # raised it, silently accepting invalid tie_breaking_col values.
            raise ValueError(
                "tie_breaking_col must be either a column label or "
                "f(DataFrame) -> Series"
            )
        closest_idxs_group = arrops.closest_intervals(
            df1_group[sk1].values,
            df1_group[ek1].values,
            None if self_closest else df2_group[sk2].values,
            None if self_closest else df2_group[ek2].values,
            k=k,
            tie_arr=tie_arr,
            ignore_overlaps=ignore_overlaps,
            ignore_upstream=ignore_upstream,
            ignore_downstream=ignore_downstream,
        )
        # Convert local per-chromosome indices into the
        # indices of the original table.
        closest_idxs_group = np.vstack(
            [
                df1_group_idxs.values[closest_idxs_group[:, 0]],
                df2_group_idxs.values[closest_idxs_group[:, 1]],
            ]
        ).T
        closest_intidxs.append(closest_idxs_group)
    if len(closest_intidxs) == 0:
        # BUGFIX: `np.int` was removed in NumPy 1.24; it was an alias of `int`.
        return np.ndarray(shape=(0, 2), dtype=int)
    closest_intidxs = np.vstack(closest_intidxs)
    return closest_intidxs
def closest(
    df1,
    df2=None,
    k=1,
    ignore_overlaps=False,
    ignore_upstream=False,
    ignore_downstream=False,
    tie_breaking_col=None,
    return_input=True,
    return_index=False,
    return_distance=True,
    return_overlap=False,
    suffixes=("_1", "_2"),
    cols1=None,
    cols2=None,
):
    """
    For every interval in set 1 find k closest genomic intervals in set 2.

    Note that, unless specified otherwise, overlapping intervals are considered
    as closest. When multiple intervals are located at the same distance, the
    ones with the lowest index in df2 are chosen.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
        If df2 is None, find closest non-identical intervals within the same set.
    k : int
        The number of closest intervals to report.
    ignore_overlaps : bool
        If True, return the closest non-overlapping interval.
    ignore_upstream : bool
        If True, ignore intervals in df2 that are upstream of intervals in df1.
    ignore_downstream : bool
        If True, ignore intervals in df2 that are downstream of intervals in df1.
    tie_breaking_col : str
        A column in df2 to use for breaking ties when multiple intervals
        are located at the same distance. Intervals with *lower* values will
        be selected.
    return_input : bool
        If True, return input
    return_index : bool
        If True, return indices
    return_distance : bool
        If True, return distances. Returns zero for overlaps.
    return_overlap : bool
        If True, return columns: have_overlap, overlap_start, and overlap_end.
        Fills df_closest['overlap_start'] and df['overlap_end'] with pd.NA if non-overlapping.
    suffixes : (str, str)
        The suffixes for the columns of the two sets.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_closest : pandas.DataFrame
        If no intervals found, returns none.

    Raises
    ------
    ValueError
        If k < 1, or if df2 is the same object as df1.
    """
    if k < 1:
        raise ValueError("k>=1 required")
    if df2 is df1:
        raise ValueError(
            "pass df2=None to find closest non-identical intervals within the same set."
        )
    # If finding closest within the same set, df2 now has to be set
    # to df1, so that the rest of the logic works.
    if df2 is None:
        df2 = df1
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Integer index pairs (df1 row, df2 row) of the k closest intervals.
    closest_df_idxs = _closest_intidxs(
        df1,
        df2,
        k=k,
        ignore_overlaps=ignore_overlaps,
        ignore_upstream=ignore_upstream,
        ignore_downstream=ignore_downstream,
        tie_breaking_col=tie_breaking_col,
        cols1=cols1,
        cols2=cols2,
    )
    if len(closest_df_idxs) == 0:
        return  # case of no closest intervals
    # Generate output tables.
    df_index_1 = None
    df_index_2 = None
    if return_index:
        index_col = return_index if isinstance(return_index, str) else "index"
        df_index_1 = pd.DataFrame(
            {index_col + suffixes[0]: df1.index[closest_df_idxs[:, 0]]}
        )
        df_index_2 = pd.DataFrame(
            {index_col + suffixes[1]: df2.index[closest_df_idxs[:, 1]]}
        )
    df_overlap = None
    if return_overlap:
        # Intersection of each pair: [max(starts), min(ends)); a pair
        # overlaps iff that interval is non-empty.
        overlap_start = np.amax(
            np.vstack(
                [
                    df1[sk1].values[closest_df_idxs[:, 0]],
                    df2[sk2].values[closest_df_idxs[:, 1]],
                ]
            ),
            axis=0,
        )
        overlap_end = np.amin(
            np.vstack(
                [
                    df1[ek1].values[closest_df_idxs[:, 0]],
                    df2[ek2].values[closest_df_idxs[:, 1]],
                ]
            ),
            axis=0,
        )
        have_overlap = overlap_start < overlap_end
        df_overlap = pd.DataFrame({
            "have_overlap" : have_overlap,
            "overlap_start" : np.where(have_overlap, overlap_start, pd.NA),
            "overlap_end": np.where(have_overlap, overlap_end, pd.NA)
        })
    df_distance = None
    if return_distance:
        # Gap on each side (clamped at 0); overlapping pairs get distance 0.
        distance_left = np.maximum(
            0,
            df1[sk1].values[closest_df_idxs[:, 0]]
            - df2[ek2].values[closest_df_idxs[:, 1]],
        )
        distance_right = np.maximum(
            0,
            df2[sk2].values[closest_df_idxs[:, 1]]
            - df1[ek1].values[closest_df_idxs[:, 0]],
        )
        distance = np.amax(np.vstack([distance_left, distance_right]), axis=0)
        df_distance = pd.DataFrame({
            "distance" : distance
        })
    df_input_1 = None
    df_input_2 = None
    if return_input is True or str(return_input) == "1" or return_input == "left":
        df_input_1 = df1.iloc[closest_df_idxs[:, 0]].reset_index(drop=True)
        df_input_1.columns = [c + suffixes[0] for c in df_input_1.columns]
    if return_input is True or str(return_input) == "2" or return_input == "right":
        df_input_2 = df2.iloc[closest_df_idxs[:, 1]].reset_index(drop=True)
        df_input_2.columns = [c + suffixes[1] for c in df_input_2.columns]
    out_df = pd.concat([
        df_index_1,
        df_input_1,
        df_index_2,
        df_input_2,
        df_overlap,
        df_distance], axis="columns")
    return out_df
def subtract(df1, df2, cols1=None, cols2=None):
    """
    Generate a new set of genomic intervals by subtracting the second set of genomic intervals from the first.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_subtracted : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Map the columns produced by `overlap` back onto df1's column names.
    # BUGFIX: the original hard-coded "overlap_start"/"overlap_end" and the
    # default output names, which broke for non-default cols1; `overlap`
    # names its overlap columns "overlap_" + sk1 / "overlap_" + ek1.
    name_updates = {
        ck1 + "_1": ck1,
        "overlap_" + sk1: sk1,
        "overlap_" + ek1: ek1,
    }
    extra_columns_1 = [i for i in list(df1.columns) if i not in [ck1, sk1, ek1]]
    for i in extra_columns_1:
        name_updates[i + "_1"] = i
    ### loop over chromosomes, then either return the same or subtracted intervals.
    df1_groups = df1.groupby(ck1).groups
    df2_groups = df2.groupby(ck2).groups
    df_subtracted = []
    for group_keys, df1_group_idxs in df1_groups.items():
        df1_group = df1.loc[df1_group_idxs]
        # if nothing to subtract, add original intervals
        if group_keys not in df2_groups:
            df_subtracted.append(df1_group)
            continue
        df2_group_idxs = df2_groups[group_keys]
        df2_group = df2.loc[df2_group_idxs]
        # Intersecting df1 with the complement of df2 yields df1 minus df2.
        # BUGFIX: cols1/cols2 are now forwarded instead of silently ignored.
        df_subtracted_group = overlap(
            df1_group,
            complement(df2_group, cols=cols2),
            how="inner",
            return_overlap=True,
            cols1=cols1,
            cols2=cols2,
        )[list(name_updates)]
        df_subtracted.append(df_subtracted_group.rename(columns=name_updates))
    df_subtracted = pd.concat(df_subtracted)
    return df_subtracted
def setdiff(df1, df2, cols1=None, cols2=None, on=None):
    """
    Generate a new dataframe of genomic intervals by removing any interval from the
    first dataframe that overlaps an interval from the second dataframe.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as DataFrames.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : None or list
        Additional column names to perform clustering on indepdendently, passed as an argument
        to df.groupby when considering overlaps and must be present in both dataframes.
        Examples for additional columns include 'strand'.

    Returns
    -------
    df_setdiff : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Integer index pairs of overlapping rows; column 0 holds df1 positions.
    overlapping_pairs = _overlap_intidxs(
        df1, df2, how="inner", cols1=cols1, cols2=cols2, on=on
    )
    # Keep only the df1 rows whose position never appears in an overlap pair.
    keep_positions = np.setdiff1d(np.arange(len(df1)), overlapping_pairs[:, 0])
    return df1.iloc[keep_positions]
def split(
    df,
    points,
    cols=None,
    cols_points=None,
    add_names=False,
    suffixes=["_left", "_right"],
):
    """
    Generate a new dataframe of genomic intervals by splitting each interval from the
    first dataframe that overlaps an interval from the second dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Genomic intervals stored as a DataFrame.
    points : pandas.DataFrame or dict
        If pandas.DataFrame, a set of genomic positions specified in columns 'chrom', 'pos'.
        Names of cols can be overwridden by cols_points.
        If dict, mapping of chromosomes to positions.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    cols_points : (str, str) or None
        The names of the chromosome and position columns in `points`.
        The default values are 'chrom', 'pos'.
    add_names : bool
        If True, name each output fragment from its parent interval plus a
        "_left"/"_right" suffix (customizable via `suffixes`).

    Returns
    -------
    df_split : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols is None else cols
    ck2, sk2 = ("chrom", "pos") if cols_points is None else cols_points
    name_updates = {ck1 + "_1": "chrom", "overlap_start": "start", "overlap_end": "end"}
    if add_names:
        name_updates["index_2"] = "index_2"
        return_index = True
    else:
        return_index = False
    extra_columns_1 = [i for i in list(df.columns) if i not in [ck1, sk1, ek1]]
    for i in extra_columns_1:
        name_updates[i + "_1"] = i
    if isinstance(points, dict):
        points = pd.DataFrame.from_dict(points, orient="index", columns=[sk2])
        points.reset_index(inplace=True)
        points.rename(columns={"index": "chrom"}, inplace=True)
    elif isinstance(points, pd.DataFrame):
        # BUGFIX: work on a copy — the original mutated the caller's
        # DataFrame by adding "start"/"end" columns in place.
        points = points.copy()
    else:
        raise ValueError("points must be a dict or pd.Dataframe")
    # Represent each point as a zero-length interval for `complement`.
    points["start"] = points[sk2]
    points["end"] = points[sk2]
    df_split = overlap(
        df,
        complement(points),
        how="inner",
        cols1=cols,
        cols2=(ck2, "start", "end"),
        return_overlap=True,
        return_index=return_index,
    )[list(name_updates)]
    df_split.rename(columns=name_updates, inplace=True)
    if add_names:
        df_split = regions_add_name_column(df_split)
        # Even/odd complement index encodes which side of the point a fragment is on.
        sides = np.mod(df_split["index_2"].values, 2).astype(int)  # .astype(str)
        df_split["name"] = df_split["name"].values + np.array(suffixes)[sides]
        # BUGFIX: `drop` returns a new frame; the original discarded it,
        # leaving the helper "index_2" column in the output.
        df_split = df_split.drop(columns=["index_2"])
    return df_split
def count_overlaps(
    df1, df2, cols1=None, cols2=None, on=None,
):
    """
    Count number of overlapping genomic intervals.

    For every interval in df1, count how many intervals in df2 overlap it.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list
        List of column names to check overlap on indepdendently, passed as an argument
        to df.groupby when considering overlaps. Default is None. Examples for additional columns include 'strand'.

    Returns
    -------
    df_counts : pandas.DataFrame
        df1 with an extra "count" column.
    """
    # how="left" + keep_order=True guarantees one group per df1 row,
    # in df1 order; rows with no partner get index_2 == pd.NA.
    pairs = overlap(
        df1,
        df2,
        how="left",
        return_input=False,
        keep_order=True,
        return_index=True,
        on=on,
        cols1=cols1,
        cols2=cols2,
    )
    # `count` skips NA, so unmatched df1 rows yield 0.
    per_row_counts = pairs.groupby(["index_1"])["index_2"].count().values
    return pd.concat(
        [
            df1,
            pd.DataFrame(per_row_counts, columns=["count"]),
        ],
        axis=1,
        names=["count"],
    )
| [
"numpy.block",
"numpy.iinfo",
"numpy.array",
"numpy.mod",
"numpy.isscalar",
"numpy.where",
"numpy.zeros_like",
"pandas.DataFrame.from_dict",
"numpy.max",
"numpy.vstack",
"pandas.DataFrame",
"numpy.maximum",
"numpy.bincount",
"pandas.Series",
"numpy.ones_like",
"numpy.minimum",
"numpy... | [((9809, 9835), 'numpy.vstack', 'np.vstack', (['overlap_intidxs'], {}), '(overlap_intidxs)\n', (9818, 9835), True, 'import numpy as np\n'), ((13948, 14039), 'pandas.concat', 'pd.concat', (['[df_index_1, df_input_1, df_index_2, df_input_2, df_overlap]'], {'axis': '"""columns"""'}), "([df_index_1, df_input_1, df_index_2, df_input_2, df_overlap],\n axis='columns')\n", (13957, 14039), True, 'import pandas as pd\n'), ((16423, 16447), 'numpy.full', 'np.full', (['df.shape[0]', '(-1)'], {}), '(df.shape[0], -1)\n', (16430, 16447), True, 'import numpy as np\n'), ((17759, 17783), 'numpy.all', 'np.all', (['(cluster_ids >= 0)'], {}), '(cluster_ids >= 0)\n', (17765, 17783), True, 'import numpy as np\n'), ((18321, 18341), 'pandas.DataFrame', 'pd.DataFrame', (['out_df'], {}), '(out_df)\n', (18333, 18341), True, 'import pandas as pd\n'), ((25995, 26015), 'pandas.DataFrame', 'pd.DataFrame', (['out_df'], {}), '(out_df)\n', (26007, 26015), True, 'import pandas as pd\n'), ((29397, 29423), 'numpy.vstack', 'np.vstack', (['closest_intidxs'], {}), '(closest_intidxs)\n', (29406, 29423), True, 'import numpy as np\n'), ((34887, 34991), 'pandas.concat', 'pd.concat', (['[df_index_1, df_input_1, df_index_2, df_input_2, df_overlap, df_distance]'], {'axis': '"""columns"""'}), "([df_index_1, df_input_1, df_index_2, df_input_2, df_overlap,\n df_distance], axis='columns')\n", (34896, 34991), True, 'import pandas as pd\n'), ((36834, 36858), 'pandas.concat', 'pd.concat', (['df_subtracted'], {}), '(df_subtracted)\n', (36843, 36858), True, 'import pandas as pd\n'), ((9748, 9786), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, 2)', 'dtype': 'np.int'}), '(shape=(0, 2), dtype=np.int)\n', (9758, 9786), True, 'import numpy as np\n'), ((9872, 9930), 'numpy.lexsort', 'np.lexsort', (['[overlap_intidxs[:, 1], overlap_intidxs[:, 0]]'], {}), '([overlap_intidxs[:, 1], overlap_intidxs[:, 0]])\n', (9882, 9930), True, 'import numpy as np\n'), ((11964, 12039), 'pandas.DataFrame', 'pd.DataFrame', 
(['{(index_col + suffixes[0]): df1.index[overlap_df_idxs[:, 0]]}'], {}), '({(index_col + suffixes[0]): df1.index[overlap_df_idxs[:, 0]]})\n', (11976, 12039), True, 'import pandas as pd\n'), ((12081, 12156), 'pandas.DataFrame', 'pd.DataFrame', (['{(index_col + suffixes[1]): df2.index[overlap_df_idxs[:, 1]]}'], {}), '({(index_col + suffixes[1]): df2.index[overlap_df_idxs[:, 1]]})\n', (12093, 12156), True, 'import pandas as pd\n'), ((12334, 12429), 'numpy.maximum', 'np.maximum', (['df1[sk1].values[overlap_df_idxs[:, 0]]', 'df2[sk2].values[overlap_df_idxs[:, 1]]'], {}), '(df1[sk1].values[overlap_df_idxs[:, 0]], df2[sk2].values[\n overlap_df_idxs[:, 1]])\n', (12344, 12429), True, 'import numpy as np\n'), ((12483, 12578), 'numpy.minimum', 'np.minimum', (['df1[ek1].values[overlap_df_idxs[:, 0]]', 'df2[ek2].values[overlap_df_idxs[:, 1]]'], {}), '(df1[ek1].values[overlap_df_idxs[:, 0]], df2[ek2].values[\n overlap_df_idxs[:, 1]])\n', (12493, 12578), True, 'import numpy as np\n'), ((12631, 12731), 'pandas.DataFrame', 'pd.DataFrame', (["{(overlap_col + '_' + sk1): overlap_start, (overlap_col + '_' + ek1):\n overlap_end}"], {}), "({(overlap_col + '_' + sk1): overlap_start, (overlap_col + '_' +\n ek1): overlap_end})\n", (12643, 12731), True, 'import pandas as pd\n'), ((16893, 16923), 'numpy.bincount', 'np.bincount', (['cluster_ids_group'], {}), '(cluster_ids_group)\n', (16904, 16923), True, 'import numpy as np\n'), ((17678, 17706), 'pandas.DataFrame', 'pd.DataFrame', (['clusters_group'], {}), '(clusters_group)\n', (17690, 17706), True, 'import pandas as pd\n'), ((18381, 18420), 'pandas.concat', 'pd.concat', (['[df, out_df]'], {'axis': '"""columns"""'}), "([df, out_df], axis='columns')\n", (18390, 18420), True, 'import pandas as pd\n'), ((20820, 20850), 'numpy.bincount', 'np.bincount', (['cluster_ids_group'], {}), '(cluster_ids_group)\n', (20831, 20850), True, 'import numpy as np\n'), ((21457, 21485), 'pandas.DataFrame', 'pd.DataFrame', (['clusters_group'], {}), 
'(clusters_group)\n', (21469, 21485), True, 'import pandas as pd\n'), ((24295, 24325), 'pandas.DataFrame', 'pd.DataFrame', (['complement_group'], {}), '(complement_group)\n', (24307, 24325), True, 'import pandas as pd\n'), ((26055, 26095), 'pandas.concat', 'pd.concat', (['[df1, out_df]'], {'axis': '"""columns"""'}), "([df1, out_df], axis='columns')\n", (26064, 26095), True, 'import pandas as pd\n'), ((29336, 29374), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, 2)', 'dtype': 'np.int'}), '(shape=(0, 2), dtype=np.int)\n', (29346, 29374), True, 'import numpy as np\n'), ((32757, 32832), 'pandas.DataFrame', 'pd.DataFrame', (['{(index_col + suffixes[0]): df1.index[closest_df_idxs[:, 0]]}'], {}), '({(index_col + suffixes[0]): df1.index[closest_df_idxs[:, 0]]})\n', (32769, 32832), True, 'import pandas as pd\n'), ((32874, 32949), 'pandas.DataFrame', 'pd.DataFrame', (['{(index_col + suffixes[1]): df2.index[closest_df_idxs[:, 1]]}'], {}), '({(index_col + suffixes[1]): df2.index[closest_df_idxs[:, 1]]})\n', (32886, 32949), True, 'import pandas as pd\n'), ((33888, 33987), 'numpy.maximum', 'np.maximum', (['(0)', '(df1[sk1].values[closest_df_idxs[:, 0]] - df2[ek2].values[closest_df_idxs[:,\n 1]])'], {}), '(0, df1[sk1].values[closest_df_idxs[:, 0]] - df2[ek2].values[\n closest_df_idxs[:, 1]])\n', (33898, 33987), True, 'import numpy as np\n'), ((34055, 34154), 'numpy.maximum', 'np.maximum', (['(0)', '(df2[sk2].values[closest_df_idxs[:, 1]] - df1[ek1].values[closest_df_idxs[:,\n 0]])'], {}), '(0, df2[sk2].values[closest_df_idxs[:, 1]] - df1[ek1].values[\n closest_df_idxs[:, 0]])\n', (34065, 34154), True, 'import numpy as np\n'), ((34298, 34334), 'pandas.DataFrame', 'pd.DataFrame', (["{'distance': distance}"], {}), "({'distance': distance})\n", (34310, 34334), True, 'import pandas as pd\n'), ((39700, 39761), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['points'], {'orient': '"""index"""', 'columns': '[sk2]'}), "(points, orient='index', columns=[sk2])\n", (39722, 
39761), True, 'import pandas as pd\n'), ((2656, 2670), 'numpy.where', 'np.where', (['inds'], {}), '(inds)\n', (2664, 2670), True, 'import numpy as np\n'), ((7315, 7327), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7323, 7327), True, 'import numpy as np\n'), ((7466, 7478), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7474, 7478), True, 'import numpy as np\n'), ((17799, 17818), 'pandas.concat', 'pd.concat', (['clusters'], {}), '(clusters)\n', (17808, 17818), True, 'import pandas as pd\n'), ((21543, 21562), 'pandas.concat', 'pd.concat', (['clusters'], {}), '(clusters)\n', (21552, 21562), True, 'import pandas as pd\n'), ((23620, 23647), 'numpy.max', 'np.max', (['df_group[ek].values'], {}), '(df_group[ek].values)\n', (23626, 23647), True, 'import numpy as np\n'), ((24391, 24413), 'pandas.concat', 'pd.concat', (['complements'], {}), '(complements)\n', (24400, 24413), True, 'import pandas as pd\n'), ((29053, 29167), 'numpy.vstack', 'np.vstack', (['[df1_group_idxs.values[closest_idxs_group[:, 0]], df2_group_idxs.values[\n closest_idxs_group[:, 1]]]'], {}), '([df1_group_idxs.values[closest_idxs_group[:, 0]], df2_group_idxs.\n values[closest_idxs_group[:, 1]]])\n', (29062, 29167), True, 'import numpy as np\n'), ((33061, 33157), 'numpy.vstack', 'np.vstack', (['[df1[sk1].values[closest_df_idxs[:, 0]], df2[sk2].values[closest_df_idxs[:, 1]]\n ]'], {}), '([df1[sk1].values[closest_df_idxs[:, 0]], df2[sk2].values[\n closest_df_idxs[:, 1]]])\n', (33070, 33157), True, 'import numpy as np\n'), ((33316, 33412), 'numpy.vstack', 'np.vstack', (['[df1[ek1].values[closest_df_idxs[:, 0]], df2[ek2].values[closest_df_idxs[:, 1]]\n ]'], {}), '([df1[ek1].values[closest_df_idxs[:, 0]], df2[ek2].values[\n closest_df_idxs[:, 1]]])\n', (33325, 33412), True, 'import numpy as np\n'), ((34224, 34266), 'numpy.vstack', 'np.vstack', (['[distance_left, distance_right]'], {}), '([distance_left, distance_right])\n', (34233, 34266), True, 'import numpy as np\n'), ((4161, 4175), 
'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (4172, 4175), True, 'import numpy as np\n'), ((9489, 9579), 'numpy.block', 'np.block', (['[[idxs[:, None] for idxs in idxs_pair] for idxs_pair in overlap_intidxs_sub]'], {}), '([[idxs[:, None] for idxs in idxs_pair] for idxs_pair in\n overlap_intidxs_sub])\n', (9497, 9579), True, 'import numpy as np\n'), ((23103, 23146), 'pandas.Series', 'pd.Series', ([], {'data': '[chrom]', 'dtype': 'df[ck].dtype'}), '(data=[chrom], dtype=df[ck].dtype)\n', (23112, 23146), True, 'import pandas as pd\n'), ((23315, 23345), 'pandas.DataFrame', 'pd.DataFrame', (['complement_group'], {}), '(complement_group)\n', (23327, 23345), True, 'import pandas as pd\n'), ((23517, 23535), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (23525, 23535), True, 'import numpy as np\n'), ((33689, 33733), 'numpy.where', 'np.where', (['have_overlap', 'overlap_start', 'pd.NA'], {}), '(have_overlap, overlap_start, pd.NA)\n', (33697, 33733), True, 'import numpy as np\n'), ((33762, 33804), 'numpy.where', 'np.where', (['have_overlap', 'overlap_end', 'pd.NA'], {}), '(have_overlap, overlap_end, pd.NA)\n', (33770, 33804), True, 'import numpy as np\n'), ((40423, 40460), 'numpy.mod', 'np.mod', (["df_split['index_2'].values", '(2)'], {}), "(df_split['index_2'].values, 2)\n", (40429, 40460), True, 'import numpy as np\n'), ((40542, 40560), 'numpy.array', 'np.array', (['suffixes'], {}), '(suffixes)\n', (40550, 40560), True, 'import numpy as np\n'), ((24077, 24125), 'numpy.full', 'np.full', (['complement_starts_group.shape[0]', 'chrom'], {}), '(complement_starts_group.shape[0], chrom)\n', (24084, 24125), True, 'import numpy as np\n'), ((4793, 4811), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (4801, 4811), True, 'import numpy as np\n'), ((8768, 8797), 'numpy.ones_like', 'np.ones_like', (['no_overlap_ids1'], {}), '(no_overlap_ids1)\n', (8780, 8797), True, 'import numpy as np\n'), ((9342, 9371), 'numpy.ones_like', 'np.ones_like', 
(['no_overlap_ids2'], {}), '(no_overlap_ids2)\n', (9354, 9371), True, 'import numpy as np\n'), ((25844, 25867), 'numpy.zeros_like', 'np.zeros_like', (['df1[sk1]'], {}), '(df1[sk1])\n', (25857, 25867), True, 'import numpy as np\n')] |
"""Base translation model with different variations"""
import os
import shutil
from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
class BaseModel(ABC):
    """Base class for sequence translation models.

    Child classes define the concrete encoder and decoder architectures by
    overriding ``_encoder`` and ``_decoder``.

    Attributes:
        mode: The mode the model is supposed to run in
            ("TRAIN", "EVAL", "ENCODE" or "DECODE").
        iterator: The iterator of the input pipeline.
        embedding_size: Size of the bottleneck layer which is later used as
            molecular descriptor.
        encode_vocabulary: Dictionary mapping unique tokens of the input
            strings to integer ids.
        decode_vocabulary: Dictionary mapping unique tokens of the output
            strings to integer ids.
        encode_voc_size: Number of tokens in encode_vocabulary.
        decode_voc_size: Number of tokens in decode_vocabulary.
        one_hot_embedding: If True, tokens are embedded with a fixed one-hot
            matrix instead of a trainable embedding.
        char_embedding_size: Number of dimensions used to embed the one-hot
            encoded tokens in a continuous space.
        global_step: Counter for steps during training.
        save_dir: Path to the directory used to save the model and logs.
        checkpoint_path: Path to the model checkpoint file.
        batch_size: Number of samples per training batch.
        rand_input_swap: Flag to define if (for SMILES input) the input should
            be swapped randomly between the canonical SMILES (usually the
            output sequence) and the shuffled SMILES (usually the input
            sequence).
        measures_to_log: Dictionary with values to log.
        emb_activation: Activation function used in the bottleneck layer.
        lr: Learning rate for training the model (TRAIN mode only).
        lr_decay: Whether learning rate decay is used (TRAIN mode only).
        lr_decay_frequency: Number of steps between learning rate decay steps.
        lr_decay_factor: Amount of learning rate decay.
        beam_width: Width of the window used for the beam search decoder
            (DECODE mode only).
    """

    def __init__(self, mode, iterator, hparams):
        """Constructor for the base translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Raises:
            ValueError: If mode is not TRAIN, EVAL, ENCODE or DECODE.
            ValueError: If hparams.emb_activation is not "tanh" or "linear".
        """
        # Validate the mode up front, before any graph state is created
        # (the original only checked this at the very end of the constructor).
        if mode not in ["TRAIN", "EVAL", "ENCODE", "DECODE"]:
            raise ValueError("Choose one of following modes: TRAIN, EVAL, ENCODE, DECODE")
        self.mode = mode
        self.iterator = iterator
        self.embedding_size = hparams.emb_size
        # The vocabulary files store a pickled dict; it is inverted here so
        # that the in-memory dicts map token -> integer id.
        self.encode_vocabulary = {
            v: k for k, v in np.load(hparams.encode_vocabulary_file, allow_pickle=True).item().items()
        }
        self.encode_voc_size = len(self.encode_vocabulary)
        self.decode_vocabulary = {
            v: k for k, v in np.load(hparams.decode_vocabulary_file, allow_pickle=True).item().items()
        }
        self.decode_vocabulary_reverse = {v: k for k, v in self.decode_vocabulary.items()}
        self.decode_voc_size = len(self.decode_vocabulary)
        self.one_hot_embedding = hparams.one_hot_embedding
        self.char_embedding_size = hparams.char_embedding_size
        self.global_step = tf.get_variable('global_step',
                                           [],
                                           initializer=tf.constant_initializer(0),
                                           trainable=False)
        self.save_dir = hparams.save_dir
        self.checkpoint_path = os.path.join(self.save_dir, 'model.ckpt')
        self.batch_size = hparams.batch_size
        self.rand_input_swap = hparams.rand_input_swap
        self.measures_to_log = {}
        if hparams.emb_activation == "tanh":
            self.emb_activation = tf.nn.tanh
        elif hparams.emb_activation == "linear":
            self.emb_activation = lambda x: x
        else:
            raise ValueError("This activationfunction is not implemented...")
        if mode == "TRAIN":
            self.lr = hparams.lr
            self.lr_decay = hparams.lr_decay
            self.lr_decay_frequency = hparams.lr_decay_frequency
            self.lr_decay_factor = hparams.lr_decay_factor
        if mode == "DECODE":
            self.beam_width = hparams.beam_width

    def build_graph(self):
        """Method that defines the graph for a translation model instance."""
        if self.mode in ["TRAIN", "EVAL"]:
            with tf.name_scope("Input"):
                (self.input_seq,
                 self.shifted_target_seq,
                 self.input_len,
                 self.shifted_target_len,
                 self.target_mask,
                 encoder_emb_inp,
                 decoder_emb_inp) = self._input()
            with tf.variable_scope("Encoder"):
                encoded_seq = self._encoder(encoder_emb_inp)
            with tf.variable_scope("Decoder"):
                logits = self._decoder(encoded_seq, decoder_emb_inp)
                self.prediction = tf.argmax(logits, axis=2, output_type=tf.int32)
            with tf.name_scope("Measures"):
                self.loss = self._compute_loss(logits)
                self.accuracy = self._compute_accuracy(self.prediction)
                self.measures_to_log["loss"] = self.loss
                self.measures_to_log["accuracy"] = self.accuracy
            if self.mode == "TRAIN":
                with tf.name_scope("Training"):
                    self._training()
        if self.mode == "ENCODE":
            with tf.name_scope("Input"):
                self.input_seq = tf.placeholder(tf.int32, [None, None])
                self.input_len = tf.placeholder(tf.int32, [None])
                encoder_emb_inp = self._emb_lookup(self.input_seq)
            with tf.variable_scope("Encoder"):
                self.encoded_seq = self._encoder(encoder_emb_inp)
        if self.mode == "DECODE":
            if self.one_hot_embedding:
                self.decoder_embedding = tf.one_hot(
                    list(range(0, self.decode_voc_size)),
                    self.decode_voc_size
                )
            elif self.encode_vocabulary == self.decode_vocabulary:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            else:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding2",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            with tf.name_scope("Input"):
                self.encoded_seq = tf.placeholder(tf.float32,
                                                  [None, self.embedding_size])
                # Bug fix: the original created this placeholder twice,
                # leaving an orphan op in the graph.
                self.maximum_iterations = tf.placeholder(tf.int32, [])
            with tf.variable_scope("Decoder"):
                self.output_ids = self._decoder(self.encoded_seq)
        self.saver_op = tf.train.Saver()

    def _input(self, with_features=False):
        """Method that defines the input part of the graph.

        Args:
            with_features: Defines if, in addition to the input and output
                sequence, further molecular features (e.g. logP) are expected
                from the input pipeline iterator.
        Returns:
            input_seq: The input sequence.
            shifted_target_seq: The target sequence shifted by one character
                to the left.
            input_len: Number of tokens in the input.
            shifted_target_len: Number of tokens in the shifted target sequence.
            target_mask: Shifted target sequence with masked padding tokens,
                normalized to sum to one.
            encoder_emb_inp: Embedded input sequence (continuous character
                embedding).
            decoder_emb_inp: Embedded target sequence (continuous character
                embedding).
            mol_features: If with_features is True, the molecular features of
                the input pipeline are returned as well.
        """
        with tf.device('/cpu:0'):
            if with_features:
                seq1, seq2, seq1_len, seq2_len, mol_features = self.iterator.get_next()
            else:
                seq1, seq2, seq1_len, seq2_len = self.iterator.get_next()
            # Randomly pick either sequence as encoder input (data
            # augmentation for SMILES); the target is always seq2.
            if self.rand_input_swap:
                rand_val = tf.random_uniform([], dtype=tf.float32)
                input_seq = tf.cond(tf.greater_equal(rand_val, 0.5),
                                    lambda: seq1, lambda: seq2)
                input_len = tf.cond(tf.greater_equal(rand_val, 0.5),
                                    lambda: seq1_len, lambda: seq2_len)
            else:
                input_seq = seq1
                input_len = seq1_len
            target_seq = seq2
            target_len = seq2_len
            # Drop the leading start token from the target sequence.
            shifted_target_len = tf.reshape(target_len, [tf.shape(target_len)[0]]) - 1
            shifted_target_seq = tf.slice(target_seq, [0, 1], [-1, -1])
            target_mask = tf.sequence_mask(shifted_target_len, dtype=tf.float32)
            target_mask = target_mask / tf.reduce_sum(target_mask)
            input_len = tf.reshape(input_len, [tf.shape(input_len)[0]])
        encoder_emb_inp, decoder_emb_inp = self._emb_lookup(input_seq, target_seq)
        if with_features:
            return (input_seq, shifted_target_seq, input_len, shifted_target_len,
                    target_mask, encoder_emb_inp, decoder_emb_inp, mol_features)
        else:
            return (input_seq, shifted_target_seq, input_len, shifted_target_len,
                    target_mask, encoder_emb_inp, decoder_emb_inp)

    def _emb_lookup(self, input_seq, target_seq=None):
        """Embed the one-hot encoded input (and output) sequence into the
        trainable continuous character embedding.

        Args:
            input_seq: The input sequence.
            target_seq: The target sequence (not needed in ENCODE mode).
        Returns:
            encoder_emb_inp: Embedded input sequence, and — unless the model
            runs in ENCODE mode — decoder_emb_inp: embedded target sequence.
        """
        if self.one_hot_embedding:
            self.encoder_embedding = tf.one_hot(
                list(range(0, self.encode_voc_size)),
                self.encode_voc_size
            )
        else:
            self.encoder_embedding = tf.get_variable(
                "char_embedding",
                [self.encode_voc_size, self.char_embedding_size]
            )
        encoder_emb_inp = tf.nn.embedding_lookup(self.encoder_embedding, input_seq)
        if self.mode != "ENCODE":
            assert target_seq is not None
            # Share the embedding matrix when both sides use the same
            # vocabulary.
            if self.encode_vocabulary == self.decode_vocabulary:
                self.decoder_embedding = self.encoder_embedding
            elif self.one_hot_embedding:
                self.decoder_embedding = tf.one_hot(
                    list(range(0, self.decode_voc_size)),
                    self.decode_voc_size
                )
            else:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding2",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            decoder_emb_inp = tf.nn.embedding_lookup(self.decoder_embedding, target_seq)
            return encoder_emb_inp, decoder_emb_inp
        else:
            return encoder_emb_inp

    def _training(self):
        """Method that defines the training operation of the training model's graph."""
        if self.lr_decay:
            self.lr = tf.train.exponential_decay(self.lr,
                                                 self.global_step,
                                                 self.lr_decay_frequency,
                                                 self.lr_decay_factor,
                                                 staircase=True,)
        self.opt = tf.train.AdamOptimizer(self.lr, name='optimizer')
        grads = self.opt.compute_gradients(self.loss)
        # Clip gradients element-wise to [-1, 1] to stabilize training.
        grads = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads]
        self.train_step = self.opt.apply_gradients(grads, self.global_step)

    @abstractmethod
    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph."""
        raise NotImplementedError("Must override _encoder in child class")

    @abstractmethod
    def _decoder(self, encoded_seq, decoder_emb_inp=None):
        """Method that defines the decoder part of the translation model graph."""
        raise NotImplementedError("Must override _decoder in child class")

    def _compute_loss(self, logits):
        """Method that calculates the (masked cross-entropy) loss function."""
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.shifted_target_seq,
            logits=logits)
        loss = tf.reduce_sum(crossent * self.target_mask)
        return loss

    def _compute_accuracy(self, prediction):
        """Method that calculates the character-wise translation accuracy."""
        right_predictions = tf.cast(tf.equal(prediction, self.shifted_target_seq), tf.float32)
        accuracy = tf.reduce_sum(right_predictions * self.target_mask)
        return accuracy

    def train(self, sess):
        """Method that can be called to perform a training step.

        Args:
            sess: The Session the model is running in.
        Returns:
            step: The global step.
        """
        assert self.mode == "TRAIN"
        _, step = sess.run([self.train_step, self.global_step])
        return step

    def eval(self, sess):
        """Method that can be called to perform an evaluation step.

        Args:
            sess: The Session the model is running in.
        Returns:
            The logged measures (values of measures_to_log).
        """
        return sess.run(list(self.measures_to_log.values()))

    def idx_to_char(self, seq):
        """Helper function to transform a one-hot encoded sequence tensor back
        to a string sequence.

        Args:
            seq: Sequence of one-hot encoded characters.
        Returns:
            The string sequence (padding, start and stop tokens removed).
        """
        return ''.join([self.decode_vocabulary_reverse[idx] for idx in seq
                        if idx not in [-1, self.decode_vocabulary["</s>"],
                                       self.decode_vocabulary["<s>"]]])

    def seq2emb(self, sess, input_seq, input_len):
        """Method to run a forward path up to the bottleneck layer (ENCODER).

        Encodes one-hot encoded input sequences.

        Args:
            sess: The Session the model is running in.
            input_seq: Sequences of one-hot encoded characters.
            input_len: Number of characters per sequence.
        Returns:
            Embedding of the input sequences.
        """
        assert self.mode == "ENCODE"
        return sess.run(self.encoded_seq, {self.input_seq: input_seq,
                                           self.input_len: input_len})

    def emb2seq(self, sess, embedding, num_top, maximum_iterations=1000):
        """Method to run a forward path from the bottleneck layer to the
        output sequence (DECODER).

        Decodes the embedding (molecular descriptor) back to a sequence
        representation.

        Args:
            sess: The Session the model is running in.
            embedding: Embeddings (molecular descriptors) of the input sequences.
            num_top: Number of most probable sequences to keep from the beam
                search decoder output.
            maximum_iterations: Maximum number of decoding steps.
        Returns:
            List (per input) of the num_top decoded string sequences.
        """
        assert self.mode == "DECODE"
        output_seq = sess.run(self.output_ids, {self.encoded_seq: embedding,
                                                self.maximum_iterations: maximum_iterations})
        return [[self.idx_to_char(seq[:, i]) for i in range(num_top)] for seq in output_seq]

    def initilize(self, sess, overwrite_saves=False):
        """Function to initialize variables in the model graph and create the
        save folder.

        Args:
            sess: The Session the model is running in.
            overwrite_saves: Defines whether to overwrite the files (recreate
                directory) if a folder with the same save file path exists.
        Returns:
            step: Initial value of the global step.
        Raises:
            ValueError: If the save directory exists and overwrite_saves is
                False.
        """
        assert self.mode == "TRAIN"
        sess.run(tf.global_variables_initializer())
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
            print('Create save file in: ', self.save_dir)
        elif overwrite_saves:
            shutil.rmtree(self.save_dir)
            os.makedirs(self.save_dir)
        else:
            raise ValueError("Save directory %s already exist." %(self.save_dir))
        return sess.run(self.global_step)

    # Correctly spelled alias; the misspelled name is kept for backward
    # compatibility with existing callers.
    initialize = initilize

    def restore(self, sess, restore_path=None):
        """Helper function to restore the variables in the model graph."""
        if restore_path is None:
            restore_path = self.checkpoint_path
        self.saver_op.restore(sess, restore_path)
        if self.mode == "TRAIN":
            step = sess.run(self.global_step)
            print("Restarting training at step %d" %(step))
            return step

    def save(self, sess):
        """Wrapper function to save the model to file."""
        self.saver_op.save(sess, self.checkpoint_path)
class GRUSeq2Seq(BaseModel):
    """Translation model with multi-layer GRU RNNs as encoder and decoder.

    Encoder and decoder share the same architecture (same list of GRU cell
    sizes, optionally reversed for the decoder).

    Attributes:
        cell_size: List defining the number of units in each GRU cell.
        reverse_decoding: Whether to invert the cell_size list for the decoder.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the GRU translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Raises:
            ValueError: If mode is not TRAIN, EVAL, ENCODE or DECODE.
            ValueError: If hparams.emb_activation is not "tanh" or "linear".
        """
        super().__init__(mode, iterator, hparams)
        self.cell_size = hparams.cell_size
        self.reverse_decoding = hparams.reverse_decoding
    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph."""
        encoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
        encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
                                                           encoder_emb_inp,
                                                           sequence_length=self.input_len,
                                                           dtype=tf.float32,
                                                           time_major=False)
        # Bottleneck: project the concatenated final states of all GRU layers
        # down to the embedding size.
        emb = tf.layers.dense(tf.concat(encoder_state, axis=1),
                              self.embedding_size,
                              activation=self.emb_activation
                              )
        return emb
    def _decoder(self, encoded_seq, decoder_emb_inp=None):
        """Method that defines the decoder part of the translation model graph."""
        if self.reverse_decoding:
            # NOTE(review): this mutates self.cell_size in place, so building
            # the decoder twice would flip the order back — presumably the
            # decoder is only built once per graph; confirm.
            self.cell_size = self.cell_size[::-1]
        decoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
        decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cell)
        # The embedding is projected to the concatenated initial states of all
        # GRU layers and split per layer.
        decoder_cell_inital = tf.layers.dense(encoded_seq, sum(self.cell_size))
        decoder_cell_inital = tuple(tf.split(decoder_cell_inital, self.cell_size, 1))
        projection_layer = tf.layers.Dense(self.decode_voc_size, use_bias=False)
        if self.mode != "DECODE":
            # TRAIN/EVAL: teacher forcing with the embedded target sequence.
            helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp,
                                                       sequence_length=self.shifted_target_len,
                                                       time_major=False)
            decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell,
                                                      helper,
                                                      decoder_cell_inital,
                                                      output_layer=projection_layer)
            outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder,
                                                                        impute_finished=True,
                                                                        output_time_major=False)
            return outputs.rnn_output
        else:
            # DECODE: beam search from the embedding alone; the initial state
            # is tiled once per beam.
            decoder_cell_inital = tf.contrib.seq2seq.tile_batch(decoder_cell_inital,
                                                                self.beam_width)
            start_tokens = tf.fill([tf.shape(encoded_seq)[0]], self.decode_vocabulary['<s>'])
            end_token = self.decode_vocabulary['</s>']
            decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell=decoder_cell,
                embedding=self.decoder_embedding,
                start_tokens=start_tokens,
                end_token=end_token,
                initial_state=decoder_cell_inital,
                beam_width=self.beam_width,
                output_layer=projection_layer,
                length_penalty_weight=0.0)
            outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder=decoder,
                impute_finished=False,
                output_time_major=False,
                maximum_iterations=self.maximum_iterations
            )
            return outputs.predicted_ids
class GRUVAE(GRUSeq2Seq):
    """Variational variant of the GRU translation model.

    The encoder outputs the parameters (location and log scale) of a diagonal
    Gaussian posterior over the bottleneck embedding; the embedding is sampled
    from it via the reparameterization trick, and the loss combines the
    reconstruction cross-entropy with an annealed KL divergence term.

    Attributes:
        div_loss_scale: Base scale of the KL divergence term in the loss.
        div_loss_rate: Decay rate used to anneal the divergence scale.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the variational GRU translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        """
        super().__init__(mode, iterator, hparams)
        self.div_loss_scale = hparams.div_loss_scale
        self.div_loss_rate = hparams.div_loss_rate

    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph.

        Returns:
            loc: Mean of the Gaussian posterior over the embedding.
            log_scale: Log standard deviation of the Gaussian posterior.
        """
        encoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
        encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
                                                           encoder_emb_inp,
                                                           sequence_length=self.input_len,
                                                           dtype=tf.float32,
                                                           time_major=False)
        loc = tf.layers.dense(tf.concat(encoder_state, axis=1),
                              self.embedding_size
                              )
        log_scale = tf.layers.dense(tf.concat(encoder_state, axis=1),
                                    self.embedding_size
                                    )
        return loc, log_scale

    def _sampler(self, loc, log_scale):
        """Reparameterization trick: sample loc + exp(log_scale) * epsilon."""
        epsilon = tf.random_normal(
            shape=[tf.shape(loc)[0], self.embedding_size],
            mean=0,
            stddev=1
        )
        return loc + tf.exp(log_scale) * epsilon

    def _compute_loss(self, logits, loc, log_scale):
        """Method that calculates the loss function (reconstruction + scaled KL)."""
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.shifted_target_seq,
            logits=logits)
        crossent = tf.reduce_sum(crossent * self.target_mask, axis=1)
        # KL divergence between the diagonal Gaussian posterior and a standard
        # normal prior.
        divergence = -0.5 * tf.reduce_sum(1 + 2*log_scale - tf.square(loc) - tf.square(tf.exp(log_scale)), axis=-1)
        self.measures_to_log["crossent"] = tf.reduce_mean(crossent)
        self.measures_to_log["divergence"] = tf.reduce_mean(divergence)
        # Anneal the KL scale from 0 towards div_loss_scale over training.
        div_loss_scale = self.div_loss_scale - tf.train.exponential_decay(self.div_loss_scale,
                                                                          self.global_step,
                                                                          10000,
                                                                          self.div_loss_rate,
                                                                          staircase=True,)
        self.measures_to_log["div_loss_scale"] = div_loss_scale
        return tf.reduce_mean(crossent + div_loss_scale * divergence)

    def build_graph(self):
        """Method that defines the graph for a translation model instance."""
        if self.mode in ["TRAIN", "EVAL"]:
            with tf.name_scope("Input"):
                (self.input_seq,
                 self.shifted_target_seq,
                 self.input_len,
                 self.shifted_target_len,
                 self.target_mask,
                 encoder_emb_inp,
                 decoder_emb_inp) = self._input()
            with tf.variable_scope("Encoder"):
                loc, log_scale = self._encoder(encoder_emb_inp)
                encoded_seq = self._sampler(loc, log_scale)
            with tf.variable_scope("Decoder"):
                logits = self._decoder(encoded_seq, decoder_emb_inp)
                self.prediction = tf.argmax(logits, axis=2, output_type=tf.int32)
            with tf.name_scope("Measures"):
                self.loss = self._compute_loss(logits, loc, log_scale)
                self.accuracy = self._compute_accuracy(self.prediction)
                self.measures_to_log["loss"] = self.loss
                self.measures_to_log["accuracy"] = self.accuracy
            if self.mode == "TRAIN":
                with tf.name_scope("Training"):
                    self._training()
        if self.mode == "ENCODE":
            with tf.name_scope("Input"):
                self.input_seq = tf.placeholder(tf.int32, [None, None])
                self.input_len = tf.placeholder(tf.int32, [None])
                encoder_emb_inp = self._emb_lookup(self.input_seq)
            with tf.variable_scope("Encoder"):
                loc, log_scale = self._encoder(encoder_emb_inp)
                self.encoded_seq = self._sampler(loc, log_scale)
        if self.mode == "DECODE":
            if self.one_hot_embedding:
                self.decoder_embedding = tf.one_hot(
                    list(range(0, self.decode_voc_size)),
                    self.decode_voc_size
                )
            elif self.encode_vocabulary == self.decode_vocabulary:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            else:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding2",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            with tf.name_scope("Input"):
                self.encoded_seq = tf.placeholder(tf.float32,
                                                  [None, self.embedding_size])
                # Bug fix: the inherited GRUSeq2Seq._decoder reads
                # self.maximum_iterations in DECODE mode; the original never
                # created this placeholder here, so emb2seq() failed with an
                # AttributeError.
                self.maximum_iterations = tf.placeholder(tf.int32, [])
            with tf.variable_scope("Decoder"):
                self.output_ids = self._decoder(self.encoded_seq)
        self.saver_op = tf.train.Saver()
class NoisyGRUSeq2Seq(GRUSeq2Seq):
    """GRU encoder/decoder translation model regularized by input dropout and
    additive Gaussian noise on the bottleneck embedding.

    Encoder and decoder architecture are otherwise the same as GRUSeq2Seq.

    Attributes:
        input_dropout: Dropout rate applied (during training) to the embedded
            input sequence, dropping whole time steps.
        emb_noise: Standard deviation of the Gaussian noise added (during
            training) to the bottleneck embedding before the activation.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the Noisy GRU translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Raises:
            ValueError: If mode is not TRAIN, EVAL, ENCODE or DECODE.
            ValueError: If hparams.emb_activation is not "tanh" or "linear".
        """
        super().__init__(mode, iterator, hparams)
        self.input_dropout = hparams.input_dropout
        self.emb_noise = hparams.emb_noise

    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph."""
        is_training = self.mode == "TRAIN"
        # During training, drop entire embedded characters: the noise shape
        # broadcasts the dropout mask over the embedding dimension.
        if is_training and self.input_dropout > 0.0:
            max_time = tf.shape(encoder_emb_inp)[1]
            keep_prob = 1. - self.input_dropout
            encoder_emb_inp = tf.nn.dropout(encoder_emb_inp,
                                            keep_prob,
                                            noise_shape=[self.batch_size, max_time, 1])
        stacked_cell = tf.contrib.rnn.MultiRNNCell(
            [tf.nn.rnn_cell.GRUCell(units) for units in self.cell_size])
        _, final_state = tf.nn.dynamic_rnn(stacked_cell,
                                           encoder_emb_inp,
                                           sequence_length=self.input_len,
                                           dtype=tf.float32,
                                           time_major=False)
        # Bottleneck: linear projection of the concatenated final GRU states.
        emb = tf.layers.dense(tf.concat(final_state, axis=1),
                              self.embedding_size
                              )
        # During training, perturb the embedding with Gaussian noise before
        # applying the bottleneck activation.
        if is_training and self.emb_noise > 0.0:
            emb += tf.random_normal(shape=tf.shape(emb),
                                    mean=0.0,
                                    stddev=self.emb_noise,
                                    dtype=tf.float32)
        return self.emb_activation(emb)
class LSTMSeq2Seq(BaseModel):
    """Translation model with multi-layer LSTM RNNs as encoder and decoder.

    Encoder and decoder share the same architecture.

    Attributes:
        cell_size: List defining the number of units in each LSTM cell.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the LSTM translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Raises:
            ValueError: If mode is not TRAIN, EVAL, ENCODE or DECODE.
            ValueError: If hparams.emb_activation is not "tanh" or "linear".
        """
        super().__init__(mode, iterator, hparams)
        self.cell_size = hparams.cell_size
    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph."""
        encoder_cell = [tf.nn.rnn_cell.LSTMCell(size) for size in self.cell_size]
        encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
                                                           encoder_emb_inp,
                                                           sequence_length=self.input_len,
                                                           dtype=tf.float32,
                                                           time_major=False)
        # Only the cell states (c), not the hidden states (h), feed the
        # bottleneck layer.
        encoder_state_c = [state.c for state in encoder_state]
        emb = tf.layers.dense(tf.concat(encoder_state_c, axis=1),
                              self.embedding_size,
                              activation=self.emb_activation
                              )
        return emb
    def _decoder(self, encoded_seq, decoder_emb_inp=None):
        """Method that defines the decoder part of the translation model graph.

        NOTE(review): unlike GRUSeq2Seq._decoder there is no DECODE /
        beam-search branch here — TrainingHelper is built unconditionally, so
        calling this with decoder_emb_inp=None (DECODE mode) would fail;
        confirm this model is only used in TRAIN/EVAL mode.
        """
        decoder_cell = [tf.nn.rnn_cell.LSTMCell(size) for size in self.cell_size]
        decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cell)
        # The embedding is projected to the initial cell states (c) of all
        # layers; the initial hidden states (h) start at zero.
        initial_state_c_full = tf.layers.dense(encoded_seq, sum(self.cell_size))
        initial_state_c = tuple(tf.split(initial_state_c_full, self.cell_size, 1))
        initial_state_h_full = tf.zeros_like(initial_state_c_full)
        initial_state_h = tuple(tf.split(initial_state_h_full, self.cell_size, 1))
        decoder_cell_inital = tuple(
            [tf.contrib.rnn.LSTMStateTuple(
                initial_state_c[i],
                initial_state_h[i]) for i in range(len(self.cell_size))
            ]
        )
        # Teacher forcing with the embedded target sequence.
        helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp,
                                                   sequence_length=self.shifted_target_len,
                                                   time_major=False)
        projection_layer = tf.layers.Dense(self.decode_voc_size, use_bias=False)
        decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell,
                                                  helper,
                                                  decoder_cell_inital,
                                                  output_layer=projection_layer)
        outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder,
                                                                    impute_finished=True,
                                                                    output_time_major=False)
        return outputs.rnn_output
class Conv2GRUSeq2Seq(GRUSeq2Seq):
    """Translation model with a multi-layer 1-D convolutional encoder.

    The decoder is the inherited GRU decoder.

    Attributes:
        conv_hidden_size: List defining the number of filters in each layer.
        kernel_size: List defining the width of the 1-D conv filters in each
            layer (parallel to conv_hidden_size).
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the convolutional translation model class.

        Args:
            mode: The mode the model is supposed to run in
                ("TRAIN", "EVAL", "ENCODE" or "DECODE").
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Raises:
            ValueError: If mode is not TRAIN, EVAL, ENCODE or DECODE.
            ValueError: If hparams.emb_activation is not "tanh" or "linear".
        """
        super().__init__(mode, iterator, hparams)
        self.conv_hidden_size = hparams.conv_hidden_size
        self.kernel_size = hparams.kernel_size

    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph.

        A stack of 1-D convolutions (ReLU, 'SAME' padding), each but the last
        followed by max-pooling, then a 1x1 convolution and mean-pooling over
        the time axis into the bottleneck layer.
        """
        x = encoder_emb_inp
        for i, size in enumerate(self.conv_hidden_size):
            # Bug fix: chain the layers. The original passed encoder_emb_inp
            # to every conv layer, so all but the last conv layer (and every
            # pooling op) were disconnected from the output.
            x = tf.layers.conv1d(x,
                                 size,
                                 self.kernel_size[i],
                                 activation=tf.nn.relu,
                                 padding='SAME')
            if i + 1 < len(self.conv_hidden_size):
                x = tf.layers.max_pooling1d(x, 3, 2, padding='SAME')
        x = tf.layers.conv1d(x,
                             self.conv_hidden_size[-1],
                             1,
                             activation=tf.nn.relu,
                             padding='SAME')
        # Global average pooling over time, then projection to the bottleneck.
        emb = tf.layers.dense(tf.reduce_mean(x, axis=1),
                              self.embedding_size,
                              activation=self.emb_activation
                              )
        return emb
class GRUSeq2SeqWithFeatures(GRUSeq2Seq):
    """Translation model class with a multi-layer Recurrent Neural Network as Encoder
    and Decoder with Gated Recurrent Units (GRUs) with an additional feature
    regression task on the bottleneck embedding. Encoder and Decoder architecture
    are the same.

    Attributes:
        num_features: Number of features to predict.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the GRU translation model with feature classification class.

        Args:
            mode: The mode the model is supposed to run (e.g. TRAIN, EVAL, ENCODE, DECODE).
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        Raises:
            ValueError: if mode is not Train, EVAL, ENCODE, DECODE
            ValueError: if emb_activation is not tanh or linear
        """
        super().__init__(mode, iterator, hparams)
        # Output width of the regression head built in _feature_regression.
        self.num_features = hparams.num_features
    def build_graph(self):
        """Method that defines the graph for a translation model instance with the additional
        feature prediction task.

        One of three mode-dependent sub-graphs is built:
          * TRAIN/EVAL: full encoder/decoder/feature-regression graph with losses.
          * ENCODE: placeholder-fed encoder only.
          * DECODE: placeholder-fed decoder only (embedding variable created first).
        """
        if self.mode in ["TRAIN", "EVAL"]:
            with tf.name_scope("Input"):
                (self.input_seq,
                 self.shifted_target_seq,
                 self.input_len,
                 self.shifted_target_len,
                 self.target_mask,
                 encoder_emb_inp,
                 decoder_emb_inp,
                 self.mol_features) = self._input(with_features=True)
            with tf.variable_scope("Encoder"):
                encoded_seq = self._encoder(encoder_emb_inp)
            with tf.variable_scope("Decoder"):
                sequence_logits = self._decoder(encoded_seq, decoder_emb_inp)
                self.sequence_prediction = tf.argmax(sequence_logits,
                                                     axis=2,
                                                     output_type=tf.int32)
            with tf.variable_scope("Feature_Regression"):
                feature_predictions = self._feature_regression(encoded_seq)
            with tf.name_scope("Measures"):
                # Total loss is the unweighted sum of sequence and feature losses.
                self.loss_sequence, self.loss_features = self._compute_loss(sequence_logits,
                                                                            feature_predictions)
                self.loss = self.loss_sequence + self.loss_features
                self.accuracy = self._compute_accuracy(self.sequence_prediction)
                self.measures_to_log["loss"] = self.loss
                self.measures_to_log["accuracy"] = self.accuracy
            if self.mode == "TRAIN":
                with tf.name_scope("Training"):
                    self._training()
        if self.mode == "ENCODE":
            with tf.name_scope("Input"):
                self.input_seq = tf.placeholder(tf.int32, [None, None])
                self.input_len = tf.placeholder(tf.int32, [None])
                encoder_emb_inp = self._emb_lookup(self.input_seq)
            with tf.variable_scope("Encoder"):
                self.encoded_seq = self._encoder(encoder_emb_inp)
        if self.mode == "DECODE":
            if self.one_hot_embedding:
                self.decoder_embedding = tf.one_hot(
                    list(range(0, self.decode_voc_size)),
                    self.decode_voc_size
                )
            elif self.encode_vocabulary == self.decode_vocabulary:
                # Shared vocabulary: reuse the same variable name as the encoder side.
                self.decoder_embedding = tf.get_variable(
                    "char_embedding",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            else:
                self.decoder_embedding = tf.get_variable(
                    "char_embedding2",
                    [self.decode_voc_size, self.char_embedding_size]
                )
            with tf.name_scope("Input"):
                self.encoded_seq = tf.placeholder(tf.float32, [None, self.embedding_size])
                self.maximum_iterations = tf.placeholder(tf.int32, [])
            with tf.variable_scope("Decoder"):
                self.output_ids = self._decoder(self.encoded_seq)
        self.saver_op = tf.train.Saver()
    def _feature_regression(self, encoded_seq):
        """Method that defines the feature regression part of the graph.

        A fully connected head (512 -> 128 -> num_features) on top of the
        bottleneck embedding; the final layer is linear.
        """
        x = tf.layers.dense(inputs=encoded_seq,
                            units=512,
                            activation=tf.nn.relu
                            )
        x = tf.layers.dense(inputs=x,
                            units=128,
                            activation=tf.nn.relu
                            )
        x = tf.layers.dense(inputs=x,
                            units=self.num_features,
                            activation=None
                            )
        return x
    def _compute_loss(self, sequence_logits, features_predictions):
        """Method that calculates the loss function.

        Returns:
            Tuple of (summed masked cross-entropy over the target sequence,
            mean squared error of the feature predictions).
        """
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.shifted_target_seq,
                                                                  logits=sequence_logits)
        # target_mask zeroes out padding positions before summing.
        loss_sequence = (tf.reduce_sum(crossent * self.target_mask))
        loss_features = tf.losses.mean_squared_error(labels=self.mol_features,
                                                    predictions=features_predictions,
                                                    )
        return loss_sequence, loss_features
class NoisyGRUSeq2SeqWithFeatures(GRUSeq2SeqWithFeatures):
    """GRU seq2seq translation model with feature regression, input dropout,
    and Gaussian noise injected after the bottleneck layer during training.
    Encoder and Decoder architecture are the same.

    Attributes:
        input_dropout: Dropout rate applied to the embedded input sequence
            during training.
        emb_noise: Standard deviation of the Gaussian noise term added to the
            bottleneck embedding.
    """
    def __init__(self, mode, iterator, hparams):
        """Constructor for the noisy GRU translation model with feature
        classification class.

        Args:
            mode: The mode the model is supposed to run (e.g. TRAIN, EVAL, ENCODE, DECODE).
            iterator: The iterator of the input pipeline.
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        Raises:
            ValueError: if mode is not TRAIN, EVAL, ENCODE, DECODE
            ValueError: if emb_activation is not tanh or linear
        """
        super().__init__(mode, iterator, hparams)
        self.input_dropout = hparams.input_dropout
        self.emb_noise = hparams.emb_noise

    def _encoder(self, encoder_emb_inp):
        """Method that defines the encoder part of the translation model graph."""
        if self.mode == "TRAIN":
            seq_len = tf.shape(encoder_emb_inp)[1]
            # noise_shape [batch, time, 1]: the dropout mask is broadcast over
            # the embedding dimension, dropping whole time-step embeddings.
            encoder_emb_inp = tf.nn.dropout(
                encoder_emb_inp,
                1. - self.input_dropout,
                noise_shape=[self.batch_size, seq_len, 1])
        stacked_cell = tf.contrib.rnn.MultiRNNCell(
            [tf.nn.rnn_cell.GRUCell(units) for units in self.cell_size])
        _, final_state = tf.nn.dynamic_rnn(
            stacked_cell,
            encoder_emb_inp,
            sequence_length=self.input_len,
            dtype=tf.float32,
            time_major=False)
        # Concatenate all layers' final states and project to the bottleneck.
        emb = tf.layers.dense(
            tf.concat(final_state, axis=1),
            self.embedding_size)
        # NOTE(review): ">= 0" means the noise op is added during training even
        # for stddev 0 (zero noise); kept as in the original.
        if self.mode == "TRAIN" and self.emb_noise >= 0:
            emb += tf.random_normal(
                shape=tf.shape(emb),
                mean=0.0,
                stddev=self.emb_noise,
                dtype=tf.float32)
        return self.emb_activation(emb)
class ModelWithGrads(NoisyGRUSeq2SeqWithFeatures):
    """Encoder-only model that additionally exposes the gradients of the
    bottleneck embedding with respect to the input embeddings.

    ``start_grads`` is fed as the output gradient, so ``self.grads`` is the
    vector-Jacobian product of the encoder.

    Note: the redundant ``__init__`` override that only delegated to
    ``super().__init__`` was removed; the inherited constructor is identical.
    """

    def build_graph(self):
        """Build a placeholder-fed encoder graph plus the gradient op.

        NOTE(review): ``ndims`` is not defined in this class or its bases as
        visible here; it is presumably a module-level constant giving the
        bottleneck dimensionality -- confirm it is in scope at import time.
        """
        with tf.name_scope("Input"):
            self.input_seq = tf.placeholder(tf.int32, [None, None])
            self.input_len = tf.placeholder(tf.int32, [None])
            self.start_grads = tf.placeholder(tf.float32, [None, ndims])
            encoder_emb_inp = self._emb_lookup(self.input_seq)
        with tf.variable_scope("Encoder"):
            self.encoded_seq = self._encoder(encoder_emb_inp)
        # Vector-Jacobian product: d(encoded_seq)/d(encoder_emb_inp),
        # weighted by the fed start_grads.
        self.grads = tf.gradients(self.encoded_seq, encoder_emb_inp, self.start_grads)
        self.saver_op = tf.train.Saver()
| [
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.gradients",
"tensorflow.nn.dropo... | [((3541, 3582), 'os.path.join', 'os.path.join', (['self.save_dir', '"""model.ckpt"""'], {}), "(self.save_dir, 'model.ckpt')\n", (3553, 3582), False, 'import os\n'), ((7194, 7210), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7208, 7210), True, 'import tensorflow as tf\n'), ((10712, 10769), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.encoder_embedding', 'input_seq'], {}), '(self.encoder_embedding, input_seq)\n', (10734, 10769), True, 'import tensorflow as tf\n'), ((12074, 12123), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {'name': '"""optimizer"""'}), "(self.lr, name='optimizer')\n", (12096, 12123), True, 'import tensorflow as tf\n'), ((12905, 13003), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.shifted_target_seq', 'logits': 'logits'}), '(labels=self.\n shifted_target_seq, logits=logits)\n', (12951, 13003), True, 'import tensorflow as tf\n'), ((13040, 13082), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(crossent * self.target_mask)'], {}), '(crossent * self.target_mask)\n', (13053, 13082), True, 'import tensorflow as tf\n'), ((13343, 13394), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(right_predictions * self.target_mask)'], {}), '(right_predictions * self.target_mask)\n', (13356, 13394), True, 'import tensorflow as tf\n'), ((18804, 18845), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell'], {}), '(encoder_cell)\n', (18831, 18845), True, 'import tensorflow as tf\n'), ((18887, 19008), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_emb_inp'], {'sequence_length': 'self.input_len', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(encoder_cell, encoder_emb_inp, sequence_length=self.\n input_len, dtype=tf.float32, time_major=False)\n', (18904, 19008), True, 'import tensorflow as tf\n'), ((19797, 19838), 
'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['decoder_cell'], {}), '(decoder_cell)\n', (19824, 19838), True, 'import tensorflow as tf\n'), ((20032, 20085), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.decode_voc_size'], {'use_bias': '(False)'}), '(self.decode_voc_size, use_bias=False)\n', (20047, 20085), True, 'import tensorflow as tf\n'), ((22482, 22523), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell'], {}), '(encoder_cell)\n', (22509, 22523), True, 'import tensorflow as tf\n'), ((22565, 22686), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_emb_inp'], {'sequence_length': 'self.input_len', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(encoder_cell, encoder_emb_inp, sequence_length=self.\n input_len, dtype=tf.float32, time_major=False)\n', (22582, 22686), True, 'import tensorflow as tf\n'), ((23630, 23728), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.shifted_target_seq', 'logits': 'logits'}), '(labels=self.\n shifted_target_seq, logits=logits)\n', (23676, 23728), True, 'import tensorflow as tf\n'), ((23768, 23818), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(crossent * self.target_mask)'], {'axis': '(1)'}), '(crossent * self.target_mask, axis=1)\n', (23781, 23818), True, 'import tensorflow as tf\n'), ((23992, 24016), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['crossent'], {}), '(crossent)\n', (24006, 24016), True, 'import tensorflow as tf\n'), ((24062, 24088), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['divergence'], {}), '(divergence)\n', (24076, 24088), True, 'import tensorflow as tf\n'), ((24539, 24593), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(crossent + div_loss_scale * divergence)'], {}), '(crossent + div_loss_scale * divergence)\n', (24553, 24593), True, 'import tensorflow as tf\n'), ((27425, 27441), 'tensorflow.train.Saver', 'tf.train.Saver', ([], 
{}), '()\n', (27439, 27441), True, 'import tensorflow as tf\n'), ((29212, 29253), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell'], {}), '(encoder_cell)\n', (29239, 29253), True, 'import tensorflow as tf\n'), ((29295, 29416), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_emb_inp'], {'sequence_length': 'self.input_len', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(encoder_cell, encoder_emb_inp, sequence_length=self.\n input_len, dtype=tf.float32, time_major=False)\n', (29312, 29416), True, 'import tensorflow as tf\n'), ((31303, 31344), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell'], {}), '(encoder_cell)\n', (31330, 31344), True, 'import tensorflow as tf\n'), ((31386, 31507), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_emb_inp'], {'sequence_length': 'self.input_len', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(encoder_cell, encoder_emb_inp, sequence_length=self.\n input_len, dtype=tf.float32, time_major=False)\n', (31403, 31507), True, 'import tensorflow as tf\n'), ((32278, 32319), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['decoder_cell'], {}), '(decoder_cell)\n', (32305, 32319), True, 'import tensorflow as tf\n'), ((32515, 32550), 'tensorflow.zeros_like', 'tf.zeros_like', (['initial_state_c_full'], {}), '(initial_state_c_full)\n', (32528, 32550), True, 'import tensorflow as tf\n'), ((32864, 32978), 'tensorflow.contrib.seq2seq.TrainingHelper', 'tf.contrib.seq2seq.TrainingHelper', (['decoder_emb_inp'], {'sequence_length': 'self.shifted_target_len', 'time_major': '(False)'}), '(decoder_emb_inp, sequence_length=self.\n shifted_target_len, time_major=False)\n', (32897, 32978), True, 'import tensorflow as tf\n'), ((33103, 33156), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.decode_voc_size'], {'use_bias': '(False)'}), '(self.decode_voc_size, use_bias=False)\n', (33118, 33156), 
True, 'import tensorflow as tf\n'), ((33175, 33284), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', (['decoder_cell', 'helper', 'decoder_cell_inital'], {'output_layer': 'projection_layer'}), '(decoder_cell, helper, decoder_cell_inital,\n output_layer=projection_layer)\n', (33206, 33284), True, 'import tensorflow as tf\n'), ((33466, 33559), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', (['decoder'], {'impute_finished': '(True)', 'output_time_major': '(False)'}), '(decoder, impute_finished=True,\n output_time_major=False)\n', (33499, 33559), True, 'import tensorflow as tf\n'), ((35347, 35439), 'tensorflow.layers.conv1d', 'tf.layers.conv1d', (['x', 'self.conv_hidden_size[-1]', '(1)'], {'activation': 'tf.nn.relu', 'padding': '"""SAME"""'}), "(x, self.conv_hidden_size[-1], 1, activation=tf.nn.relu,\n padding='SAME')\n", (35363, 35439), True, 'import tensorflow as tf\n'), ((39997, 40013), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (40011, 40013), True, 'import tensorflow as tf\n'), ((40155, 40224), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'encoded_seq', 'units': '(512)', 'activation': 'tf.nn.relu'}), '(inputs=encoded_seq, units=512, activation=tf.nn.relu)\n', (40170, 40224), True, 'import tensorflow as tf\n'), ((40322, 40381), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 'units': '(128)', 'activation': 'tf.nn.relu'}), '(inputs=x, units=128, activation=tf.nn.relu)\n', (40337, 40381), True, 'import tensorflow as tf\n'), ((40479, 40546), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 'units': 'self.num_features', 'activation': 'None'}), '(inputs=x, units=self.num_features, activation=None)\n', (40494, 40546), True, 'import tensorflow as tf\n'), ((40794, 40901), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.shifted_target_seq', 'logits': 
'sequence_logits'}), '(labels=self.\n shifted_target_seq, logits=sequence_logits)\n', (40840, 40901), True, 'import tensorflow as tf\n'), ((40988, 41030), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(crossent * self.target_mask)'], {}), '(crossent * self.target_mask)\n', (41001, 41030), True, 'import tensorflow as tf\n'), ((41056, 41149), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'self.mol_features', 'predictions': 'features_predictions'}), '(labels=self.mol_features, predictions=\n features_predictions)\n', (41084, 41149), True, 'import tensorflow as tf\n'), ((43136, 43177), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell'], {}), '(encoder_cell)\n', (43163, 43177), True, 'import tensorflow as tf\n'), ((43219, 43340), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_emb_inp'], {'sequence_length': 'self.input_len', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(encoder_cell, encoder_emb_inp, sequence_length=self.\n input_len, dtype=tf.float32, time_major=False)\n', (43236, 43340), True, 'import tensorflow as tf\n'), ((44659, 44724), 'tensorflow.gradients', 'tf.gradients', (['self.encoded_seq', 'encoder_emb_inp', 'self.start_grads'], {}), '(self.encoded_seq, encoder_emb_inp, self.start_grads)\n', (44671, 44724), True, 'import tensorflow as tf\n'), ((44750, 44766), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (44764, 44766), True, 'import tensorflow as tf\n'), ((8217, 8236), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (8226, 8236), True, 'import tensorflow as tf\n'), ((9098, 9136), 'tensorflow.slice', 'tf.slice', (['target_seq', '[0, 1]', '[-1, -1]'], {}), '(target_seq, [0, 1], [-1, -1])\n', (9106, 9136), True, 'import tensorflow as tf\n'), ((9163, 9217), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['shifted_target_len'], {'dtype': 'tf.float32'}), '(shifted_target_len, dtype=tf.float32)\n', (9179, 
9217), True, 'import tensorflow as tf\n'), ((10556, 10644), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding"""', '[self.encode_voc_size, self.char_embedding_size]'], {}), "('char_embedding', [self.encode_voc_size, self.\n char_embedding_size])\n", (10571, 10644), True, 'import tensorflow as tf\n'), ((11418, 11476), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.decoder_embedding', 'target_seq'], {}), '(self.decoder_embedding, target_seq)\n', (11440, 11476), True, 'import tensorflow as tf\n'), ((11741, 11862), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.lr', 'self.global_step', 'self.lr_decay_frequency', 'self.lr_decay_factor'], {'staircase': '(True)'}), '(self.lr, self.global_step, self.\n lr_decay_frequency, self.lr_decay_factor, staircase=True)\n', (11767, 11862), True, 'import tensorflow as tf\n'), ((13264, 13309), 'tensorflow.equal', 'tf.equal', (['prediction', 'self.shifted_target_seq'], {}), '(prediction, self.shifted_target_seq)\n', (13272, 13309), True, 'import tensorflow as tf\n'), ((16523, 16556), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16554, 16556), True, 'import tensorflow as tf\n'), ((16573, 16602), 'os.path.exists', 'os.path.exists', (['self.save_dir'], {}), '(self.save_dir)\n', (16587, 16602), False, 'import os\n'), ((16616, 16642), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (16627, 16642), False, 'import os\n'), ((18724, 18752), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['size'], {}), '(size)\n', (18746, 18752), True, 'import tensorflow as tf\n'), ((19270, 19302), 'tensorflow.concat', 'tf.concat', (['encoder_state'], {'axis': '(1)'}), '(encoder_state, axis=1)\n', (19279, 19302), True, 'import tensorflow as tf\n'), ((19717, 19745), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['size'], {}), '(size)\n', (19739, 19745), True, 'import tensorflow as tf\n'), 
((19955, 20003), 'tensorflow.split', 'tf.split', (['decoder_cell_inital', 'self.cell_size', '(1)'], {}), '(decoder_cell_inital, self.cell_size, 1)\n', (19963, 20003), True, 'import tensorflow as tf\n'), ((20141, 20255), 'tensorflow.contrib.seq2seq.TrainingHelper', 'tf.contrib.seq2seq.TrainingHelper', (['decoder_emb_inp'], {'sequence_length': 'self.shifted_target_len', 'time_major': '(False)'}), '(decoder_emb_inp, sequence_length=self.\n shifted_target_len, time_major=False)\n', (20174, 20255), True, 'import tensorflow as tf\n'), ((20383, 20492), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', (['decoder_cell', 'helper', 'decoder_cell_inital'], {'output_layer': 'projection_layer'}), '(decoder_cell, helper, decoder_cell_inital,\n output_layer=projection_layer)\n', (20414, 20492), True, 'import tensorflow as tf\n'), ((20690, 20783), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', (['decoder'], {'impute_finished': '(True)', 'output_time_major': '(False)'}), '(decoder, impute_finished=True,\n output_time_major=False)\n', (20723, 20783), True, 'import tensorflow as tf\n'), ((21012, 21079), 'tensorflow.contrib.seq2seq.tile_batch', 'tf.contrib.seq2seq.tile_batch', (['decoder_cell_inital', 'self.beam_width'], {}), '(decoder_cell_inital, self.beam_width)\n', (21041, 21079), True, 'import tensorflow as tf\n'), ((21315, 21586), 'tensorflow.contrib.seq2seq.BeamSearchDecoder', 'tf.contrib.seq2seq.BeamSearchDecoder', ([], {'cell': 'decoder_cell', 'embedding': 'self.decoder_embedding', 'start_tokens': 'start_tokens', 'end_token': 'end_token', 'initial_state': 'decoder_cell_inital', 'beam_width': 'self.beam_width', 'output_layer': 'projection_layer', 'length_penalty_weight': '(0.0)'}), '(cell=decoder_cell, embedding=self.\n decoder_embedding, start_tokens=start_tokens, end_token=end_token,\n initial_state=decoder_cell_inital, beam_width=self.beam_width,\n output_layer=projection_layer, length_penalty_weight=0.0)\n', 
(21351, 21586), True, 'import tensorflow as tf\n'), ((21743, 21889), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', ([], {'decoder': 'decoder', 'impute_finished': '(False)', 'output_time_major': '(False)', 'maximum_iterations': 'self.maximum_iterations'}), '(decoder=decoder, impute_finished=False,\n output_time_major=False, maximum_iterations=self.maximum_iterations)\n', (21776, 21889), True, 'import tensorflow as tf\n'), ((22402, 22430), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['size'], {}), '(size)\n', (22424, 22430), True, 'import tensorflow as tf\n'), ((22948, 22980), 'tensorflow.concat', 'tf.concat', (['encoder_state'], {'axis': '(1)'}), '(encoder_state, axis=1)\n', (22957, 22980), True, 'import tensorflow as tf\n'), ((23099, 23131), 'tensorflow.concat', 'tf.concat', (['encoder_state'], {'axis': '(1)'}), '(encoder_state, axis=1)\n', (23108, 23131), True, 'import tensorflow as tf\n'), ((24145, 24257), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.div_loss_scale', 'self.global_step', '(10000)', 'self.div_loss_rate'], {'staircase': '(True)'}), '(self.div_loss_scale, self.global_step, 10000,\n self.div_loss_rate, staircase=True)\n', (24171, 24257), True, 'import tensorflow as tf\n'), ((28920, 29025), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_emb_inp', '(1.0 - self.input_dropout)'], {'noise_shape': '[self.batch_size, max_time, 1]'}), '(encoder_emb_inp, 1.0 - self.input_dropout, noise_shape=[self.\n batch_size, max_time, 1])\n', (28933, 29025), True, 'import tensorflow as tf\n'), ((29132, 29160), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['size'], {}), '(size)\n', (29154, 29160), True, 'import tensorflow as tf\n'), ((29678, 29710), 'tensorflow.concat', 'tf.concat', (['encoder_state'], {'axis': '(1)'}), '(encoder_state, axis=1)\n', (29687, 29710), True, 'import tensorflow as tf\n'), ((31222, 31251), 'tensorflow.nn.rnn_cell.LSTMCell', 
'tf.nn.rnn_cell.LSTMCell', (['size'], {}), '(size)\n', (31245, 31251), True, 'import tensorflow as tf\n'), ((31832, 31866), 'tensorflow.concat', 'tf.concat', (['encoder_state_c'], {'axis': '(1)'}), '(encoder_state_c, axis=1)\n', (31841, 31866), True, 'import tensorflow as tf\n'), ((32197, 32226), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['size'], {}), '(size)\n', (32220, 32226), True, 'import tensorflow as tf\n'), ((32433, 32482), 'tensorflow.split', 'tf.split', (['initial_state_c_full', 'self.cell_size', '(1)'], {}), '(initial_state_c_full, self.cell_size, 1)\n', (32441, 32482), True, 'import tensorflow as tf\n'), ((32583, 32632), 'tensorflow.split', 'tf.split', (['initial_state_h_full', 'self.cell_size', '(1)'], {}), '(initial_state_h_full, self.cell_size, 1)\n', (32591, 32632), True, 'import tensorflow as tf\n'), ((34984, 35088), 'tensorflow.layers.conv1d', 'tf.layers.conv1d', (['encoder_emb_inp', 'size', 'self.kernel_size[i]'], {'activation': 'tf.nn.relu', 'padding': '"""SAME"""'}), "(encoder_emb_inp, size, self.kernel_size[i], activation=tf.\n nn.relu, padding='SAME')\n", (35000, 35088), True, 'import tensorflow as tf\n'), ((35583, 35608), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (35597, 35608), True, 'import tensorflow as tf\n'), ((42844, 42949), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_emb_inp', '(1.0 - self.input_dropout)'], {'noise_shape': '[self.batch_size, max_time, 1]'}), '(encoder_emb_inp, 1.0 - self.input_dropout, noise_shape=[self.\n batch_size, max_time, 1])\n', (42857, 42949), True, 'import tensorflow as tf\n'), ((43056, 43084), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['size'], {}), '(size)\n', (43078, 43084), True, 'import tensorflow as tf\n'), ((43602, 43634), 'tensorflow.concat', 'tf.concat', (['encoder_state'], {'axis': '(1)'}), '(encoder_state, axis=1)\n', (43611, 43634), True, 'import tensorflow as tf\n'), ((44242, 44264), 
'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (44255, 44264), True, 'import tensorflow as tf\n'), ((44295, 44333), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (44309, 44333), True, 'import tensorflow as tf\n'), ((44363, 44395), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (44377, 44395), True, 'import tensorflow as tf\n'), ((44427, 44468), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, ndims]'], {}), '(tf.float32, [None, ndims])\n', (44441, 44468), True, 'import tensorflow as tf\n'), ((44546, 44574), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (44563, 44574), True, 'import tensorflow as tf\n'), ((3381, 3407), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (3404, 3407), True, 'import tensorflow as tf\n'), ((4621, 4643), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (4634, 4643), True, 'import tensorflow as tf\n'), ((4932, 4960), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (4949, 4960), True, 'import tensorflow as tf\n'), ((5041, 5069), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (5058, 5069), True, 'import tensorflow as tf\n'), ((5174, 5221), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(2)', 'output_type': 'tf.int32'}), '(logits, axis=2, output_type=tf.int32)\n', (5183, 5221), True, 'import tensorflow as tf\n'), ((5240, 5265), 'tensorflow.name_scope', 'tf.name_scope', (['"""Measures"""'], {}), "('Measures')\n", (5253, 5265), True, 'import tensorflow as tf\n'), ((5691, 5713), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (5704, 5713), True, 'import tensorflow as tf\n'), ((5748, 5786), 'tensorflow.placeholder', 'tf.placeholder', 
(['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (5762, 5786), True, 'import tensorflow as tf\n'), ((5820, 5852), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (5834, 5852), True, 'import tensorflow as tf\n'), ((5938, 5966), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (5955, 5966), True, 'import tensorflow as tf\n'), ((6748, 6770), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (6761, 6770), True, 'import tensorflow as tf\n'), ((6807, 6862), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.embedding_size]'], {}), '(tf.float32, [None, self.embedding_size])\n', (6821, 6862), True, 'import tensorflow as tf\n'), ((6955, 6983), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (6969, 6983), True, 'import tensorflow as tf\n'), ((7026, 7054), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (7040, 7054), True, 'import tensorflow as tf\n'), ((7073, 7101), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (7090, 7101), True, 'import tensorflow as tf\n'), ((8512, 8551), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'dtype': 'tf.float32'}), '([], dtype=tf.float32)\n', (8529, 8551), True, 'import tensorflow as tf\n'), ((9258, 9284), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_mask'], {}), '(target_mask)\n', (9271, 9284), True, 'import tensorflow as tf\n'), ((12196, 12229), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (12212, 12229), True, 'import tensorflow as tf\n'), ((16743, 16771), 'shutil.rmtree', 'shutil.rmtree', (['self.save_dir'], {}), '(self.save_dir)\n', (16756, 16771), False, 'import shutil\n'), ((16784, 16810), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (16795, 
16810), False, 'import os\n'), ((23460, 23477), 'tensorflow.exp', 'tf.exp', (['log_scale'], {}), '(log_scale)\n', (23466, 23477), True, 'import tensorflow as tf\n'), ((24764, 24786), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (24777, 24786), True, 'import tensorflow as tf\n'), ((25075, 25103), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (25092, 25103), True, 'import tensorflow as tf\n'), ((25247, 25275), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (25264, 25275), True, 'import tensorflow as tf\n'), ((25380, 25427), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(2)', 'output_type': 'tf.int32'}), '(logits, axis=2, output_type=tf.int32)\n', (25389, 25427), True, 'import tensorflow as tf\n'), ((25446, 25471), 'tensorflow.name_scope', 'tf.name_scope', (['"""Measures"""'], {}), "('Measures')\n", (25459, 25471), True, 'import tensorflow as tf\n'), ((26001, 26023), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (26014, 26023), True, 'import tensorflow as tf\n'), ((26058, 26096), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (26072, 26096), True, 'import tensorflow as tf\n'), ((26130, 26162), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (26144, 26162), True, 'import tensorflow as tf\n'), ((26248, 26276), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (26265, 26276), True, 'import tensorflow as tf\n'), ((27121, 27143), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (27134, 27143), True, 'import tensorflow as tf\n'), ((27180, 27235), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.embedding_size]'], {}), '(tf.float32, [None, self.embedding_size])\n', (27194, 27235), True, 'import 
tensorflow as tf\n'), ((27304, 27332), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (27321, 27332), True, 'import tensorflow as tf\n'), ((28861, 28886), 'tensorflow.shape', 'tf.shape', (['encoder_emb_inp'], {}), '(encoder_emb_inp)\n', (28869, 28886), True, 'import tensorflow as tf\n'), ((32684, 32753), 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', (['initial_state_c[i]', 'initial_state_h[i]'], {}), '(initial_state_c[i], initial_state_h[i])\n', (32713, 32753), True, 'import tensorflow as tf\n'), ((35285, 35333), 'tensorflow.layers.max_pooling1d', 'tf.layers.max_pooling1d', (['x', '(3)', '(2)'], {'padding': '"""SAME"""'}), "(x, 3, 2, padding='SAME')\n", (35308, 35333), True, 'import tensorflow as tf\n'), ((37013, 37035), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (37026, 37035), True, 'import tensorflow as tf\n'), ((37378, 37406), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (37395, 37406), True, 'import tensorflow as tf\n'), ((37487, 37515), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (37504, 37515), True, 'import tensorflow as tf\n'), ((37638, 37694), 'tensorflow.argmax', 'tf.argmax', (['sequence_logits'], {'axis': '(2)', 'output_type': 'tf.int32'}), '(sequence_logits, axis=2, output_type=tf.int32)\n', (37647, 37694), True, 'import tensorflow as tf\n'), ((37819, 37858), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Feature_Regression"""'], {}), "('Feature_Regression')\n", (37836, 37858), True, 'import tensorflow as tf\n'), ((37954, 37979), 'tensorflow.name_scope', 'tf.name_scope', (['"""Measures"""'], {}), "('Measures')\n", (37967, 37979), True, 'import tensorflow as tf\n'), ((38617, 38639), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (38630, 38639), True, 'import tensorflow as tf\n'), ((38674, 38712), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (38688, 38712), True, 'import tensorflow as tf\n'), ((38746, 38778), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (38760, 38778), True, 'import tensorflow as tf\n'), ((38864, 38892), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (38881, 38892), True, 'import tensorflow as tf\n'), ((39674, 39696), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (39687, 39696), True, 'import tensorflow as tf\n'), ((39733, 39788), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.embedding_size]'], {}), '(tf.float32, [None, self.embedding_size])\n', (39747, 39788), True, 'import tensorflow as tf\n'), ((39831, 39859), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[]'], {}), '(tf.int32, [])\n', (39845, 39859), True, 'import tensorflow as tf\n'), ((39877, 39905), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (39894, 39905), True, 'import tensorflow as tf\n'), ((42785, 42810), 'tensorflow.shape', 'tf.shape', (['encoder_emb_inp'], {}), '(encoder_emb_inp)\n', (42793, 42810), True, 'import tensorflow as tf\n'), ((5575, 5600), 'tensorflow.name_scope', 'tf.name_scope', (['"""Training"""'], {}), "('Training')\n", (5588, 5600), True, 'import tensorflow as tf\n'), ((6386, 6474), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding', [self.decode_voc_size, self.\n char_embedding_size])\n", (6401, 6474), True, 'import tensorflow as tf\n'), ((6587, 6676), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding2"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding2', [self.decode_voc_size, self.\n char_embedding_size])\n", (6602, 6676), True, 'import 
tensorflow as tf\n'), ((8588, 8619), 'tensorflow.greater_equal', 'tf.greater_equal', (['rand_val', '(0.5)'], {}), '(rand_val, 0.5)\n', (8604, 8619), True, 'import tensorflow as tf\n'), ((8721, 8752), 'tensorflow.greater_equal', 'tf.greater_equal', (['rand_val', '(0.5)'], {}), '(rand_val, 0.5)\n', (8737, 8752), True, 'import tensorflow as tf\n'), ((11245, 11334), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding2"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding2', [self.decode_voc_size, self.\n char_embedding_size])\n", (11260, 11334), True, 'import tensorflow as tf\n'), ((25885, 25910), 'tensorflow.name_scope', 'tf.name_scope', (['"""Training"""'], {}), "('Training')\n", (25898, 25910), True, 'import tensorflow as tf\n'), ((26759, 26847), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding', [self.decode_voc_size, self.\n char_embedding_size])\n", (26774, 26847), True, 'import tensorflow as tf\n'), ((26960, 27049), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding2"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding2', [self.decode_voc_size, self.\n char_embedding_size])\n", (26975, 27049), True, 'import tensorflow as tf\n'), ((29895, 29908), 'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (29903, 29908), True, 'import tensorflow as tf\n'), ((38501, 38526), 'tensorflow.name_scope', 'tf.name_scope', (['"""Training"""'], {}), "('Training')\n", (38514, 38526), True, 'import tensorflow as tf\n'), ((39312, 39400), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding"""', '[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding', [self.decode_voc_size, self.\n char_embedding_size])\n", (39327, 39400), True, 'import tensorflow as tf\n'), ((39513, 39602), 'tensorflow.get_variable', 'tf.get_variable', (['"""char_embedding2"""', 
'[self.decode_voc_size, self.char_embedding_size]'], {}), "('char_embedding2', [self.decode_voc_size, self.\n char_embedding_size])\n", (39528, 39602), True, 'import tensorflow as tf\n'), ((43818, 43831), 'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (43826, 43831), True, 'import tensorflow as tf\n'), ((9332, 9351), 'tensorflow.shape', 'tf.shape', (['input_len'], {}), '(input_len)\n', (9340, 9351), True, 'import tensorflow as tf\n'), ((21180, 21201), 'tensorflow.shape', 'tf.shape', (['encoded_seq'], {}), '(encoded_seq)\n', (21188, 21201), True, 'import tensorflow as tf\n'), ((23348, 23361), 'tensorflow.shape', 'tf.shape', (['loc'], {}), '(loc)\n', (23356, 23361), True, 'import tensorflow as tf\n'), ((23888, 23902), 'tensorflow.square', 'tf.square', (['loc'], {}), '(loc)\n', (23897, 23902), True, 'import tensorflow as tf\n'), ((23915, 23932), 'tensorflow.exp', 'tf.exp', (['log_scale'], {}), '(log_scale)\n', (23921, 23932), True, 'import tensorflow as tf\n'), ((9035, 9055), 'tensorflow.shape', 'tf.shape', (['target_len'], {}), '(target_len)\n', (9043, 9055), True, 'import tensorflow as tf\n'), ((2658, 2716), 'numpy.load', 'np.load', (['hparams.encode_vocabulary_file'], {'allow_pickle': '(True)'}), '(hparams.encode_vocabulary_file, allow_pickle=True)\n', (2665, 2716), True, 'import numpy as np\n'), ((2865, 2923), 'numpy.load', 'np.load', (['hparams.decode_vocabulary_file'], {'allow_pickle': '(True)'}), '(hparams.decode_vocabulary_file, allow_pickle=True)\n', (2872, 2923), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import pysal
from pysal.contrib.handler import Model
from functools import partial
import pysal.spreg.diagnostics_tsls as diagnostics_tsls
import pysal.spreg.diagnostics as diagnostics
#from pysal.spreg.ols import OLS as OLS
OLS = Model
#from pysal.spreg.twosls import TSLS as TSLS
TSLS = partial(Model, mtype='TSLS')
#from pysal.spreg.twosls_sp import GM_Lag
GM_Lag = partial(Model, mtype='GM_Lag')
from scipy.stats import pearsonr
# create regression object used by the apatial tests
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
y = np.array(db.by_col("CRIME"))
y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X = np.array(X).T
yd = []
yd.append(db.by_col("HOVAL"))
yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
q = np.array(q).T
reg = TSLS(y, X, yd, q)
# create regression object for spatial test
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
y = np.array(db.by_col("HOVAL"))
y = np.reshape(y, (49,1))
X = np.array(db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(db.by_col("CRIME"))
yd = np.reshape(yd, (49,1))
q = np.array(db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
regsp = GM_Lag(y, X, w=w, yend=yd, q=q, w_lags=2)
class TestTStat(unittest.TestCase):
def test_t_stat(self):
obs = diagnostics_tsls.t_stat(reg)
exp = [(5.8452644704588588, 4.9369075950019865e-07),
(0.36760156683572748, 0.71485634049075841),
(-1.9946891307832111, 0.052021795864651159)]
for i in range(3):
for j in range(2):
self.assertAlmostEquals(obs[i][j],exp[i][j])
class TestPr2Aspatial(unittest.TestCase):
def test_pr2_aspatial(self):
obs = diagnostics_tsls.pr2_aspatial(reg)
exp = 0.2793613712817381
self.assertAlmostEquals(obs,exp)
class TestPr2Spatial(unittest.TestCase):
def test_pr2_spatial(self):
obs = diagnostics_tsls.pr2_spatial(regsp)
exp = 0.29964855438065163
self.assertAlmostEquals(obs,exp)
if __name__ == '__main__':
unittest.main()
| [
"pysal.spreg.diagnostics_tsls.pr2_spatial",
"numpy.reshape",
"unittest.main",
"pysal.spreg.diagnostics_tsls.pr2_aspatial",
"numpy.array",
"pysal.spreg.diagnostics_tsls.t_stat",
"functools.partial",
"pysal.examples.get_path"
] | [((324, 352), 'functools.partial', 'partial', (['Model'], {'mtype': '"""TSLS"""'}), "(Model, mtype='TSLS')\n", (331, 352), False, 'from functools import partial\n'), ((404, 434), 'functools.partial', 'partial', (['Model'], {'mtype': '"""GM_Lag"""'}), "(Model, mtype='GM_Lag')\n", (411, 434), False, 'from functools import partial\n'), ((621, 643), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (631, 643), True, 'import numpy as np\n'), ((979, 1001), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (989, 1001), True, 'import numpy as np\n'), ((1036, 1058), 'numpy.reshape', 'np.reshape', (['X', '(49, 1)'], {}), '(X, (49, 1))\n', (1046, 1058), True, 'import numpy as np\n'), ((1097, 1120), 'numpy.reshape', 'np.reshape', (['yd', '(49, 1)'], {}), '(yd, (49, 1))\n', (1107, 1120), True, 'import numpy as np\n'), ((1158, 1180), 'numpy.reshape', 'np.reshape', (['q', '(49, 1)'], {}), '(q, (49, 1))\n', (1168, 1180), True, 'import numpy as np\n'), ((539, 578), 'pysal.examples.get_path', 'pysal.examples.get_path', (['"""columbus.dbf"""'], {}), "('columbus.dbf')\n", (562, 578), False, 'import pysal\n'), ((681, 692), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (689, 692), True, 'import numpy as np\n'), ((742, 754), 'numpy.array', 'np.array', (['yd'], {}), '(yd)\n', (750, 754), True, 'import numpy as np\n'), ((798, 809), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (806, 809), True, 'import numpy as np\n'), ((897, 936), 'pysal.examples.get_path', 'pysal.examples.get_path', (['"""columbus.dbf"""'], {}), "('columbus.dbf')\n", (920, 936), False, 'import pysal\n'), ((1210, 1249), 'pysal.examples.get_path', 'pysal.examples.get_path', (['"""columbus.shp"""'], {}), "('columbus.shp')\n", (1233, 1249), False, 'import pysal\n'), ((2158, 2173), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2171, 2173), False, 'import unittest\n'), ((1399, 1427), 'pysal.spreg.diagnostics_tsls.t_stat', 'diagnostics_tsls.t_stat', (['reg'], {}), 
'(reg)\n', (1422, 1427), True, 'import pysal.spreg.diagnostics_tsls as diagnostics_tsls\n'), ((1817, 1851), 'pysal.spreg.diagnostics_tsls.pr2_aspatial', 'diagnostics_tsls.pr2_aspatial', (['reg'], {}), '(reg)\n', (1846, 1851), True, 'import pysal.spreg.diagnostics_tsls as diagnostics_tsls\n'), ((2014, 2049), 'pysal.spreg.diagnostics_tsls.pr2_spatial', 'diagnostics_tsls.pr2_spatial', (['regsp'], {}), '(regsp)\n', (2042, 2049), True, 'import pysal.spreg.diagnostics_tsls as diagnostics_tsls\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 22:15:31 2019
@author: shaun
"""
import os
os.chdir("/Users/shaun/mowbot/track_slantcsi")
files = os.listdir(".")
files.sort()
from skimage.color import rgb2gray
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy import ndimage
image = plt.imread(files[0])
image.shape
plt.imshow(image)
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
def do_kmeans(sample_img):
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves works well on float data (need to
# be in the range [0-1])
sample_img = np.array(sample_img, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(sample_img.shape)
assert d == 3
image_array = np.reshape(sample_img, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
return kmeans, labels, labels_random, w, h
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
n_colors = 5
# Load the Summer Palace photo
n_img = 4500
for i in range(len(files)):
n_img = i
sample_img = plt.imread(files[n_img])
kmeans, labels, labels_random, w, h = do_kmeans(sample_img)
kmeans_img = recreate_image(kmeans.cluster_centers_, labels, w, h)*255
f_name = "kmeans_N"+str(n_colors)+"_"+files[n_img]
print(f_name)
cv2.imwrite(f_name, kmeans_img)
plt.imshow(kmeans_img)
# Display all results, alongside original image
plt.figure(1)
plt.clf()
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(sample_img)
plt.figure(2)
plt.clf()
plt.axis('off')
plt.title('Quantized image ('+str(n_colors)+' colors, K-Means)')
plt.imshow(kmeans_img)
#plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
plt.axis('off')
plt.title('Quantized image ('+str(n_colors)+' colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show() | [
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"sklearn.cluster.KMeans",
"os.listdir",
"matplotlib.pyplot.title",
"numpy.reshape",
"sklearn.utils.shuffle",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.clf",
"sklearn.metrics.pairwise_distances_argmin",
"os.chdir",
"numpy.array",
"matplotlib.pyp... | [((118, 164), 'os.chdir', 'os.chdir', (['"""/Users/shaun/mowbot/track_slantcsi"""'], {}), "('/Users/shaun/mowbot/track_slantcsi')\n", (126, 164), False, 'import os\n'), ((173, 188), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (183, 188), False, 'import os\n'), ((335, 355), 'matplotlib.pyplot.imread', 'plt.imread', (['files[0]'], {}), '(files[0])\n', (345, 355), True, 'import matplotlib.pyplot as plt\n'), ((368, 385), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (378, 385), True, 'import matplotlib.pyplot as plt\n'), ((2837, 2850), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2847, 2850), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2860), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2858, 2860), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2876), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2869, 2876), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2920), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image (96,615 colors)"""'], {}), "('Original image (96,615 colors)')\n", (2886, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2943), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample_img'], {}), '(sample_img)\n', (2931, 2943), True, 'import matplotlib.pyplot as plt\n'), ((2945, 2958), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2955, 2958), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2968), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2966, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2984), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2977, 2984), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3072), 'matplotlib.pyplot.imshow', 'plt.imshow', (['kmeans_img'], {}), '(kmeans_img)\n', (3060, 3072), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3154), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', 
(3151, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3155, 3164), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3162, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3180), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3173, 3180), True, 'import matplotlib.pyplot as plt\n'), ((3310, 3320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3318, 3320), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1113), 'numpy.reshape', 'np.reshape', (['sample_img', '(w * h, d)'], {}), '(sample_img, (w * h, d))\n', (1089, 1113), True, 'import numpy as np\n'), ((1189, 1195), 'time.time', 'time', ([], {}), '()\n', (1193, 1195), False, 'from time import time\n'), ((1503, 1509), 'time.time', 'time', ([], {}), '()\n', (1507, 1509), False, 'from time import time\n'), ((1750, 1756), 'time.time', 'time', ([], {}), '()\n', (1754, 1756), False, 'from time import time\n'), ((1777, 1840), 'sklearn.metrics.pairwise_distances_argmin', 'pairwise_distances_argmin', (['codebook_random', 'image_array'], {'axis': '(0)'}), '(codebook_random, image_array, axis=0)\n', (1802, 1840), False, 'from sklearn.metrics import pairwise_distances_argmin\n'), ((2184, 2203), 'numpy.zeros', 'np.zeros', (['(w, h, d)'], {}), '((w, h, d))\n', (2192, 2203), True, 'import numpy as np\n'), ((2488, 2512), 'matplotlib.pyplot.imread', 'plt.imread', (['files[n_img]'], {}), '(files[n_img])\n', (2498, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2760), 'cv2.imwrite', 'cv2.imwrite', (['f_name', 'kmeans_img'], {}), '(f_name, kmeans_img)\n', (2740, 2760), False, 'import cv2\n'), ((2765, 2787), 'matplotlib.pyplot.imshow', 'plt.imshow', (['kmeans_img'], {}), '(kmeans_img)\n', (2775, 2787), True, 'import matplotlib.pyplot as plt\n'), ((886, 924), 'numpy.array', 'np.array', (['sample_img'], {'dtype': 'np.float64'}), '(sample_img, dtype=np.float64)\n', (894, 924), True, 'import numpy as np\n'), ((1221, 1257), 'sklearn.utils.shuffle', 'shuffle', 
(['image_array'], {'random_state': '(0)'}), '(image_array, random_state=0)\n', (1228, 1257), False, 'from sklearn.utils import shuffle\n'), ((1628, 1664), 'sklearn.utils.shuffle', 'shuffle', (['image_array'], {'random_state': '(0)'}), '(image_array, random_state=0)\n', (1635, 1664), False, 'from sklearn.utils import shuffle\n'), ((1278, 1321), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_colors', 'random_state': '(0)'}), '(n_clusters=n_colors, random_state=0)\n', (1284, 1321), False, 'from sklearn.cluster import KMeans\n'), ((1377, 1383), 'time.time', 'time', ([], {}), '()\n', (1381, 1383), False, 'from time import time\n'), ((1582, 1588), 'time.time', 'time', ([], {}), '()\n', (1586, 1588), False, 'from time import time\n'), ((1964, 1970), 'time.time', 'time', ([], {}), '()\n', (1968, 1970), False, 'from time import time\n')] |
# %matplotlib notebook
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# import numpy as np
# from scipy.stats import multivariate_normal
#
# X = np.linspace(-5,5,50)
# Y = np.linspace(-5,5,50)
# X, Y = np.meshgrid(X,Y)
# X_mean = 0; Y_mean = 0
# X_var = 5; Y_var = 8
#
# pos = np.empty(X.shape+(2,))
# pos[:,:,0]=X
# pos[:,:,1]=Y
# rv = multivariate_normal([X_mean, Y_mean],[[X_var, 0], [0, Y_var]])
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot_surface(X, Y, rv.pdf(pos), cmap="plasma")
# plt.show()
import matplotlib as mpl
# mpl.use('Qt5Agg')
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def main():
np.random.seed(42)
xs = np.random.random(100) * 10 + 20
ys = np.random.random(100) * 5 + 7
zs = np.random.random(100) * 15 + 50
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, ys, zs)
ax.scatter(xs, ys, zs, marker="x", c="red")
ax.set_xlabel("Atomic mass (dalton)")
ax.set_ylabel("Atomic radius (pm)")
ax.set_zlabel("Atomic velocity (x10⁶ m/s)")
plt.show()
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# load some test data for demonstration and plot a wireframe
X, Y, Z = axes3d.get_test_data(0.1)
ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)
# rotate the axes and update
for angle in range(0, 360):
ax.view_init(30, angle)
plt.draw()
plt.pause(.001)
if __name__ == '__main__':
main()
| [
"matplotlib.use",
"numpy.random.random",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"mpl_toolkits.mplot3d.axes3d.get_test_data",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.show"
] | [((608, 624), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (615, 624), True, 'import matplotlib as mpl\n'), ((736, 754), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (750, 754), True, 'import numpy as np\n'), ((887, 899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1168), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1166, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1237), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1235, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1390), 'mpl_toolkits.mplot3d.axes3d.get_test_data', 'axes3d.get_test_data', (['(0.1)'], {}), '(0.1)\n', (1385, 1390), False, 'from mpl_toolkits.mplot3d import axes3d\n'), ((1550, 1560), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1558, 1560), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1585), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1578, 1585), True, 'import matplotlib.pyplot as plt\n'), ((764, 785), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (780, 785), True, 'import numpy as np\n'), ((805, 826), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (821, 826), True, 'import numpy as np\n'), ((844, 865), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (860, 865), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 11:08:12 2020
@author: nmei
"""
import os
import gc
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
from sklearn import metrics
from sklearn.utils import shuffle
from utils_deep import (data_loader,
createLossAndOptimizer,
behavioral_evaluate,
build_model,
hidden_activation_functions,
resample_ttest_2sample,
noise_fuc,
make_decoder,
decode_hidden_layer,
resample_ttest,
resample_behavioral_estimate
)
from matplotlib import pyplot as plt
#plt.switch_backend('agg')
print('set up random seeds')
torch.manual_seed(12345)
# experiment control
model_dir = '../models'
train_folder = 'greyscaled'
valid_folder = 'experiment_images_greyscaled'
train_root = f'../data/{train_folder}/'
valid_root = f'../data/{valid_folder}'
print_train = True #
image_resize = 128
batch_size = 8
lr = 1e-4
n_epochs = int(1e3)
device = 'cpu'
pretrain_model_name = 'vgg19_bn'
hidden_units = 20
hidden_func_name = 'relu'
hidden_activation = hidden_activation_functions(hidden_func_name)
hidden_dropout = 0.
patience = 5
output_activation = 'softmax'
model_saving_name = f'{pretrain_model_name}_{hidden_units}_{hidden_func_name}_{hidden_dropout}_{output_activation}'
testing = True #
n_experiment_runs = 20
n_noise_levels = 50
n_keep_going = 32
start_decoding = False
to_round = 9
results_dir = '../results/'
if not os.path.exists(results_dir):
os.mkdir(results_dir)
if not os.path.exists(os.path.join(results_dir,model_saving_name)):
os.mkdir(os.path.join(results_dir,model_saving_name))
if output_activation == 'softmax':
output_units = 2
categorical = True
elif output_activation == 'sigmoid':
output_units = 1
categorical = False
if not os.path.exists(os.path.join(model_dir,model_saving_name)):
os.mkdir(os.path.join(model_dir,model_saving_name))
if torch.cuda.is_available():torch.cuda.empty_cache();torch.cuda.manual_seed(12345);
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'device:{device}')
noise_levels = np.concatenate([[0],[item for item in np.logspace(-1,3,n_noise_levels)]])
csv_saving_name = os.path.join(results_dir,model_saving_name,'performance_results.csv')
if True:#not os.path.exists(csv_saving_name):
results = dict(model_name = [],
hidden_units = [],
hidden_activation = [],
output_activation = [],
noise_level = [],
score_mean = [],
score_std = [],
chance_mean = [],
chance_std = [],
model = [],
pval = [],
confidence_mean = [],
confidence_std = [],
dropout = [],
)
else:
df_temp = pd.read_csv(csv_saving_name)
results = {col_name:list(df_temp[col_name]) for col_name in df_temp.columns}
res_temp = []
print(noise_levels)
for var in noise_levels:
var = round(var,to_round)
if True:#var not in np.array(results['noise_level']).round(to_round):
print(f'\nworking on {var:1.1e}')
noise_folder = os.path.join(results_dir,model_saving_name,f'{var:1.1e}')
if not os.path.exists(noise_folder):
os.mkdir(noise_folder)
# define augmentation function + noise function
augmentations = {
'visualize':transforms.Compose([
transforms.Resize((image_resize,image_resize)),
transforms.RandomHorizontalFlip(p = 0.5),
transforms.RandomRotation(25,),
transforms.RandomVerticalFlip(p = 0.5,),
transforms.ToTensor(),
transforms.Lambda(lambda x: noise_fuc(x,var)),
]),
'valid':transforms.Compose([
transforms.Resize((image_resize,image_resize)),
transforms.RandomHorizontalFlip(p = 0.5),
transforms.RandomRotation(25,),
transforms.RandomVerticalFlip(p = 0.5,),
transforms.ToTensor(),
transforms.Lambda(lambda x: noise_fuc(x,var)),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
}
valid_loader = data_loader(
valid_root,
augmentations = augmentations['valid'],
batch_size = batch_size,
# here I turn on the shuffle like it is in a real experiment
)
visualize_loader = data_loader(
valid_root,
augmentations = augmentations['visualize'],
batch_size = 2 * batch_size,
)
# load model architecture
print('loading the trained model')
model_to_train = build_model(
pretrain_model_name,
hidden_units,
hidden_activation,
hidden_dropout,
output_units,
)
model_to_train.to(device)
for params in model_to_train.parameters():
params.requires_grad = False
f_name = os.path.join(model_dir,model_saving_name,model_saving_name+'.pth')
# load trained model
model_to_train = torch.load(f_name)
loss_func,optimizer = createLossAndOptimizer(model_to_train,learning_rate = lr)
# evaluate the model
y_trues,y_preds,features,labels = behavioral_evaluate(
model_to_train,
n_experiment_runs,
loss_func,
valid_loader,
device,
categorical = categorical,
output_activation = output_activation,
)
print(var.round(5),metrics.roc_auc_score(y_trues,y_preds))
# estimate chance level scores
np.random.seed(12345)
yy_trues = torch.cat(y_trues).detach().cpu().numpy()
yy_preds = torch.cat(y_preds).detach().cpu().numpy()
chance_scores = resample_behavioral_estimate(yy_trues,yy_preds,shuffle = True)
pval = resample_ttest_2sample(scores,chance_scores,
match_sample_size = False,
one_tail = False,
n_ps = 1,
n_permutation = int(1e5),
n_jobs = -1,
verbose = 1,
)
# save the features and labels from the hidden layer
decode_features = torch.cat([torch.cat(run) for run in features])
decode_labels = torch.cat([torch.cat(run) for run in labels])
decode_features = decode_features.detach().cpu().numpy()
decode_labels = decode_labels.detach().cpu().numpy()
if categorical:
decode_labels = decode_labels[:,-1]
np.save(os.path.join(noise_folder,'features.npy'),decode_features)
np.save(os.path.join(noise_folder,'labels.npy' ),decode_labels)
gc.collect()
results['model_name' ].append(pretrain_model_name)
results['hidden_units' ].append(hidden_units)
results['hidden_activation' ].append(hidden_func_name)
results['output_activation' ].append(output_activation)
results['noise_level' ].append(round(var,to_round))
results['score_mean' ].append(np.mean(scores))
results['score_std' ].append(np.std(scores))
results['chance_mean' ].append(np.mean(chance_scores))
results['chance_std' ].append(np.std(chance_scores))
results['model' ].append('CNN')
results['pval' ].append(np.mean(pval))
results['dropout' ].append(hidden_dropout)
y_preds = torch.cat(y_preds).detach().cpu().numpy()
if len(y_preds.shape) > 1:
confidence = y_preds.max(1)
else:
confidence = y_preds.copy()
results['confidence_mean' ].append(np.mean(confidence))
results['confidence_std' ].append(np.std(confidence))
print(var.round(5),np.mean(scores))
# save example noisy images
print('plotting example images')
batches,batch_labels = next(iter(visualize_loader))
PIL_transformer = transforms.ToPILImage()
plt.close('all')
fig,axes = plt.subplots(figsize = (16,16),nrows = 4,ncols = 4)
for ax,batch_,batch_label in zip(axes.flatten(),batches,batch_labels):
batch_ = np.array(PIL_transformer(batch_))
ax.imshow(batch_,)
ax.axis('off')
ax.set(title = {0:'living',
1:'nonliving'}[int(batch_label.numpy())])
fig.suptitle(f'noise level = {var:1.1e}, performance = {np.mean(scores):.3f} +/- {np.std(scores):.3f}\nconfidence = {np.mean(confidence):.3f} +/- {np.std(confidence):.3f}')
fig.savefig(os.path.join(noise_folder,'examples.jpeg'),bbox_inches = 'tight')
plt.close('all')
gc.collect()
if np.mean(scores) < 0.55:
start_decoding = True
if start_decoding:
decode_scores = []
for decoder_name in ['linear-SVM','RBF-SVM','RF']:#,'logit']:
decoder = make_decoder(decoder_name,n_jobs = 1)
res,cv = decode_hidden_layer(decoder,
decode_features,
decode_labels,
n_splits = 100,
test_size = 0.2,
categorical = categorical,
output_activation = output_activation,)
decode_scores.append(res['test_score'].mean())
y_preds = []
for (_,idx_test),est in zip(cv.split(decode_features,decode_labels),res['estimator']):
y_pred_ = est.predict_proba(decode_features[idx_test])
y_preds.append(y_pred_)
y_preds = np.concatenate(y_preds)
if len(y_preds.shape) > 1:
confidence = y_preds.max(1)
else:
confidence = y_preds.copy()
pval = resample_ttest(res['test_score'],
0.5,
one_tail = True,
n_jobs = -1,
n_ps = 50,
n_permutation = int(1e5),
verbose = 1,
)
gc.collect()
results['model_name' ].append(pretrain_model_name)
results['hidden_units' ].append(hidden_units)
results['hidden_activation' ].append(hidden_func_name)
results['output_activation' ].append(output_activation)
results['noise_level' ].append(round(var,to_round))
results['score_mean' ].append(np.mean(res['test_score']))
results['score_std' ].append(np.std(res['test_score']))
results['chance_mean' ].append(.5)
results['chance_std' ].append(0.)
results['model' ].append(decoder_name)
results['pval' ].append(np.mean(pval))
results['confidence_mean' ].append(np.mean(confidence))
results['confidence_std' ].append(np.std(confidence))
results['dropout' ].append(hidden_dropout)
print(f"\nwith {var:1.1e} noise images, {decoder_name} = {np.mean(res['test_score']):.4f}+/-{np.std(res['test_score']):.4f},pval = {np.mean(pval):1.1e}")
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(csv_saving_name,index = False)
res_temp.append(np.mean(scores))
# CNN being too good above 0.5 for only a little bit
if len(res_temp) > n_keep_going:
criterion1 = (np.abs(np.median(res_temp[-n_keep_going:]) - .5) < 1e-3)
else:
criterion1 = False
# CNN is below 0.5 for a long time
criterion2 = (np.median(res_temp[-n_keep_going:]) < 0.5)
# decoders can no longer decoder
try:
criterion3 = np.logical_and(len(decode_scores) > 1,all(np.array(decode_scores) < 0.5))
except:
criterion3 = False
if criterion1 and criterion2 and criterion3:
break
else:
idx_ , = np.where(np.array(results['noise_level']).round(5) == var)
score_mean = results['score_mean'][idx_[0]]
score_std = results['score_std'][idx_[0]]
confidence_mean = results['confidence_mean'][idx_[0]]
confidence_std = results['confidence_std'][idx_[0]]
print(f'\nwith {var:1.1e} noise images, score = {score_mean:.4f}+/-{score_std:.4f},confidence = {confidence_mean:.2f}+/-{confidence_std:.2f}')
res_temp.append(score_mean)
if ((np.abs(np.mean(res_temp[-n_keep_going:]) - .5) < 1e-3) and (len(res_temp) > n_keep_going)) or (np.mean(res_temp[-n_keep_going:]) < 0.5):
break
print('done')
| [
"utils_deep.decode_hidden_layer",
"torchvision.transforms.ToPILImage",
"pandas.read_csv",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"torch.cuda.is_available",
"utils_deep.behavioral_evaluate",
"os.path.exists",
"numpy.mean",
"utils_deep.noise_fuc",
"utils_deep.build_model",
"matplotlib.p... | [((878, 902), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (895, 902), False, 'import torch\n'), ((1479, 1524), 'utils_deep.hidden_activation_functions', 'hidden_activation_functions', (['hidden_func_name'], {}), '(hidden_func_name)\n', (1506, 1524), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((2471, 2496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2494, 2496), False, 'import torch\n'), ((2766, 2837), 'os.path.join', 'os.path.join', (['results_dir', 'model_saving_name', '"""performance_results.csv"""'], {}), "(results_dir, model_saving_name, 'performance_results.csv')\n", (2778, 2837), False, 'import os\n'), ((1969, 1996), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (1983, 1996), False, 'import os\n'), ((2002, 2023), 'os.mkdir', 'os.mkdir', (['results_dir'], {}), '(results_dir)\n', (2010, 2023), False, 'import os\n'), ((2497, 2521), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2519, 2521), False, 'import torch\n'), ((2522, 2551), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(12345)'], {}), '(12345)\n', (2544, 2551), False, 'import torch\n'), ((3696, 3724), 'pandas.read_csv', 'pd.read_csv', (['csv_saving_name'], {}), '(csv_saving_name)\n', (3707, 3724), True, 'import pandas as pd\n'), ((2046, 2090), 'os.path.join', 'os.path.join', (['results_dir', 'model_saving_name'], {}), '(results_dir, model_saving_name)\n', (2058, 2090), False, 'import os\n'), ((2105, 2149), 'os.path.join', 'os.path.join', (['results_dir', 'model_saving_name'], {}), '(results_dir, model_saving_name)\n', (2117, 2149), False, 'import os\n'), ((2367, 2409), 'os.path.join', 'os.path.join', (['model_dir', 'model_saving_name'], {}), '(model_dir, 
model_saving_name)\n', (2379, 2409), False, 'import os\n'), ((2424, 2466), 'os.path.join', 'os.path.join', (['model_dir', 'model_saving_name'], {}), '(model_dir, model_saving_name)\n', (2436, 2466), False, 'import os\n'), ((2585, 2610), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2608, 2610), False, 'import torch\n'), ((4057, 4116), 'os.path.join', 'os.path.join', (['results_dir', 'model_saving_name', 'f"""{var:1.1e}"""'], {}), "(results_dir, model_saving_name, f'{var:1.1e}')\n", (4069, 4116), False, 'import os\n'), ((5208, 5297), 'utils_deep.data_loader', 'data_loader', (['valid_root'], {'augmentations': "augmentations['valid']", 'batch_size': 'batch_size'}), "(valid_root, augmentations=augmentations['valid'], batch_size=\n batch_size)\n", (5219, 5297), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((5478, 5574), 'utils_deep.data_loader', 'data_loader', (['valid_root'], {'augmentations': "augmentations['visualize']", 'batch_size': '(2 * batch_size)'}), "(valid_root, augmentations=augmentations['visualize'],\n batch_size=2 * batch_size)\n", (5489, 5574), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((5756, 5855), 'utils_deep.build_model', 'build_model', (['pretrain_model_name', 'hidden_units', 'hidden_activation', 'hidden_dropout', 'output_units'], {}), '(pretrain_model_name, hidden_units, hidden_activation,\n hidden_dropout, output_units)\n', (5767, 5855), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, 
decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((6156, 6226), 'os.path.join', 'os.path.join', (['model_dir', 'model_saving_name', "(model_saving_name + '.pth')"], {}), "(model_dir, model_saving_name, model_saving_name + '.pth')\n", (6168, 6226), False, 'import os\n'), ((6282, 6300), 'torch.load', 'torch.load', (['f_name'], {}), '(f_name)\n', (6292, 6300), False, 'import torch\n'), ((6331, 6387), 'utils_deep.createLossAndOptimizer', 'createLossAndOptimizer', (['model_to_train'], {'learning_rate': 'lr'}), '(model_to_train, learning_rate=lr)\n', (6353, 6387), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((6460, 6618), 'utils_deep.behavioral_evaluate', 'behavioral_evaluate', (['model_to_train', 'n_experiment_runs', 'loss_func', 'valid_loader', 'device'], {'categorical': 'categorical', 'output_activation': 'output_activation'}), '(model_to_train, n_experiment_runs, loss_func,\n valid_loader, device, categorical=categorical, output_activation=\n output_activation)\n', (6479, 6618), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((7199, 7220), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (7213, 7220), True, 'import numpy as np\n'), ((7383, 7445), 'utils_deep.resample_behavioral_estimate', 'resample_behavioral_estimate', (['yy_trues', 'yy_preds'], {'shuffle': '(True)'}), '(yy_trues, yy_preds, shuffle=True)\n', (7411, 7445), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, 
resample_ttest, resample_behavioral_estimate\n'), ((8536, 8548), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8546, 8548), False, 'import gc\n'), ((9849, 9872), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (9870, 9872), False, 'from torchvision import transforms\n'), ((9881, 9897), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9890, 9897), True, 'from matplotlib import pyplot as plt\n'), ((9932, 9980), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 16)', 'nrows': '(4)', 'ncols': '(4)'}), '(figsize=(16, 16), nrows=4, ncols=4)\n', (9944, 9980), True, 'from matplotlib import pyplot as plt\n'), ((10588, 10604), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10597, 10604), True, 'from matplotlib import pyplot as plt\n'), ((10613, 10625), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10623, 10625), False, 'import gc\n'), ((13586, 13607), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (13598, 13607), True, 'import pandas as pd\n'), ((4130, 4158), 'os.path.exists', 'os.path.exists', (['noise_folder'], {}), '(noise_folder)\n', (4144, 4158), False, 'import os\n'), ((4172, 4194), 'os.mkdir', 'os.mkdir', (['noise_folder'], {}), '(noise_folder)\n', (4180, 4194), False, 'import os\n'), ((7111, 7150), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_trues', 'y_preds'], {}), '(y_trues, y_preds)\n', (7132, 7150), False, 'from sklearn import metrics\n'), ((8396, 8438), 'os.path.join', 'os.path.join', (['noise_folder', '"""features.npy"""'], {}), "(noise_folder, 'features.npy')\n", (8408, 8438), False, 'import os\n'), ((8471, 8511), 'os.path.join', 'os.path.join', (['noise_folder', '"""labels.npy"""'], {}), "(noise_folder, 'labels.npy')\n", (8483, 8511), False, 'import os\n'), ((8912, 8927), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (8919, 8927), True, 'import numpy as np\n'), ((8974, 8988), 'numpy.std', 'np.std', 
(['scores'], {}), '(scores)\n', (8980, 8988), True, 'import numpy as np\n'), ((9035, 9057), 'numpy.mean', 'np.mean', (['chance_scores'], {}), '(chance_scores)\n', (9042, 9057), True, 'import numpy as np\n'), ((9104, 9125), 'numpy.std', 'np.std', (['chance_scores'], {}), '(chance_scores)\n', (9110, 9125), True, 'import numpy as np\n'), ((9224, 9237), 'numpy.mean', 'np.mean', (['pval'], {}), '(pval)\n', (9231, 9237), True, 'import numpy as np\n'), ((9534, 9553), 'numpy.mean', 'np.mean', (['confidence'], {}), '(confidence)\n', (9541, 9553), True, 'import numpy as np\n'), ((9600, 9618), 'numpy.std', 'np.std', (['confidence'], {}), '(confidence)\n', (9606, 9618), True, 'import numpy as np\n'), ((9647, 9662), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (9654, 9662), True, 'import numpy as np\n'), ((10514, 10557), 'os.path.join', 'os.path.join', (['noise_folder', '"""examples.jpeg"""'], {}), "(noise_folder, 'examples.jpeg')\n", (10526, 10557), False, 'import os\n'), ((10638, 10653), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10645, 10653), True, 'import numpy as np\n'), ((13695, 13710), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (13702, 13710), True, 'import numpy as np\n'), ((14015, 14050), 'numpy.median', 'np.median', (['res_temp[-n_keep_going:]'], {}), '(res_temp[-n_keep_going:])\n', (14024, 14050), True, 'import numpy as np\n'), ((2706, 2740), 'numpy.logspace', 'np.logspace', (['(-1)', '(3)', 'n_noise_levels'], {}), '(-1, 3, n_noise_levels)\n', (2717, 2740), True, 'import numpy as np\n'), ((8069, 8083), 'torch.cat', 'torch.cat', (['run'], {}), '(run)\n', (8078, 8083), False, 'import torch\n'), ((8143, 8157), 'torch.cat', 'torch.cat', (['run'], {}), '(run)\n', (8152, 8157), False, 'import torch\n'), ((10861, 10897), 'utils_deep.make_decoder', 'make_decoder', (['decoder_name'], {'n_jobs': '(1)'}), '(decoder_name, n_jobs=1)\n', (10873, 10897), False, 'from utils_deep import data_loader, createLossAndOptimizer, 
behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((10929, 11089), 'utils_deep.decode_hidden_layer', 'decode_hidden_layer', (['decoder', 'decode_features', 'decode_labels'], {'n_splits': '(100)', 'test_size': '(0.2)', 'categorical': 'categorical', 'output_activation': 'output_activation'}), '(decoder, decode_features, decode_labels, n_splits=100,\n test_size=0.2, categorical=categorical, output_activation=output_activation\n )\n', (10948, 11089), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((11753, 11776), 'numpy.concatenate', 'np.concatenate', (['y_preds'], {}), '(y_preds)\n', (11767, 11776), True, 'import numpy as np\n'), ((12392, 12404), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12402, 12404), False, 'import gc\n'), ((14958, 14991), 'numpy.mean', 'np.mean', (['res_temp[-n_keep_going:]'], {}), '(res_temp[-n_keep_going:])\n', (14965, 14991), True, 'import numpy as np\n'), ((4343, 4390), 'torchvision.transforms.Resize', 'transforms.Resize', (['(image_resize, image_resize)'], {}), '((image_resize, image_resize))\n', (4360, 4390), False, 'from torchvision import transforms\n'), ((4407, 4445), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4438, 4445), False, 'from torchvision import transforms\n'), ((4465, 4494), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(25)'], {}), '(25)\n', (4490, 4494), False, 'from torchvision import transforms\n'), ((4513, 4549), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4542, 4549), False, 'from torchvision import transforms\n'), 
((4570, 4591), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4589, 4591), False, 'from torchvision import transforms\n'), ((4737, 4784), 'torchvision.transforms.Resize', 'transforms.Resize', (['(image_resize, image_resize)'], {}), '((image_resize, image_resize))\n', (4754, 4784), False, 'from torchvision import transforms\n'), ((4801, 4839), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4832, 4839), False, 'from torchvision import transforms\n'), ((4859, 4888), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(25)'], {}), '(25)\n', (4884, 4888), False, 'from torchvision import transforms\n'), ((4907, 4943), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4936, 4943), False, 'from torchvision import transforms\n'), ((4964, 4985), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4983, 4985), False, 'from torchvision import transforms\n'), ((5066, 5141), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5086, 5141), False, 'from torchvision import transforms\n'), ((10377, 10392), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10384, 10392), True, 'import numpy as np\n'), ((10403, 10417), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (10409, 10417), True, 'import numpy as np\n'), ((10438, 10457), 'numpy.mean', 'np.mean', (['confidence'], {}), '(confidence)\n', (10445, 10457), True, 'import numpy as np\n'), ((10468, 10486), 'numpy.std', 'np.std', (['confidence'], {}), '(confidence)\n', (10474, 10486), True, 'import numpy as np\n'), ((12816, 12842), 'numpy.mean', 'np.mean', (["res['test_score']"], {}), "(res['test_score'])\n", (12823, 12842), True, 'import numpy as np\n'), ((12897, 12922), 
'numpy.std', 'np.std', (["res['test_score']"], {}), "(res['test_score'])\n", (12903, 12922), True, 'import numpy as np\n'), ((13158, 13171), 'numpy.mean', 'np.mean', (['pval'], {}), '(pval)\n', (13165, 13171), True, 'import numpy as np\n'), ((13226, 13245), 'numpy.mean', 'np.mean', (['confidence'], {}), '(confidence)\n', (13233, 13245), True, 'import numpy as np\n'), ((13300, 13318), 'numpy.std', 'np.std', (['confidence'], {}), '(confidence)\n', (13306, 13318), True, 'import numpy as np\n'), ((13849, 13884), 'numpy.median', 'np.median', (['res_temp[-n_keep_going:]'], {}), '(res_temp[-n_keep_going:])\n', (13858, 13884), True, 'import numpy as np\n'), ((14180, 14203), 'numpy.array', 'np.array', (['decode_scores'], {}), '(decode_scores)\n', (14188, 14203), True, 'import numpy as np\n'), ((14377, 14409), 'numpy.array', 'np.array', (["results['noise_level']"], {}), "(results['noise_level'])\n", (14385, 14409), True, 'import numpy as np\n'), ((4637, 4654), 'utils_deep.noise_fuc', 'noise_fuc', (['x', 'var'], {}), '(x, var)\n', (4646, 4654), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((5031, 5048), 'utils_deep.noise_fuc', 'noise_fuc', (['x', 'var'], {}), '(x, var)\n', (5040, 5048), False, 'from utils_deep import data_loader, createLossAndOptimizer, behavioral_evaluate, build_model, hidden_activation_functions, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate\n'), ((13463, 13489), 'numpy.mean', 'np.mean', (["res['test_score']"], {}), "(res['test_score'])\n", (13470, 13489), True, 'import numpy as np\n'), ((13498, 13523), 'numpy.std', 'np.std', (["res['test_score']"], {}), "(res['test_score'])\n", (13504, 13523), True, 'import numpy as np\n'), ((13537, 13550), 'numpy.mean', 'np.mean', (['pval'], {}), 
'(pval)\n', (13544, 13550), True, 'import numpy as np\n'), ((14870, 14903), 'numpy.mean', 'np.mean', (['res_temp[-n_keep_going:]'], {}), '(res_temp[-n_keep_going:])\n', (14877, 14903), True, 'import numpy as np\n'), ((7247, 7265), 'torch.cat', 'torch.cat', (['y_trues'], {}), '(y_trues)\n', (7256, 7265), False, 'import torch\n'), ((7315, 7333), 'torch.cat', 'torch.cat', (['y_preds'], {}), '(y_preds)\n', (7324, 7333), False, 'import torch\n'), ((9318, 9336), 'torch.cat', 'torch.cat', (['y_preds'], {}), '(y_preds)\n', (9327, 9336), False, 'import torch\n')] |
"""
Visualize map with COVID-19 cases
"""
from os.path import join
import logging
import numpy as np
from bokeh.plotting import figure
from bokeh.models import DateSlider
from bokeh.models import (
CustomJS,
GeoJSONDataSource,
HoverTool,
Legend,
LinearColorMapper,
Select,
GroupFilter,
CDSView,
Button,
Label
)
from bokeh.layouts import column, row
from bokeh.io import curdoc
from bokeh.palettes import Purples
from bokeh.themes import Theme
from database import DataBase
from utilities import cwd
from sql import (
US_MAP_PIVOT_VIEW_TABLE,
OPTIONS_TABLE
)
from nytimes import (
LEVELS_TABLE,
DATES_TABLE
)
from wrangler import STATE_MAP_TABLE
# Configure the root logger once at import time; module-level logger follows convention.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class Map:
    """
    Map layout class.

    Builds a bokeh choropleth map of US counties colored by COVID-19 case
    levels, with state boundary lines, a hover tool, a color legend, a state
    select control, a date slider, and a play/pause animation button.

    Data and metadata are loaded once from the project database in
    ``__init__``; interactivity is implemented with client-side CustomJS
    callbacks so no server round-trips are needed after the page loads.
    """
    def __init__(self, **kwargs):
        """
        Load map data/metadata from the database and assemble the figure.

        :param palette: (required keyword) list of colors used for the
            county choropleth and the legend.
        :param kwargs: remaining keyword arguments are forwarded to
            ``bokeh.plotting.figure`` (e.g. plot_width, plot_height).
        """
        self.palette = kwargs.pop('palette')
        # init metadata dictionary
        self.meta = dict()
        # get data and metadata from database
        _db = DataBase()
        self.counties = _db.get_geotable(US_MAP_PIVOT_VIEW_TABLE)
        self.meta['levels'] = _db.get_table(LEVELS_TABLE)
        self.meta['dates'] = _db.get_table(DATES_TABLE, parse_dates=['date'])
        self.meta['options'] = _db.get_table(OPTIONS_TABLE)
        _cols = ['state_id', 'geometry']
        self.states = _db.get_geotable(STATE_MAP_TABLE, columns=_cols)
        _db.close()
        # format metadata: flatten the tables into plain Python lists
        self.meta['levels'] = list(self.meta['levels']['level'])
        self.meta['dates'] = list(self.meta['dates']['date'])
        _id, _state = self.meta['options']['id'], self.meta['options']['state']
        # options become (id, state) pairs as expected by the Select widget
        self.meta['options'] = list(zip(_id, _state))
        # init plot
        self.plot = figure(match_aspect=True, toolbar_location='right',
                           tools="box_zoom, wheel_zoom, pan, reset, save",
                           name='maps', **kwargs)
        # hide axes
        self.plot.axis.visible = False
        # init class variables
        self.controls = dict()
        # GeoJSON sources feed the patch/line glyphs on the bokeh figure
        self.srcs = dict(counties=GeoJSONDataSource(geojson=self.counties.to_json()),
                         states=GeoJSONDataSource(geojson=self.states.to_json()))
        # build map
        self.plot_map()
        log.debug('map init')

    def __add_counties(self):
        """Add the county choropleth patches to the figure.

        Fill color is mapped from the source column 'm' through a linear
        color mapper over the configured palette (10 bins, 0..9).
        """
        # build county colors and line parameters
        _color_mapper = LinearColorMapper(palette=self.palette, low=0, high=9)
        _color = dict(field='m', transform=_color_mapper)
        _params = dict(line_color='darkgrey',
                       fill_color=_color, line_width=0.5)
        # named 'counties' so hover/select can find this renderer later
        _params['name'] = 'counties'
        # add counties to plot
        self.plot.patches(xs='xs', ys='ys',
                          source=self.srcs['counties'], **_params)
        log.debug('patches added')

    def __add_states(self):
        """Add the state boundary lines to the figure."""
        # build state colors and line parameters
        _params = dict(line_color='darkgrey', line_width=0.5, name='states')
        # add state to plot
        self.plot.multi_line(
            xs='xs', ys='ys', source=self.srcs['states'], **_params)
        log.debug('state lines added')

    def __add_label(self):
        """Add the large on-map date label updated during animation.

        The label starts empty; the play-button CustomJS fills it with the
        formatted date while the animation runs.
        """
        self.controls['label'] = Label(x=0.35 * self.plot.plot_width,
                                       y=0.01 * self.plot.plot_height,
                                       x_units='screen', y_units='screen',
                                       text='', render_mode='css',
                                       text_font_size=f"{0.10*self.plot.plot_height}px",
                                       text_color='#eeeeee')
        self.plot.add_layout(self.controls['label'])
        log.debug('label added')

    def __add_hover(self):
        """Add a hover tool showing county name, cases, deaths, population.

        The tool is restricted to the renderer named 'counties'.
        """
        _hover = HoverTool(renderers=self.plot.select('counties'),
                           tooltips=[('County', '@name'),
                                     ('Cases', '@c{0,0}'),
                                     ('Deaths', '@d{0,0}'),
                                     ('Population', '@pop{0,0}')])
        self.plot.add_tools(_hover)
        log.debug('hover tool added')

    def __add_legend(self):
        """Add a custom legend mapping palette colors to case-level ranges.

        Legend entries are built from invisible quad glyphs because the
        choropleth itself has no discrete renderers to attach a legend to.
        """
        _levels = self.meta['levels']
        # names for custom legend: "a to b" ranges; last finite level gets "a+"
        _names = []
        for _level, _lead in zip(_levels, _levels[1:] + [np.nan]):
            if _level == 0:
                _names.append(f'{_level:,.0f}')
            elif not np.isinf(_lead):
                _names.append(f'{_level:,.0f} to {_lead:,.0f}')
            else:
                _names.append(f'{_level:,.0f}+')
                break
        # quad parameters (invisible placeholder glyphs, one per palette color)
        _params = dict(top=0, bottom=0, left=0, right=0, fill_color=None,
                       visible=False)
        _items = []
        for i in reversed(range(len(self.palette))):
            _params['fill_color'] = self.palette[i]
            _items += [(_names[i], [self.plot.quad(**_params)])]
        # add legend to plot
        self.plot.add_layout(Legend(items=_items, location='bottom_right'))
        self.plot.x_range.only_visible = True
        self.plot.y_range.only_visible = True
        log.debug('legend added added')

    def add_select(self):
        """Build the state select control and its client-side filter callback.

        Selecting a state id switches the county/state glyphs to CDSViews
        filtered on that state; the value '00' restores the unfiltered views.
        """
        # select control
        # NOTE(review): initial value 'a' does not match any (id, state)
        # option, and the callback treats '00' as "all states" — confirm
        # whether value should be '00' here.
        self.controls['select'] = Select(value='a', options=self.meta['options'],
                                        max_width=self.plot.plot_width-35)
        # map views: one filtered pair ("on") and one unfiltered pair ("off")
        _filter = GroupFilter(column_name='state_id', group='12')
        _counties_on = CDSView(source=self.srcs['counties'], filters=[_filter])
        _counties_off = CDSView(source=self.srcs['counties'], filters=[])
        _states_on = CDSView(source=self.srcs['states'], filters=[_filter])
        _states_off = CDSView(source=self.srcs['states'], filters=[])
        _args = dict(counties_src=self.srcs['counties'], states_src=self.srcs['states'],
                     counties_glyph=self.plot.select('counties')[0],
                     states_glyph=self.plot.select(
                         'states')[0], filter=_filter,
                     counties_view_on=_counties_on, states_view_on=_states_on,
                     counties_view_off=_counties_off, states_view_off=_states_off)
        _callback = CustomJS(args=_args,
                             code="""
            if (cb_obj.value != '00'){
                console.log(cb_obj.value);
                filter.group = cb_obj.value;
                counties_glyph.view = counties_view_on;
                states_glyph.view = states_view_on;
            }
            else{
                console.log(cb_obj.value);
                counties_glyph.view = counties_view_off;
                states_glyph.view = states_view_off;
            }
            counties_src.change.emit();
            states_src.change.emit();
            """)
        self.controls['select'].js_on_change('value', _callback)
        log.debug('select control added')

    def add_slider(self):
        """Build the reported-date slider and its client-side data swap.

        Moving the slider copies the per-day columns c<day>/d<day>/m<day>
        into the live 'c'/'d'/'m' columns of the county source, so the map
        recolors without a server round-trip.
        """
        self.controls['slider'] = DateSlider(start=self.meta['dates'][-1].date(),
                                             end=self.meta['dates'][0].date(),
                                             value=self.meta['dates'][0].date(),
                                             width=self.plot.plot_width-40-84,
                                             title='Reported Date')
        _callback = CustomJS(args=dict(source=self.srcs['counties'],
                                       date=self.controls['slider']),
                             code="""
            // javascript code
            var data = source.data;
            var cur_day = data['day'];
            // from DateSlider
            var day = Math.floor((date.end - date.value)/(1000*60*60*24));
            // create column names
            var ci = 'c'.concat(day.toString());
            var di = 'd'.concat(day.toString());
            var mi = 'm'.concat(day.toString());
            // change data
            if (cur_day[0] != day){
                for (var i=0; i < cur_day.length; i++){
                    data['c'][i] = data[ci][i];
                    data['d'][i] = data[di][i];
                    data['m'][i] = data[mi][i];
                    cur_day[0] = day;
                }
            }
            source.change.emit();
            """)
        self.controls['slider'].js_on_change('value', _callback)
        log.debug('slider added')

    def add_button(self):
        """Build the play/pause animation button.

        While playing, a client-side interval advances the date slider one
        day every 750 ms (wrapping at the end) and updates the on-map date
        label; pressing again pauses and clears the label.
        """
        self.controls['button'] = Button(label='► Play', width=80, height=60)
        _callback = CustomJS(args=dict(button=self.controls['button'],
                                       slider=self.controls['slider'],
                                       label=self.controls['label']),
                             code="""
            function fDate(ms){
                const months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
                                'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
                var d = new Date(ms);
                var date = d.getDate();
                if (date < 10){
                    date = '0' + date;
                }
                return `${date} ${months[d.getMonth()]} ${d.getFullYear()}`
            };
            var increment_slider = function(){
                if (button.label == '► Play'){
                    label.text = ""
                    clearInterval(interval);
                }
                else{
                    // update slider value
                    var temp = slider.value;
                    temp = temp + 1000*60*60*24;
                    if (temp > slider.end){
                        temp = slider.start;
                    }
                    slider.value = temp;
                    // add date label to graph
                    var d = new Date(temp + 1000*60*60*24);
                    label.text = fDate(d)
                }
            };
            if (button.label == '► Play'){
                button.label = '❚❚ Pause';
                var interval = setInterval(increment_slider, 750, slider);
            }
            else{
                button.label = '► Play';
                clearInterval(interval);
            };
            """)
        self.controls['button'].js_on_click(_callback)
        log.debug('button added')

    def plot_map(self):
        """Assemble all map elements and controls in dependency order.

        The button callback references the slider and label, so those are
        built first.
        """
        self.__add_counties()
        self.__add_states()
        self.__add_hover()
        self.__add_label()
        self.__add_legend()
        self.add_select()
        self.add_slider()
        self.add_button()
# When served with `bokeh serve`, modules run under a synthetic name starting
# with 'bokeh_app', so this guard acts as the stand-alone entry point.
if __name__[:9] == 'bokeh_app':
    print('unit testing...')
    # unit test module in stand alone mode
    PALETTE = list(reversed(Purples[8]))
    PLOT = Map(plot_width=800, plot_height=400, palette=PALETTE)
    # layout: select on top, map in the middle, slider + play button below
    LAYOUT = column(PLOT.controls['select'],
                    PLOT.plot,
                    row(PLOT.controls['slider'], PLOT.controls['button']))
    curdoc().add_root(LAYOUT)
    curdoc().title = 'maps'
    curdoc().theme = Theme(filename=join(cwd(), "theme.yaml"))
| [
"logging.basicConfig",
"logging.getLogger",
"database.DataBase",
"bokeh.plotting.figure",
"bokeh.models.Label",
"bokeh.layouts.row",
"bokeh.models.GroupFilter",
"bokeh.models.LinearColorMapper",
"bokeh.models.CustomJS",
"bokeh.models.Select",
"bokeh.io.curdoc",
"bokeh.models.CDSView",
"bokeh... | [((710, 749), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (729, 749), False, 'import logging\n'), ((756, 783), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (773, 783), False, 'import logging\n'), ((1042, 1052), 'database.DataBase', 'DataBase', ([], {}), '()\n', (1050, 1052), False, 'from database import DataBase\n'), ((1779, 1906), 'bokeh.plotting.figure', 'figure', ([], {'match_aspect': '(True)', 'toolbar_location': '"""right"""', 'tools': '"""box_zoom, wheel_zoom, pan, reset, save"""', 'name': '"""maps"""'}), "(match_aspect=True, toolbar_location='right', tools=\n 'box_zoom, wheel_zoom, pan, reset, save', name='maps', **kwargs)\n", (1785, 1906), False, 'from bokeh.plotting import figure\n'), ((2479, 2533), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'self.palette', 'low': '(0)', 'high': '(9)'}), '(palette=self.palette, low=0, high=9)\n', (2496, 2533), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((3398, 3611), 'bokeh.models.Label', 'Label', ([], {'x': '(0.35 * self.plot.plot_width)', 'y': '(0.01 * self.plot.plot_height)', 'x_units': '"""screen"""', 'y_units': '"""screen"""', 'text': '""""""', 'render_mode': '"""css"""', 'text_font_size': 'f"""{0.1 * self.plot.plot_height}px"""', 'text_color': '"""#eeeeee"""'}), "(x=0.35 * self.plot.plot_width, y=0.01 * self.plot.plot_height,\n x_units='screen', y_units='screen', text='', render_mode='css',\n text_font_size=f'{0.1 * self.plot.plot_height}px', text_color='#eeeeee')\n", (3403, 3611), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((5547, 5636), 'bokeh.models.Select', 'Select', ([], {'value': '"""a"""', 'options': "self.meta['options']", 'max_width': '(self.plot.plot_width - 35)'}), 
"(value='a', options=self.meta['options'], max_width=self.plot.\n plot_width - 35)\n", (5553, 5636), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((5710, 5757), 'bokeh.models.GroupFilter', 'GroupFilter', ([], {'column_name': '"""state_id"""', 'group': '"""12"""'}), "(column_name='state_id', group='12')\n", (5721, 5757), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((5781, 5837), 'bokeh.models.CDSView', 'CDSView', ([], {'source': "self.srcs['counties']", 'filters': '[_filter]'}), "(source=self.srcs['counties'], filters=[_filter])\n", (5788, 5837), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((5862, 5911), 'bokeh.models.CDSView', 'CDSView', ([], {'source': "self.srcs['counties']", 'filters': '[]'}), "(source=self.srcs['counties'], filters=[])\n", (5869, 5911), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((5933, 5987), 'bokeh.models.CDSView', 'CDSView', ([], {'source': "self.srcs['states']", 'filters': '[_filter]'}), "(source=self.srcs['states'], filters=[_filter])\n", (5940, 5987), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((6010, 6057), 'bokeh.models.CDSView', 'CDSView', ([], {'source': "self.srcs['states']", 'filters': '[]'}), "(source=self.srcs['states'], filters=[])\n", (6017, 6057), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((6507, 7075), 'bokeh.models.CustomJS', 'CustomJS', ([], {'args': '_args', 'code': '"""\n if (cb_obj.value != \'00\'){\n 
console.log(cb_obj.value);\n filter.group = cb_obj.value;\n counties_glyph.view = counties_view_on;\n states_glyph.view = states_view_on;\n }\n else{\n console.log(cb_obj.value);\n counties_glyph.view = counties_view_off;\n states_glyph.view = states_view_off;\n }\n counties_src.change.emit();\n states_src.change.emit();\n """'}), '(args=_args, code=\n """\n if (cb_obj.value != \'00\'){\n console.log(cb_obj.value);\n filter.group = cb_obj.value;\n counties_glyph.view = counties_view_on;\n states_glyph.view = states_view_on;\n }\n else{\n console.log(cb_obj.value);\n counties_glyph.view = counties_view_off;\n states_glyph.view = states_view_off;\n }\n counties_src.change.emit();\n states_src.change.emit();\n """\n )\n', (6515, 7075), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((8823, 8866), 'bokeh.models.Button', 'Button', ([], {'label': '"""► Play"""', 'width': '(80)', 'height': '(60)'}), "(label='► Play', width=80, height=60)\n", (8829, 8866), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, Label\n'), ((11237, 11290), 'bokeh.layouts.row', 'row', (["PLOT.controls['slider']", "PLOT.controls['button']"], {}), "(PLOT.controls['slider'], PLOT.controls['button'])\n", (11240, 11290), False, 'from bokeh.layouts import column, row\n'), ((11327, 11335), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (11333, 11335), False, 'from bokeh.io import curdoc\n'), ((11355, 11363), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (11361, 11363), False, 'from bokeh.io import curdoc\n'), ((5237, 5282), 'bokeh.models.Legend', 'Legend', ([], {'items': '_items', 'location': '"""bottom_right"""'}), "(items=_items, location='bottom_right')\n", (5243, 5282), False, 'from bokeh.models import CustomJS, GeoJSONDataSource, HoverTool, Legend, LinearColorMapper, Select, GroupFilter, CDSView, Button, 
Label\n'), ((11297, 11305), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (11303, 11305), False, 'from bokeh.io import curdoc\n'), ((11392, 11397), 'utilities.cwd', 'cwd', ([], {}), '()\n', (11395, 11397), False, 'from utilities import cwd\n'), ((4677, 4692), 'numpy.isinf', 'np.isinf', (['_lead'], {}), '(_lead)\n', (4685, 4692), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 11:15:29 2020
@author:
Dr. <NAME>
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: <EMAIL>
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.timeseries import LombScargle
from statsmodels.graphics.tsaplots import plot_acf
#::: my modules
from ..exoworlds_rdx.lightcurves.lightcurve_tools import plot_phase_folded_lightcurve
from allesfitter.time_series import clean, sigma_clip, slide_clip
#::: plotting settings
import seaborn as sns
# Apply consistent seaborn styling for every figure produced in this module:
# paper context, inward ticks, and slightly heavier marker edges.
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
###############################################################################
#::: run a periodogram via astropy to get the dominant period and FAP
###############################################################################
def estimate_period(time, y, y_err, clip=True, plot=True, **kwargs):
"""
Run a Lomb-Scargle Periodogram to find periodic signals. It's recommended
to use the allesfitter.time_series functions sigma_clip and slide_clip beforehand.
Parameters
----------
time : array of float
e.g. time array (usually in days)
y : array of float
e.g. flux or RV array (usually as normalized flux or RV in km/s)
yerr : array of float
e.g. flux or RV error array (usually as normalized flux or RV in km/s)
clip : bool, optional
Automatically clip the input data with sigma_clip(low=4, high=4)
and slide_clip(window_length=1, low=4, high=4). The default is True.
plot : bool, optional
To plot or not, that is the question. The default is False.
**kwargs : collection of keyword arguments
Any keyword arguments will be passed onto the astropy periodogram class.
Returns
-------
best_period : float
The best period found.
FAP : float
The false alarm probability for the best period.
fig : matplotlib.figure object, optional
The summary figure. Only returned if plot is True.
"""
#==========================================================================
#::: clean the inputs
#==========================================================================
time, y, y_err = clean(time, y, y_err)
plot_bool = plot
if clip:
y = sigma_clip(time, y, low=4, high=4)
y = slide_clip(time, y, window_length=1, low=4, high=4)
time, y, y_err = clean(time, y, y_err)
#==========================================================================
#::: handle inputs
#==========================================================================
cadence = np.nanmedian(np.diff(time))
if kwargs is None: kwargs = {}
if 'minperiod' not in kwargs: kwargs['minperiod'] = 10. * cadence
if 'maxperiod' not in kwargs: kwargs['maxperiod'] = time[-1]-time[0]
minfreq = 1./kwargs['maxperiod']
maxfreq = 1./kwargs['minperiod']
#==========================================================================
#::: now do the periodogram
#==========================================================================
ls = LombScargle(time, y) #Analyze our dates and s-index data using the AstroPy Lomb Scargle module
frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
best_power = np.nanmax(power)
best_frequency = frequency[np.argmax(power)]
best_period = 1./best_frequency
FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
#==========================================================================
#::: plot
#==========================================================================
    def plot():
        """Build the 4-panel summary figure for the periodogram result.

        Uses the enclosing scope's ``time``, ``y``, ``ls``, ``frequency``,
        ``power``, ``best_frequency``, ``best_power``, ``best_period``,
        ``FAP``, ``cadence`` and ``kwargs``. Returns the matplotlib figure.
        """
        # Peak period rounded to 2 decimals, used for markers and labels.
        peak_loc=round(float(1./best_frequency),2)
        # NOTE(review): these are false-alarm *probabilities* (50%, 10%, 1%),
        # but the text labels below call them 0.5%/0.1%/0.01% — one of the
        # two looks wrong; confirm the intended FAP levels.
        FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
        FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
        fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
        #::: plot the periodogram (log-x) with the best peak and FAP levels
        ax = axes[0]
        ax.semilogx(1./frequency,power,color='b')
        ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
        ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
        ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
        ax.hlines(FAP_levels, kwargs['minperiod'], kwargs['maxperiod'], color='grey', lw=1)
        ax.text(kwargs['maxperiod'], FAP_levels[0],'0.5% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[1],'0.1% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[2],'0.01% FAP ', ha='right')
        ax.set(xlabel='Period (days)', ylabel='L-S power')
        ax.tick_params(axis='both',which='major')
        #::: plot the phase-folded data
        ax = axes[1]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylim=[np.nanmin(y), np.nanmax(y)], ylabel='Data (clipped; phased)')
        #::: plot the phase-folded data, zoomed (default y-limits)
        ax = axes[2]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylabel='Data (clipped; phased; y-zoom)')
        #::: plot the autocorrelation of the data, out to ~2 best periods
        ax = axes[3]
        plot_acf(pd.Series(y, index=time), ax=ax, lags=np.linspace(start=1,stop=2*best_period/cadence,num=100,dtype=int))
        ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
        return fig
#==========================================================================
#::: return
#==========================================================================
if plot_bool:
fig = plot()
return best_period, FAP, fig
else:
return best_period, FAP
# ###############################################################################
# #::: run a periodogram via astropy to get the dominant period and FAP
# ###############################################################################
def estimate_period_old(time, y, y_err, periodogram_kwargs=None, astropy_kwargs=None, wotan_kwargs=None, options=None):
    '''
    Estimate the dominant period of a time series via an astropy
    Lomb-Scargle periodogram, after a global sigma clip and a sliding clip.

    Parameters
    ----------
    time : array-like
        Time stamps of the time series.
    y : array-like
        Values of the time series (e.g. flux).
    y_err : array-like
        Uncertainties on y (only used to mask NaN entries).
    periodogram_kwargs : dict or None, optional
        May contain 'minperiod' and 'maxperiod' (same units as time).
        Defaults: 10x the median cadence and the full baseline.
    astropy_kwargs : dict or None, optional
        May contain 'sigma' for the global sigma clip. Default is 5.
    wotan_kwargs : dict or None, optional
        May contain 'slide_clip' with 'window_length', 'low' and 'high'.
        Defaults: 1., 5 and 5.
    options : None or dictionary, optional
        The default is None, which will evaluate to:
            options = {}
            options['show_plot'] = False #show a plot in the terminal?
            options['save_plot'] = False #save a plot?
            options['return_plot'] = False #return the plot axes?
            options['fname_plot'] = 'periodogram' #filename of the plot
            options['outdir'] = '.' #output directory for the plot
        If a dictionary is given, it may contain and overwrite all these keys.

    Returns
    -------
    best_period : float
        The best period found (1/best_frequency).
    FAP : float
        The false alarm probability for the best period.
    axes : array of matplotlib axes, optional
        Only returned if options['return_plot'] is True.
    '''
    #==========================================================================
    #::: handle inputs (fill in defaults without overwriting user choices)
    #==========================================================================
    cadence = np.nanmedian(np.diff(time))
    if periodogram_kwargs is None: periodogram_kwargs = {}
    if 'minperiod' not in periodogram_kwargs: periodogram_kwargs['minperiod'] = 10. * cadence
    if 'maxperiod' not in periodogram_kwargs: periodogram_kwargs['maxperiod'] = time[-1]-time[0]
    if astropy_kwargs is None: astropy_kwargs = {}
    if 'sigma' not in astropy_kwargs: astropy_kwargs['sigma'] = 5
    if wotan_kwargs is None: wotan_kwargs = {}
    if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}
    if 'window_length' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['window_length'] = 1.
    if 'low' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['low'] = 5
    if 'high' not in wotan_kwargs['slide_clip']: wotan_kwargs['slide_clip']['high'] = 5
    if options is None: options = {}
    if 'show_plot' not in options: options['show_plot'] = False
    if 'save_plot' not in options: options['save_plot'] = False
    if 'return_plot' not in options: options['return_plot'] = False
    if 'fname_plot' not in options: options['fname_plot'] = 'periodogram'
    if 'outdir' not in options: options['outdir'] = '.'
    # Period bounds translate into the frequency search window.
    minfreq = 1./periodogram_kwargs['maxperiod']
    maxfreq = 1./periodogram_kwargs['minperiod']
    #==========================================================================
    #::: first, a global 5 sigma clip
    #==========================================================================
    ff = sigma_clip(time, np.ma.masked_invalid(y), low=astropy_kwargs['sigma'], high=astropy_kwargs['sigma']) #astropy wants masked arrays
    # ff = np.array(ff.filled(np.nan)) #use NaN instead of masked arrays, because masked arrays drive me crazy
    #==========================================================================
    #::: fast slide clip (1 day, 5 sigma) [replaces Wotan's slow slide clip]
    #==========================================================================
    # Best-effort: if the slide clip fails we continue with the globally
    # clipped data. Catch Exception (not bare except) so Ctrl-C still works.
    try: ff = slide_clip(time, ff, **wotan_kwargs['slide_clip'])
    except Exception: print('Fast slide clip failed and was skipped.')
    #==========================================================================
    #::: now do the periodogram
    #==========================================================================
    # Drop samples where time, clipped flux, or error is NaN/masked.
    ind_notnan = np.where(~np.isnan(time*ff*y_err))
    ls = LombScargle(time[ind_notnan], ff[ind_notnan]) #Analyze our dates and s-index data using the AstroPy Lomb Scargle module
    frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
    best_power = np.nanmax(power)
    best_frequency = frequency[np.argmax(power)]
    FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
    #==========================================================================
    #::: plots
    #==========================================================================
    if options['show_plot'] or options['save_plot'] or options['return_plot']:
        peak_loc=round(float(1./best_frequency),2)
        # NOTE(review): probabilities 0.5/0.1/0.01 are 50%/10%/1%, yet the
        # labels below say 0.5%/0.1%/0.01% — confirm which is intended.
        FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
        FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
        fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
        # axes = np.atleast_1d(axes)
        # ax = axes[0]
        # ind_clipped = np.where(np.isnan(ff))[0]
        # ax.plot(time[ind_clipped], flux[ind_clipped], 'r.', rasterized=True)
        # ax.plot(time, ff, 'b.', rasterized=True)
        # ax.set(xlabel='Time (BJD)', ylabel='Flux')
        # ax = axes[1]
        # ax.plot(time, ff, 'b.', rasterized=True)
        # ax.set(xlabel='Time (BJD)', ylabel='Flux (clipped)')
        #::: periodogram panel with best peak and FAP levels
        ax = axes[0]
        ax.semilogx(1./frequency,power,color='b')
        ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
        ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
        ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
        ax.hlines(FAP_levels, periodogram_kwargs['minperiod'], periodogram_kwargs['maxperiod'], color='grey', lw=1)
        ax.text(periodogram_kwargs['maxperiod'], FAP_levels[0],'0.5% FAP ', ha='right')
        ax.text(periodogram_kwargs['maxperiod'], FAP_levels[1],'0.1% FAP ', ha='right')
        ax.text(periodogram_kwargs['maxperiod'], FAP_levels[2],'0.01% FAP ', ha='right')
        ax.set(xlabel='Period (days)', ylabel='L-S power')
        ax.tick_params(axis='both',which='major')
        # ax.text(peak_loc*1.2,best_power*0.75,'std_old:'+str(std_old*1e3)[0:4]+' --> '+'std_new:'+str(std_new*1e3)[0:4])
        #::: phase-folded data panel
        ax = axes[1]
        plot_phase_folded_lightcurve(time, ff, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylim=[np.nanmin(ff), np.nanmax(ff)], ylabel='Data (clipped; phased)')
        #::: phase-folded data panel, zoomed (default y-limits)
        ax = axes[2]
        plot_phase_folded_lightcurve(time, ff, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylabel='Data (clipped; phased; y-zoom)')
        #::: plot the autocorrelation of the data
        ax = axes[3]
        plot_acf(pd.Series(ff[ind_notnan], index=time[ind_notnan]), ax=ax, lags=np.linspace(start=1,stop=2000,num=100,dtype=int))
        ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
        if options['save_plot']:
            if not os.path.exists(options['outdir']): os.makedirs(options['outdir'])
            fig.savefig(os.path.join(options['outdir'],options['fname_plot']+'.pdf'), bbox_inches='tight')
        if options['show_plot']:
            plt.show(fig)
        else:
            plt.close(fig)
    if options['return_plot'] is True:
        return 1./best_frequency, FAP, axes
    else:
        return 1./best_frequency, FAP
###############################################################################
#::: estimate a good window length for spline knots, running median etc.
###############################################################################
# def estimate_window_length(time, flux, flux_err, periodogram_kwargs=None, wotan_kwargs=None, options=None):
# window_length_min = 12./24. #at least 12h to not destroy planets
# window_length_max = 1. #at most 1 day
# cadence = np.median(np.diff(time))
# best_period, FAP = estimate_period(time, flux, flux_err)
# if best_period < 100.*cadence:
# window_length = best_period/10.
# return np.min()
# return best_period/10.
# else:
# return None
# warnings.warn('Returning None. Best period was found to be', best_period*24*60, 'min., but cadence is only', cadence*24*60, 'min.')
###############################################################################
#::: remove periodic trends
###############################################################################
# def estimate_trend(time, flux, flux_err):
# iterations = 3
# wotan_kwargs = {'slide_clip':{}, 'flatten':{}}
# wotan_kwargs['slide_clip']['window_length'] = 1.
# wotan_kwargs['slide_clip']['low'] = 3.
# wotan_kwargs['slide_clip']['high'] = 3.
# wotan_kwargs['flatten']['method'] = 'rspline'
# wotan_kwargs['flatten']['window_length'] = None
# wotan_kwargs['flatten']['break_tolerance'] = 0.5
# trend = np.ones_like(time)
# #::: global sigma clipping
# flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)
# #::: 1 day slide clip
# flux = slide_clip(time, flux, **wotan_kwargs['slide_clip'])
# for i in range(iterations):
# wotan_kwargs['flatten']['window_length'] = estimate_window_length(time, flux, flux_err)
# print(wotan_kwargs['flatten']['window_length']*10)
# if wotan_kwargs['flatten']['window_length'] is not None:
# flux, trend1 = flatten(time, flux, return_trend=True, **wotan_kwargs['flatten'])
# trend += (trend1 - 1.)
# plt.figure()
# plt.plot(time, flux)
# plt.title(i)
# return trend
| [
"seaborn.set_style",
"allesfitter.time_series.sigma_clip",
"numpy.nanmin",
"os.path.exists",
"seaborn.set",
"numpy.diff",
"allesfitter.time_series.clean",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.nanmax",
"numpy.ma.masked_invalid",
"seaborn.set_context",
"numpy.argmax",
"numpy.i... | [((955, 1067), 'seaborn.set', 'sns.set', ([], {'context': '"""paper"""', 'style': '"""ticks"""', 'palette': '"""deep"""', 'font': '"""sans-serif"""', 'font_scale': '(1.5)', 'color_codes': '(True)'}), "(context='paper', style='ticks', palette='deep', font='sans-serif',\n font_scale=1.5, color_codes=True)\n", (962, 1067), True, 'import seaborn as sns\n'), ((1064, 1129), 'seaborn.set_style', 'sns.set_style', (["{'xtick.direction': 'in', 'ytick.direction': 'in'}"], {}), "({'xtick.direction': 'in', 'ytick.direction': 'in'})\n", (1077, 1129), True, 'import seaborn as sns\n'), ((1129, 1177), 'seaborn.set_context', 'sns.set_context', ([], {'rc': "{'lines.markeredgewidth': 1}"}), "(rc={'lines.markeredgewidth': 1})\n", (1144, 1177), True, 'import seaborn as sns\n'), ((2830, 2851), 'allesfitter.time_series.clean', 'clean', (['time', 'y', 'y_err'], {}), '(time, y, y_err)\n', (2835, 2851), False, 'from allesfitter.time_series import clean, sigma_clip, slide_clip\n'), ((3751, 3771), 'astropy.timeseries.LombScargle', 'LombScargle', (['time', 'y'], {}), '(time, y)\n', (3762, 3771), False, 'from astropy.timeseries import LombScargle\n'), ((3983, 3999), 'numpy.nanmax', 'np.nanmax', (['power'], {}), '(power)\n', (3992, 3999), True, 'import numpy as np\n'), ((10422, 10467), 'astropy.timeseries.LombScargle', 'LombScargle', (['time[ind_notnan]', 'ff[ind_notnan]'], {}), '(time[ind_notnan], ff[ind_notnan])\n', (10433, 10467), False, 'from astropy.timeseries import LombScargle\n'), ((10681, 10697), 'numpy.nanmax', 'np.nanmax', (['power'], {}), '(power)\n', (10690, 10697), True, 'import numpy as np\n'), ((2903, 2937), 'allesfitter.time_series.sigma_clip', 'sigma_clip', (['time', 'y'], {'low': '(4)', 'high': '(4)'}), '(time, y, low=4, high=4)\n', (2913, 2937), False, 'from allesfitter.time_series import clean, sigma_clip, slide_clip\n'), ((2950, 3001), 'allesfitter.time_series.slide_clip', 'slide_clip', (['time', 'y'], {'window_length': '(1)', 'low': '(4)', 'high': '(4)'}), 
'(time, y, window_length=1, low=4, high=4)\n', (2960, 3001), False, 'from allesfitter.time_series import clean, sigma_clip, slide_clip\n'), ((3027, 3048), 'allesfitter.time_series.clean', 'clean', (['time', 'y', 'y_err'], {}), '(time, y, y_err)\n', (3032, 3048), False, 'from allesfitter.time_series import clean, sigma_clip, slide_clip\n'), ((3273, 3286), 'numpy.diff', 'np.diff', (['time'], {}), '(time)\n', (3280, 3286), True, 'import numpy as np\n'), ((4031, 4047), 'numpy.argmax', 'np.argmax', (['power'], {}), '(power)\n', (4040, 4047), True, 'import numpy as np\n'), ((4663, 4718), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '[10, 15]', 'tight_layout': '(True)'}), '(4, 1, figsize=[10, 15], tight_layout=True)\n', (4675, 4718), True, 'import matplotlib.pyplot as plt\n'), ((8054, 8067), 'numpy.diff', 'np.diff', (['time'], {}), '(time)\n', (8061, 8067), True, 'import numpy as np\n'), ((9557, 9580), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['y'], {}), '(y)\n', (9577, 9580), True, 'import numpy as np\n'), ((10042, 10092), 'allesfitter.time_series.slide_clip', 'slide_clip', (['time', 'ff'], {}), "(time, ff, **wotan_kwargs['slide_clip'])\n", (10052, 10092), False, 'from allesfitter.time_series import clean, sigma_clip, slide_clip\n'), ((10729, 10745), 'numpy.argmax', 'np.argmax', (['power'], {}), '(power)\n', (10738, 10745), True, 'import numpy as np\n'), ((11440, 11495), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '[10, 15]', 'tight_layout': '(True)'}), '(4, 1, figsize=[10, 15], tight_layout=True)\n', (11452, 11495), True, 'import matplotlib.pyplot as plt\n'), ((6091, 6115), 'pandas.Series', 'pd.Series', (['y'], {'index': 'time'}), '(y, index=time)\n', (6100, 6115), True, 'import pandas as pd\n'), ((10388, 10415), 'numpy.isnan', 'np.isnan', (['(time * ff * y_err)'], {}), '(time * ff * y_err)\n', (10396, 10415), True, 'import numpy as np\n'), ((13395, 13444), 'pandas.Series', 'pd.Series', 
(['ff[ind_notnan]'], {'index': 'time[ind_notnan]'}), '(ff[ind_notnan], index=time[ind_notnan])\n', (13404, 13444), True, 'import pandas as pd\n'), ((13848, 13861), 'matplotlib.pyplot.show', 'plt.show', (['fig'], {}), '(fig)\n', (13856, 13861), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13902), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13897, 13902), True, 'import matplotlib.pyplot as plt\n'), ((6129, 6201), 'numpy.linspace', 'np.linspace', ([], {'start': '(1)', 'stop': '(2 * best_period / cadence)', 'num': '(100)', 'dtype': 'int'}), '(start=1, stop=2 * best_period / cadence, num=100, dtype=int)\n', (6140, 6201), True, 'import numpy as np\n'), ((13458, 13509), 'numpy.linspace', 'np.linspace', ([], {'start': '(1)', 'stop': '(2000)', 'num': '(100)', 'dtype': 'int'}), '(start=1, stop=2000, num=100, dtype=int)\n', (13469, 13509), True, 'import numpy as np\n'), ((13630, 13663), 'os.path.exists', 'os.path.exists', (["options['outdir']"], {}), "(options['outdir'])\n", (13644, 13663), False, 'import os\n'), ((13665, 13695), 'os.makedirs', 'os.makedirs', (["options['outdir']"], {}), "(options['outdir'])\n", (13676, 13695), False, 'import os\n'), ((13720, 13783), 'os.path.join', 'os.path.join', (["options['outdir']", "(options['fname_plot'] + '.pdf')"], {}), "(options['outdir'], options['fname_plot'] + '.pdf')\n", (13732, 13783), False, 'import os\n'), ((5710, 5722), 'numpy.nanmin', 'np.nanmin', (['y'], {}), '(y)\n', (5719, 5722), True, 'import numpy as np\n'), ((5724, 5736), 'numpy.nanmax', 'np.nanmax', (['y'], {}), '(y)\n', (5733, 5736), True, 'import numpy as np\n'), ((13059, 13072), 'numpy.nanmin', 'np.nanmin', (['ff'], {}), '(ff)\n', (13068, 13072), True, 'import numpy as np\n'), ((13074, 13087), 'numpy.nanmax', 'np.nanmax', (['ff'], {}), '(ff)\n', (13083, 13087), True, 'import numpy as np\n')] |
"""
<NAME>
2014 August 20
Plot dN/dA as a function of angular separation from the center of light. dN =
number of objects between radius 1 and radius 2. dA = area between radius 1
and radius 2.
"""
from astropy.table import Table
from astropy.io import ascii
import matplotlib
import matplotlib.pyplot as plt
from pylab import savefig
import numpy
import sep_util
# set font properties for all figure text (applied globally via rcParams)
font = {'family' : 'Arial',
        'weight' : 'normal',
        'size'   : 12}
matplotlib.rc('font', **font)
matplotlib.rcParams['axes.linewidth'] = 1.2
# single figure shared by all the plotting calls below
fig = plt.figure(figsize=(5.0, 4.5))
# histogram binning: 10 bins of 1 arcsec width, edges at 0..10
nbins = 10
binwidth = 1.0
bin_edges = numpy.arange(0, nbins + 1, binwidth)
# ***********
# ALMA sample with ALESS uv coverage and 1.4 mJy flux limit
# ***********
# NOTE: scaling factor by which to reduce flux of each ALMA source in my
# sample should be determined by comparing total 870um flux densities for each
# Herschel source and each LESS source. So far, the factor of 3 was meant to
# reproduce the median S500 flux density ratio, but S870 will be more accurate.
# plotting style for this dataset
simcolor = 'MediumSpringGreen'
simms = 4
simfmt = 's-'
fluxcomponent_file = '../Data/table_ALESSsim.dat'
fluxcomponent = Table.read(fluxcomponent_file, format='ascii')
# filter out single source systems
fluxcomponent = sep_util.rmSingles(fluxcomponent, targetstring='target')
# filter out objects below a given flux threshold
fluxcomponent = sep_util.setThresh(fluxcomponent, 1.4, fluxstring='peakflux')
nmultiples = len(fluxcomponent)
# per-target separations from the centroid (mean, flux-weighted mean, coords)
simalma = sep_util.getSeparation(fluxcomponent, rastring='ra_alma', \
    decstring='dec_alma', fluxstring='peakflux')
avgsep_simalma, wmeansep_simalma, ra_simalma, dec_simalma = simalma
# add this sample's dN/dA histogram to the shared figure
sep_util.histArea(avgsep_simalma, nbins, color=simcolor, fmt=simfmt, ms=simms,
    norm=nmultiples, label='Herschel-ALMA ALESS-Sim')
# disabled: uses nsim/asimcolor which are only defined further below
#sep_util.simArea(fluxcomponent, nsim, bin_edges, fluxstring='S_870_observed',
#        edgecolor=asimcolor, facecolor='none', hatch='\\', norm=nmultiples)
# *********
# Cowley simulation
# *********
# plotting style for this dataset
c15color = 'Orange'
c15ms = 6
c15fmt = 'x-'
c15mew = 1.5
c15 = ascii.read('../Data/SPIRE_ALMA_Cat_v4.txt')
s500_c15 = c15['SourceS500']
zc = c15['z']
# keep bright (S500 > 50) sources at z > 1 only
hithresh = (s500_c15 > 50) & (zc > 1)
c15 = c15[hithresh]
# filter out single source systems
c15 = sep_util.rmSingles(c15, targetstring='SurveyID')
nmultiples = len(c15)
# separations from the centroid (coordinates given in degrees here)
simc15 = sep_util.getSeparation(c15, degrees=True, rastring='GalaxyX', \
    decstring='GalaxyY', fluxstring='GalaxyS850', targetstring='SurveyID')
avgsep_c15, wmeansep_c15, ra_c15, dec_c15 = simc15
# add this sample's dN/dA histogram to the shared figure (no error bars)
sep_util.histArea(avgsep_c15, nbins, color=c15color, fmt=c15fmt,
    ms=c15ms, norm=nmultiples, showerror=False, label='C15 Simulation',
    mew=c15mew)
# *********
# ALESS
# *********
# Plotting parameters
hodgecolor = 'LightPink'
hodgesimcolor = 'LightPink'
hodgems = 4
hodgefmt = 'D-'
# Load the data
fluxcomponent_file = '../Data/hodge2013.dat'
fluxcomponent = Table.read(fluxcomponent_file, format='ascii')
# filter out single source systems
fluxcomponent = sep_util.rmSingles(fluxcomponent, targetstring='lessid')
nmultiples = len(fluxcomponent)
# per-target separations from the centroid
hodge = sep_util.getSeparation(fluxcomponent, rastring='ra_alma', \
    decstring = 'dec_alma', targetstring='lessid')
avgsep_hodge, wmeansep_hodge, ra_hodge, dec_hodge = hodge
deltasep = avgsep_hodge.max() - avgsep_hodge.min()
#nbins = deltasep / binwidth
# add the ALESS dN/dA histogram to the shared figure
sep_util.histArea(avgsep_hodge, nbins, color=hodgecolor, fmt=hodgefmt,
    ms=hodgems, norm=nmultiples, label='ALESS')
# cumulative flux as a function of separation (currently only computed,
# the corresponding plot call below is commented out)
indexsort = numpy.argsort(avgsep_hodge)
avgsep_hodge = avgsep_hodge[indexsort]
flux_hodge = fluxcomponent['f880'][indexsort]
nflux = flux_hodge.size
sumflux_hodge = numpy.zeros(nflux)
for i in range(nflux):
    sumflux_hodge[i] = flux_hodge[0:i].sum()
#plt.plot(avgsep_hodge, sumflux_hodge)
# plot simulated positions
# number of random realizations used by sep_util.simArea below
nsim = 1000
#sep_util.simArea(fluxcomponent, nsim, bin_edges, targetstring='lessid',
#        edgecolor=hodgesimcolor, facecolor='none', hatch='//', norm=nmultiples)
# ***********
# ALMA sample
# ***********
# plotting parameters
acolor = 'green'
asimcolor = '0.2'
ams = 5
afmt = 's-'
fluxcomponent_file = '../Data/table_intrinsic.dat'
fluxcomponent = Table.read(fluxcomponent_file, format='ascii')
# filter out single source systems
fluxcomponent = sep_util.rmSingles(fluxcomponent, targetstring='target')
nmultiples = len(fluxcomponent)
# per-target separations from the centroid
alma = sep_util.getSeparation(fluxcomponent, fluxstring='f870')
avgsep_alma, wmeansep_alma, ra_alma, dec_alma = alma
# cumulative flux vs separation (plot call below is commented out)
indexsort = numpy.argsort(avgsep_alma)
avgsep_alma = avgsep_alma[indexsort]
flux_alma = fluxcomponent['f870'][indexsort]
nflux = flux_alma.size
sumflux_alma = numpy.zeros(nflux)
for i in range(nflux):
    sumflux_alma[i] = flux_alma[0:i].sum()
#plt.plot(avgsep_alma, sumflux_alma)
#plt.show()
#import pdb; pdb.set_trace()
# add the observed histogram plus the randomly-distributed comparison
sep_util.histArea(avgsep_alma, nbins, color=acolor, fmt=afmt, ms=ams,
    norm=nmultiples, label='Herschel-ALMA')
sep_util.simArea(fluxcomponent, nsim, bin_edges, fluxstring='f870',
    edgecolor=asimcolor, facecolor='none', hatch='\\', norm=nmultiples,
    label='Randomly Distributed')
# HB13 (Hayward) simulation curve, read from a precomputed dN/dA table
hayward = Table.read('../Data/dNdA_40arcsec_bright.txt', format='ascii')
xxx = hayward['separation']
yyy = hayward['dNdA']
plt.plot(xxx, yyy, '+-', ms=8, mew=1.5, color='blue', linewidth=1.5,
    label='HB13 Simulation')
#import pdb; pdb.set_trace()
#plt.semilogy()
# axis limits for the final figure
xmin = 0
ymin = 0
xmax = 6
ymax = 0.15
plt.axis([xmin, xmax, ymin, ymax])
plt.xlabel(r'${\rm Radial\,Offset\,from\,Centroid\,(arcsec)}$', fontsize='large')
plt.ylabel(r'$dN/dA \, ({\rm arcsec}^{-2}$)', fontsize='large')
plt.minorticks_on()
plt.tick_params(width=1.2, which='both')
plt.tick_params(length=2, which='minor')
plt.tick_params(length=4, which='major')
# NOTE(review): 'fake' was for the off-canvas legend proxies below, which
# are all commented out — it is currently unused
fake = numpy.arange(2) + 1e5
#plt.plot(fake, color=hodgecolor, label='ALESS')
#plt.plot(fake, 'x-', ms=6, mew=2, linewidth=1.5, color=c15color, label='C15 Simulation')
##plt.plot(fake, color=hodgesimcolor, linestyle='--',
##         label='Hodge+13 Random')
#plt.plot(fake, color=acolor, label='Herschel-ALMA')
#plt.plot(fake, color=simcolor, label='Herschel-ALMA ALESS-Sim')
#plt.plot(fake, color=asimcolor, linestyle='--',
#         label='Randomly Distributed')
plt.legend(loc='upper right', numpoints=1, handletextpad=0.35, borderpad=0.4,
    labelspacing=0.18, handlelength=1.0)
# ltext is fetched so the font size could be tweaked (call commented out)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
#plt.setp(ltext, fontsize='medium')
plt.subplots_adjust(left=0.14, right=0.95, top=0.97, bottom=0.13, wspace=0.39)
savefig('../Figures/dNdA.pdf')
# NOTE(review): debugger breakpoint left in — remove before unattended runs
import pdb; pdb.set_trace()
| [
"sep_util.simArea",
"matplotlib.pyplot.ylabel",
"pylab.savefig",
"sep_util.histArea",
"numpy.argsort",
"matplotlib.rc",
"numpy.arange",
"sep_util.setThresh",
"matplotlib.pyplot.xlabel",
"sep_util.rmSingles",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.axis... | [((475, 504), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (488, 504), False, 'import matplotlib\n'), ((556, 586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.0, 4.5)'}), '(figsize=(5.0, 4.5))\n', (566, 586), True, 'import matplotlib.pyplot as plt\n'), ((626, 662), 'numpy.arange', 'numpy.arange', (['(0)', '(nbins + 1)', 'binwidth'], {}), '(0, nbins + 1, binwidth)\n', (638, 662), False, 'import numpy\n'), ((1186, 1232), 'astropy.table.Table.read', 'Table.read', (['fluxcomponent_file'], {'format': '"""ascii"""'}), "(fluxcomponent_file, format='ascii')\n", (1196, 1232), False, 'from astropy.table import Table\n'), ((1285, 1341), 'sep_util.rmSingles', 'sep_util.rmSingles', (['fluxcomponent'], {'targetstring': '"""target"""'}), "(fluxcomponent, targetstring='target')\n", (1303, 1341), False, 'import sep_util\n'), ((1409, 1470), 'sep_util.setThresh', 'sep_util.setThresh', (['fluxcomponent', '(1.4)'], {'fluxstring': '"""peakflux"""'}), "(fluxcomponent, 1.4, fluxstring='peakflux')\n", (1427, 1470), False, 'import sep_util\n'), ((1514, 1621), 'sep_util.getSeparation', 'sep_util.getSeparation', (['fluxcomponent'], {'rastring': '"""ra_alma"""', 'decstring': '"""dec_alma"""', 'fluxstring': '"""peakflux"""'}), "(fluxcomponent, rastring='ra_alma', decstring=\n 'dec_alma', fluxstring='peakflux')\n", (1536, 1621), False, 'import sep_util\n'), ((1696, 1829), 'sep_util.histArea', 'sep_util.histArea', (['avgsep_simalma', 'nbins'], {'color': 'simcolor', 'fmt': 'simfmt', 'ms': 'simms', 'norm': 'nmultiples', 'label': '"""Herschel-ALMA ALESS-Sim"""'}), "(avgsep_simalma, nbins, color=simcolor, fmt=simfmt, ms=\n simms, norm=nmultiples, label='Herschel-ALMA ALESS-Sim')\n", (1713, 1829), False, 'import sep_util\n'), ((2100, 2143), 'astropy.io.ascii.read', 'ascii.read', (['"""../Data/SPIRE_ALMA_Cat_v4.txt"""'], {}), "('../Data/SPIRE_ALMA_Cat_v4.txt')\n", (2110, 2143), False, 'from astropy.io import ascii\n'), 
((2251, 2299), 'sep_util.rmSingles', 'sep_util.rmSingles', (['c15'], {'targetstring': '"""SurveyID"""'}), "(c15, targetstring='SurveyID')\n", (2269, 2299), False, 'import sep_util\n'), ((2332, 2469), 'sep_util.getSeparation', 'sep_util.getSeparation', (['c15'], {'degrees': '(True)', 'rastring': '"""GalaxyX"""', 'decstring': '"""GalaxyY"""', 'fluxstring': '"""GalaxyS850"""', 'targetstring': '"""SurveyID"""'}), "(c15, degrees=True, rastring='GalaxyX', decstring=\n 'GalaxyY', fluxstring='GalaxyS850', targetstring='SurveyID')\n", (2354, 2469), False, 'import sep_util\n'), ((2526, 2674), 'sep_util.histArea', 'sep_util.histArea', (['avgsep_c15', 'nbins'], {'color': 'c15color', 'fmt': 'c15fmt', 'ms': 'c15ms', 'norm': 'nmultiples', 'showerror': '(False)', 'label': '"""C15 Simulation"""', 'mew': 'c15mew'}), "(avgsep_c15, nbins, color=c15color, fmt=c15fmt, ms=c15ms,\n norm=nmultiples, showerror=False, label='C15 Simulation', mew=c15mew)\n", (2543, 2674), False, 'import sep_util\n'), ((2902, 2948), 'astropy.table.Table.read', 'Table.read', (['fluxcomponent_file'], {'format': '"""ascii"""'}), "(fluxcomponent_file, format='ascii')\n", (2912, 2948), False, 'from astropy.table import Table\n'), ((3001, 3057), 'sep_util.rmSingles', 'sep_util.rmSingles', (['fluxcomponent'], {'targetstring': '"""lessid"""'}), "(fluxcomponent, targetstring='lessid')\n", (3019, 3057), False, 'import sep_util\n'), ((3099, 3206), 'sep_util.getSeparation', 'sep_util.getSeparation', (['fluxcomponent'], {'rastring': '"""ra_alma"""', 'decstring': '"""dec_alma"""', 'targetstring': '"""lessid"""'}), "(fluxcomponent, rastring='ra_alma', decstring=\n 'dec_alma', targetstring='lessid')\n", (3121, 3206), False, 'import sep_util\n'), ((3353, 3472), 'sep_util.histArea', 'sep_util.histArea', (['avgsep_hodge', 'nbins'], {'color': 'hodgecolor', 'fmt': 'hodgefmt', 'ms': 'hodgems', 'norm': 'nmultiples', 'label': '"""ALESS"""'}), "(avgsep_hodge, nbins, color=hodgecolor, fmt=hodgefmt, ms=\n hodgems, norm=nmultiples, 
label='ALESS')\n", (3370, 3472), False, 'import sep_util\n'), ((3489, 3516), 'numpy.argsort', 'numpy.argsort', (['avgsep_hodge'], {}), '(avgsep_hodge)\n', (3502, 3516), False, 'import numpy\n'), ((3642, 3660), 'numpy.zeros', 'numpy.zeros', (['nflux'], {}), '(nflux)\n', (3653, 3660), False, 'import numpy\n'), ((4151, 4197), 'astropy.table.Table.read', 'Table.read', (['fluxcomponent_file'], {'format': '"""ascii"""'}), "(fluxcomponent_file, format='ascii')\n", (4161, 4197), False, 'from astropy.table import Table\n'), ((4250, 4306), 'sep_util.rmSingles', 'sep_util.rmSingles', (['fluxcomponent'], {'targetstring': '"""target"""'}), "(fluxcomponent, targetstring='target')\n", (4268, 4306), False, 'import sep_util\n'), ((4347, 4403), 'sep_util.getSeparation', 'sep_util.getSeparation', (['fluxcomponent'], {'fluxstring': '"""f870"""'}), "(fluxcomponent, fluxstring='f870')\n", (4369, 4403), False, 'import sep_util\n'), ((4470, 4496), 'numpy.argsort', 'numpy.argsort', (['avgsep_alma'], {}), '(avgsep_alma)\n', (4483, 4496), False, 'import numpy\n'), ((4617, 4635), 'numpy.zeros', 'numpy.zeros', (['nflux'], {}), '(nflux)\n', (4628, 4635), False, 'import numpy\n'), ((4782, 4896), 'sep_util.histArea', 'sep_util.histArea', (['avgsep_alma', 'nbins'], {'color': 'acolor', 'fmt': 'afmt', 'ms': 'ams', 'norm': 'nmultiples', 'label': '"""Herschel-ALMA"""'}), "(avgsep_alma, nbins, color=acolor, fmt=afmt, ms=ams, norm=\n nmultiples, label='Herschel-ALMA')\n", (4799, 4896), False, 'import sep_util\n'), ((4901, 5074), 'sep_util.simArea', 'sep_util.simArea', (['fluxcomponent', 'nsim', 'bin_edges'], {'fluxstring': '"""f870"""', 'edgecolor': 'asimcolor', 'facecolor': '"""none"""', 'hatch': '"""\\\\"""', 'norm': 'nmultiples', 'label': '"""Randomly Distributed"""'}), "(fluxcomponent, nsim, bin_edges, fluxstring='f870',\n edgecolor=asimcolor, facecolor='none', hatch='\\\\', norm=nmultiples,\n label='Randomly Distributed')\n", (4917, 5074), False, 'import sep_util\n'), ((5094, 5156), 
'astropy.table.Table.read', 'Table.read', (['"""../Data/dNdA_40arcsec_bright.txt"""'], {'format': '"""ascii"""'}), "('../Data/dNdA_40arcsec_bright.txt', format='ascii')\n", (5104, 5156), False, 'from astropy.table import Table\n'), ((5208, 5306), 'matplotlib.pyplot.plot', 'plt.plot', (['xxx', 'yyy', '"""+-"""'], {'ms': '(8)', 'mew': '(1.5)', 'color': '"""blue"""', 'linewidth': '(1.5)', 'label': '"""HB13 Simulation"""'}), "(xxx, yyy, '+-', ms=8, mew=1.5, color='blue', linewidth=1.5, label=\n 'HB13 Simulation')\n", (5216, 5306), True, 'import matplotlib.pyplot as plt\n'), ((5398, 5432), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, ymin, ymax]'], {}), '([xmin, xmax, ymin, ymax])\n', (5406, 5432), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5523), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\rm Radial\\\\,Offset\\\\,from\\\\,Centroid\\\\,(arcsec)}$"""'], {'fontsize': '"""large"""'}), "('${\\\\rm Radial\\\\,Offset\\\\,from\\\\,Centroid\\\\,(arcsec)}$',\n fontsize='large')\n", (5444, 5523), True, 'import matplotlib.pyplot as plt\n'), ((5516, 5580), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$dN/dA \\\\, ({\\\\rm arcsec}^{-2}$)"""'], {'fontsize': '"""large"""'}), "('$dN/dA \\\\, ({\\\\rm arcsec}^{-2}$)', fontsize='large')\n", (5526, 5580), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5599), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (5597, 5599), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5640), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'width': '(1.2)', 'which': '"""both"""'}), "(width=1.2, which='both')\n", (5615, 5640), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5681), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(2)', 'which': '"""minor"""'}), "(length=2, which='minor')\n", (5656, 5681), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5722), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(4)', 'which': 
'"""major"""'}), "(length=4, which='major')\n", (5697, 5722), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6308), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'numpoints': '(1)', 'handletextpad': '(0.35)', 'borderpad': '(0.4)', 'labelspacing': '(0.18)', 'handlelength': '(1.0)'}), "(loc='upper right', numpoints=1, handletextpad=0.35, borderpad=\n 0.4, labelspacing=0.18, handlelength=1.0)\n", (6199, 6308), True, 'import matplotlib.pyplot as plt\n'), ((6402, 6480), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.14)', 'right': '(0.95)', 'top': '(0.97)', 'bottom': '(0.13)', 'wspace': '(0.39)'}), '(left=0.14, right=0.95, top=0.97, bottom=0.13, wspace=0.39)\n', (6421, 6480), True, 'import matplotlib.pyplot as plt\n'), ((6482, 6512), 'pylab.savefig', 'savefig', (['"""../Figures/dNdA.pdf"""'], {}), "('../Figures/dNdA.pdf')\n", (6489, 6512), False, 'from pylab import savefig\n'), ((6525, 6540), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6538, 6540), False, 'import pdb\n'), ((5731, 5746), 'numpy.arange', 'numpy.arange', (['(2)'], {}), '(2)\n', (5743, 5746), False, 'import numpy\n'), ((6318, 6327), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6325, 6327), True, 'import matplotlib.pyplot as plt\n')] |
import os
import cv2
import numpy as np
from PIL import Image
from random import shuffle
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import utils
import transforms as T
from engine import train_one_epoch, evaluate
# The 80 COCO object categories plus background ('BG'); the list index
# matches the label id predicted by the torchvision Mask R-CNN model.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
# One random RGB colour per class id (100 slots covers all 81 labels).
colors = [[np.random.randint(0, 255),
           np.random.randint(0, 255),
           np.random.randint(0, 255)]for i in range(100)]
# Use fixed, saturated colours for the most common classes so the final
# instance-segmentation overlay is clearly visible.
colors[1] = [255, 0, 0] # person
colors[2] = [0, 255, 0] # bicycle
colors[3] = [0, 0, 255] # car
colors[4] = [255, 255, 0] # motorcycle
def demo():
    """Run a COCO-pretrained Mask R-CNN over up to 50 random images from
    ``img_dir`` and show each instance-segmentation result side by side with
    the original image (press any key to advance).
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    img_dir = '/home/zyk/dataset/PennFudanPed/PNGImages'
    # load an instance segmentation model pre-trained on COCO
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    model.to(device)
    # put the model in evaluation mode
    model.eval()
    imgs = os.listdir(img_dir)
    shuffle(imgs)
    # Fix: the original looped `for i in range(50)` and indexed imgs[i],
    # which raises IndexError when the directory holds fewer than 50 files.
    for img_name in imgs[:50]:
        imgsrc = cv2.imread(os.path.join(img_dir, img_name))
        if imgsrc is None:
            # Not a readable image file (e.g. stray non-image entry); skip.
            continue
        all_cls_mask_color = np.zeros_like(imgsrc)
        all_cls_mask_index = np.zeros_like(imgsrc)
        img = imgsrc / 255.
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW for torch
        img = torch.tensor(img, dtype=torch.float)
        img = img.to(device)
        with torch.no_grad():
            prediction = model([img])[0]
        scores = prediction['scores']
        # Paint every detection above the confidence threshold.
        for idx, score in enumerate(scores):
            if score > 0.5:
                mask = prediction['masks'][idx][0].cpu().numpy()
                mask = mask > 0.5
                cls_id = prediction['labels'][idx].item()
                all_cls_mask_color[mask] = colors[cls_id]
                all_cls_mask_index[mask] = 1
        # Linear blend of the source image with the colour masks.
        img_weight = cv2.addWeighted(imgsrc, 0.4, all_cls_mask_color, 0.6, 0)
        all_mask = all_cls_mask_index == 1
        result = np.copy(imgsrc)
        # Only keep the blended pixels where a mask is actually present.
        result[all_mask] = img_weight[all_mask]
        union = np.concatenate((imgsrc, result), axis=1)
        cv2.imshow('', union)
        cv2.waitKey(0)
def get_instance_segmentation_model(num_classes):
    """Return a COCO-pretrained Mask R-CNN whose box and mask heads have
    been replaced so they predict ``num_classes`` classes."""
    # Start from the torchvision model pre-trained on COCO.
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # Print the architecture; handy when swapping the backbone or heads
    # by module name.
    print(model)
    # Replace the box-classification head with one sized to num_classes.
    box_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)
    # Replace the mask head as well, keeping a 256-channel hidden layer.
    mask_in_features = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(
        mask_in_features, 256, num_classes)
    return model
class backbone_body(torch.nn.ModuleDict):
    """Backbone wrapper that runs its child modules in insertion order and
    collects the intermediate feature maps named in ``return_layers``.

    :param layers: ordered mapping of layer name -> module, run sequentially.
    :param return_layers: mapping of layer name -> output key; the output of
        every listed layer is captured in the returned OrderedDict.
    """

    def __init__(self, layers, return_layers):
        super().__init__(layers)
        self.return_layers = return_layers

    def forward(self, x):
        # Fix: OrderedDict was referenced without ever being imported at
        # module level, so the first forward pass raised NameError.
        from collections import OrderedDict
        out = OrderedDict()
        for name, module in self.named_children():
            x = module(x)
            if name in self.return_layers:
                out[self.return_layers[name]] = x
        return out
class BackboneFPN(torch.nn.Sequential):
    """Chain a backbone body with an FPN head, exposing ``out_channels`` as
    required by the torchvision detection heads.

    :param body: module producing the intermediate feature maps.
    :param fpn: feature-pyramid module consuming the body's output.
    :param out_channels: channel count of every FPN output map.
    """

    def __init__(self, body, fpn, out_channels):
        # Fix: OrderedDict was referenced without ever being imported at
        # module level, so instantiating this class raised NameError.
        from collections import OrderedDict
        d = OrderedDict([("body", body),
                          ("fpn", fpn)])
        super(BackboneFPN, self).__init__(d)
        self.out_channels = out_channels
def maskrcnn_resnet18_fpn(num_classes):
    """Build a Mask R-CNN model with a ResNet-18 + FPN backbone.

    :param num_classes: number of output classes (including background).
    :return: a torchvision ``MaskRCNN`` model.
    """
    # Fix: these names were referenced but never imported at module level,
    # so calling this function raised NameError. Import locally so the
    # function is self-contained.
    from collections import OrderedDict
    from torchvision.models.detection.mask_rcnn import MaskRCNN
    from torchvision.ops.feature_pyramid_network import (
        FeaturePyramidNetwork, LastLevelMaxPool)
    src_backbone = torchvision.models.resnet18(pretrained=True)
    # Keep only the conv stages; the avgpool/fc tail is dropped below.
    return_layers = {'layer1': 0,
                     'layer2': 1,
                     'layer3': 2,
                     'layer4': 3}
    names = [name for name, _ in src_backbone.named_children()]
    # Validate up front; fail loudly if the backbone layout changed.
    if not set(return_layers).issubset(names):
        raise ValueError("return_layers are not present in model")
    orig_return_layers = return_layers
    # Work on a copy so the original mapping stays intact.
    return_layers = dict(return_layers)
    layers = OrderedDict()
    for name, module in src_backbone.named_children():
        layers[name] = module
        if name in return_layers:
            del return_layers[name]
        if not return_layers:
            # All requested layers collected; drop the remaining tail.
            break
    backbone_module = backbone_body(layers, orig_return_layers)
    # FPN input channels: resnet18's layer1..layer4 emit 64/128/256/512.
    in_channels_stage2 = 64
    in_channels_list = [
        in_channels_stage2,
        in_channels_stage2 * 2,
        in_channels_stage2 * 4,
        in_channels_stage2 * 8,
    ]
    out_channels = 64
    fpn = FeaturePyramidNetwork(
        in_channels_list=in_channels_list,
        out_channels=out_channels,
        extra_blocks=LastLevelMaxPool(),
    )
    backbone_fpn = BackboneFPN(backbone_module,
                              fpn,
                              out_channels)
    return MaskRCNN(backbone_fpn, num_classes)
| [
"torchvision.models.resnet18",
"cv2.imshow",
"torchvision.models.detection.mask_rcnn.MaskRCNNPredictor",
"torch.cuda.is_available",
"os.listdir",
"cv2.addWeighted",
"numpy.concatenate",
"cv2.waitKey",
"random.shuffle",
"torchvision.models.detection.faster_rcnn.FastRCNNPredictor",
"numpy.transpos... | [((1997, 2064), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'torchvision.models.detection.maskrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2047, 2064), False, 'import torchvision\n'), ((2153, 2172), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (2163, 2172), False, 'import os\n'), ((2177, 2190), 'random.shuffle', 'shuffle', (['imgs'], {}), '(imgs)\n', (2184, 2190), False, 'from random import shuffle\n'), ((3472, 3539), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'torchvision.models.detection.maskrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3522, 3539), False, 'import torchvision\n'), ((3812, 3855), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (3829, 3855), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((4112, 4174), 'torchvision.models.detection.mask_rcnn.MaskRCNNPredictor', 'MaskRCNNPredictor', (['in_features_mask', 'hidden_layer', 'num_classes'], {}), '(in_features_mask, hidden_layer, num_classes)\n', (4129, 4174), False, 'from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n'), ((5053, 5097), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5080, 5097), False, 'import torchvision\n'), ((1474, 1499), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (1491, 1499), True, 'import numpy as np\n'), ((1512, 1537), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (1529, 1537), True, 'import numpy as np\n'), ((1550, 1575), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (1567, 1575), True, 'import numpy as np\n'), ((1814, 1839), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1837, 1839), False, 
'import torch\n'), ((1790, 1810), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1802, 1810), False, 'import torch\n'), ((1845, 1864), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1857, 1864), False, 'import torch\n'), ((2304, 2325), 'numpy.zeros_like', 'np.zeros_like', (['imgsrc'], {}), '(imgsrc)\n', (2317, 2325), True, 'import numpy as np\n'), ((2355, 2376), 'numpy.zeros_like', 'np.zeros_like', (['imgsrc'], {}), '(imgsrc)\n', (2368, 2376), True, 'import numpy as np\n'), ((2419, 2447), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (2431, 2447), True, 'import numpy as np\n'), ((2462, 2498), 'torch.tensor', 'torch.tensor', (['img'], {'dtype': 'torch.float'}), '(img, dtype=torch.float)\n', (2474, 2498), False, 'import torch\n'), ((3025, 3081), 'cv2.addWeighted', 'cv2.addWeighted', (['imgsrc', '(0.4)', 'all_cls_mask_color', '(0.6)', '(0)'], {}), '(imgsrc, 0.4, all_cls_mask_color, 0.6, 0)\n', (3040, 3081), False, 'import cv2\n'), ((3150, 3165), 'numpy.copy', 'np.copy', (['imgsrc'], {}), '(imgsrc)\n', (3157, 3165), True, 'import numpy as np\n'), ((3252, 3292), 'numpy.concatenate', 'np.concatenate', (['(imgsrc, result)'], {'axis': '(1)'}), '((imgsrc, result), axis=1)\n', (3266, 3292), True, 'import numpy as np\n'), ((3301, 3322), 'cv2.imshow', 'cv2.imshow', (['""""""', 'union'], {}), "('', union)\n", (3311, 3322), False, 'import cv2\n'), ((3331, 3345), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3342, 3345), False, 'import cv2\n'), ((2243, 2273), 'os.path.join', 'os.path.join', (['img_dir', 'imgs[i]'], {}), '(img_dir, imgs[i])\n', (2255, 2273), False, 'import os\n'), ((2542, 2557), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2555, 2557), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/17 07:01 PM
# @Author : zhangzhen
# @Site :
# @File : keras2_demo.py
# @Software: PyCharm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
def build(input_dim=20, out_dim=10, activation='softmax'):
    """Create a simple MLP: two ReLU hidden layers of 64 units with dropout,
    followed by an output layer using the requested activation."""
    mlp = tf.keras.Sequential([
        layers.Dense(64, activation='relu', input_dim=input_dim),
        layers.Dropout(0.5),
        layers.Dense(64, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(out_dim, activation=activation),
    ])
    return mlp
def softmax():
    """Train the MLP on random data as a 10-way softmax classifier with SGD."""
    train_x = np.random.random((1000, 20))
    train_y = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
    test_x = np.random.random((100, 20))
    test_y = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
    model = build()
    # SGD with Nesterov momentum.
    optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    keras.utils.plot_model(model, 'sequential_mlp.png', show_shapes=True)
    model.fit(train_x, train_y, epochs=20, batch_size=128)
    score = model.evaluate(test_x, test_y, batch_size=128)
def binary():
    """Train the MLP on random data as a single-logit binary classifier."""
    train_x = np.random.random((1000, 20))
    train_y = np.random.randint(2, size=(1000, 1))
    test_x = np.random.random((100, 20))
    test_y = np.random.randint(2, size=(100, 1))
    model = build(input_dim=20, out_dim=1, activation='sigmoid')
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    keras.utils.plot_model(model, 'sequential_mlp.png', show_shapes=True)
    model.fit(train_x, train_y, epochs=20, batch_size=128)
    score = model.evaluate(test_x, test_y, batch_size=128)
if __name__ == '__main__':
    # Run the binary-classification demo; uncomment softmax() for the
    # 10-class variant instead.
    # softmax()
    binary()
| [
"tensorflow.keras.Sequential",
"numpy.random.random",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.optimizers.SGD",
"numpy.random.randint",
"tensorflow.keras.layers.Dense"
] | [((346, 367), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (365, 367), True, 'import tensorflow as tf\n'), ((669, 697), 'numpy.random.random', 'np.random.random', (['(1000, 20)'], {}), '((1000, 20))\n', (685, 697), True, 'import numpy as np\n'), ((807, 834), 'numpy.random.random', 'np.random.random', (['(100, 20)'], {}), '((100, 20))\n', (823, 834), True, 'import numpy as np\n'), ((961, 1030), 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'learning_rate': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(learning_rate=0.01, momentum=0.9, nesterov=True)\n', (981, 1030), False, 'from tensorflow import keras\n'), ((1123, 1192), 'tensorflow.keras.utils.plot_model', 'keras.utils.plot_model', (['model', '"""sequential_mlp.png"""'], {'show_shapes': '(True)'}), "(model, 'sequential_mlp.png', show_shapes=True)\n", (1145, 1192), False, 'from tensorflow import keras\n'), ((1361, 1389), 'numpy.random.random', 'np.random.random', (['(1000, 20)'], {}), '((1000, 20))\n', (1377, 1389), True, 'import numpy as np\n'), ((1404, 1440), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1000, 1)'}), '(2, size=(1000, 1))\n', (1421, 1440), True, 'import numpy as np\n'), ((1454, 1481), 'numpy.random.random', 'np.random.random', (['(100, 20)'], {}), '((100, 20))\n', (1470, 1481), True, 'import numpy as np\n'), ((1495, 1530), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(100, 1)'}), '(2, size=(100, 1))\n', (1512, 1530), True, 'import numpy as np\n'), ((1690, 1759), 'tensorflow.keras.utils.plot_model', 'keras.utils.plot_model', (['model', '"""sequential_mlp.png"""'], {'show_shapes': '(True)'}), "(model, 'sequential_mlp.png', show_shapes=True)\n", (1712, 1759), False, 'from tensorflow import keras\n'), ((382, 438), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'input_dim': 'input_dim'}), "(64, activation='relu', input_dim=input_dim)\n", (394, 438), False, 'from 
tensorflow.keras import layers\n'), ((453, 472), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (467, 472), False, 'from tensorflow.keras import layers\n'), ((489, 524), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (501, 524), False, 'from tensorflow.keras import layers\n'), ((540, 559), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (554, 559), False, 'from tensorflow.keras import layers\n'), ((576, 620), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['out_dim'], {'activation': 'activation'}), '(out_dim, activation=activation)\n', (588, 620), False, 'from tensorflow.keras import layers\n'), ((739, 776), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(1000, 1)'}), '(10, size=(1000, 1))\n', (756, 776), True, 'import numpy as np\n'), ((875, 911), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(100, 1)'}), '(10, size=(100, 1))\n', (892, 911), True, 'import numpy as np\n')] |
import sys
import numpy as np
# Input: first token is n, then row a (n ints) and row b (n ints); the
# leading [1:] drops n before reshaping into the two rows.
a, b = np.array(sys.stdin.read().split(), dtype=np.int64)[1:].reshape(2, -1)
# a becomes its prefix sums (in place); b becomes its suffix sums
# (cumulative sum taken over the reversed array, then reversed back).
np.cumsum(a, out=a)
b = np.cumsum(b[::-1])[::-1]
def main():
    # Answer: maximum over split points i of a[0..i] + b[i..n-1].
    print(np.amax(a + b))
if __name__ == "__main__":
    main()
| [
"numpy.cumsum",
"sys.stdin.read",
"numpy.amax"
] | [((114, 133), 'numpy.cumsum', 'np.cumsum', (['a'], {'out': 'a'}), '(a, out=a)\n', (123, 133), True, 'import numpy as np\n'), ((139, 157), 'numpy.cumsum', 'np.cumsum', (['b[::-1]'], {}), '(b[::-1])\n', (148, 157), True, 'import numpy as np\n'), ((192, 206), 'numpy.amax', 'np.amax', (['(a + b)'], {}), '(a + b)\n', (199, 206), True, 'import numpy as np\n'), ((52, 68), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (66, 68), False, 'import sys\n')] |
import numpy
# Read matrix dimensions, then an n x m integer matrix (one row per line).
n,m=map(int,input().split())
a=numpy.zeros((n,m),int)
for i in range(n):
    a[i]=numpy.array(input().split())
# Maximin: the largest value among the per-row minima.
print(numpy.max(numpy.min(a,axis=1)))
| [
"numpy.zeros",
"numpy.min"
] | [((44, 68), 'numpy.zeros', 'numpy.zeros', (['(n, m)', 'int'], {}), '((n, m), int)\n', (55, 68), False, 'import numpy\n'), ((140, 160), 'numpy.min', 'numpy.min', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (149, 160), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from evaluate import evaluate
from utils import get_data, tf_melspectogram
from shallow_nn import shallow_nn
from deep_nn import deep_nn
from shallow_nn_improve import shallow_nn as shallow_nn_improve
from deep_nn_improve import deep_nn as deep_nn_improve
# Run-configuration and hyper-parameter flags, settable on the command line.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('epochs', 100,
                            'Number of mini-batches to train on. (default: %(default)d)')
tf.app.flags.DEFINE_integer('network', 0,
                            'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)')
tf.app.flags.DEFINE_integer('improve', 0,
                            'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)')
tf.app.flags.DEFINE_float('decay', 0,
                          'Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d')
tf.app.flags.DEFINE_integer('log_frequency', 100,
                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('augment', 0,
                            'Use augmentation, 0 for off, 1 for on (default: %(default)d)')
tf.app.flags.DEFINE_integer('num_parallel_calls', 1,
                            'Number of cpu cores to use to preprocess data')
tf.app.flags.DEFINE_integer('save_model', 1000,
                            'Number of steps between model saves (default: %(default)d)')
tf.app.flags.DEFINE_integer('save_images', 0,
                            'Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer(
    'batch_size', 16, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float(
    'learning_rate', 5e-5, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer(
    "input_width", 80, "Input width (default: %(default)d)")
tf.app.flags.DEFINE_integer(
    "input_height", 80, "Input height (default: %(default)d)")
tf.app.flags.DEFINE_integer(
    "input_channels", 1, "Input channels (default: %(default)d)"
)
tf.app.flags.DEFINE_integer(
    "num_classes", 10, "Number of classes (default: %(default)d)"
)
tf.app.flags.DEFINE_string(
    "log_dir",
    "{cwd}/logs/".format(cwd=os.getcwd()),
    "Directory where to write event logs and checkpoint. (default: %(default)s)",
)
# Per-run log directory: the name encodes the key hyper-parameters so
# TensorBoard runs are easy to tell apart.
run_log_dir = os.path.join(FLAGS.log_dir, 'exp_lr_{learning_rate}_decay_{decay}_bs_{batch_size}_e_{epochs}_{network}_improve_{improve}_augment_{augment}'.format(
    learning_rate=FLAGS.learning_rate, decay=FLAGS.decay, batch_size=FLAGS.batch_size, epochs=FLAGS.epochs, network='shallow' if (FLAGS.network == 0) else 'deep', improve=FLAGS.improve, augment=FLAGS.augment))
def model(iterator, is_training, nn):
    """Build the training graph for one batch: forward pass through ``nn``,
    regularised cross-entropy loss, and a streaming accuracy metric.

    Returns (loss, image summary, accuracy tensor, accuracy update op).
    """
    batch_x, batch_y = iterator.get_next()
    with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
        logits, img_summary = nn(batch_x, is_training)
    # Mean categorical cross-entropy over the batch.
    with tf.variable_scope("cross_entropy"):
        per_example_xent = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=batch_y, logits=logits)
        cross_entropy = tf.reduce_mean(per_example_xent)
    # Add whatever regularisation terms the network registered.
    reg_penalty = tf.losses.get_regularization_loss(
        name="total_regularization_loss")
    total_loss = cross_entropy + reg_penalty
    label_ids = tf.argmax(batch_y, axis=1)
    predicted_ids = tf.argmax(logits, axis=1)
    accuracy, accuracy_op = tf.metrics.accuracy(
        label_ids, predicted_ids, name="accuracy_train")
    return total_loss, img_summary, accuracy, accuracy_op
def calc_accuracy(iterator, is_training, nn):
    """Streaming accuracy of ``nn`` over the batches of ``iterator``.

    Returns the (accuracy tensor, update op) pair from tf.metrics.accuracy.
    """
    batch_x, batch_y = iterator.get_next()
    with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
        logits, _ = nn(batch_x, is_training)
    return tf.metrics.accuracy(
        tf.argmax(batch_y, axis=1), tf.argmax(logits, axis=1),
        name="accuracy_test")
def accumulate_results(iterator, is_training, nn):
    """Forward one sample through ``nn`` and return its input, label,
    prediction logits and track id as a tuple of tensors."""
    sample_x, sample_y, track_id = iterator.get_next()
    with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
        logits, _ = nn(sample_x, is_training)
    return (sample_x, sample_y, logits, track_id)
def _preprocess(features, label):
    """Dataset map fn: pass features through, one-hot encode the label."""
    one_hot_label = tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)
    return features, one_hot_label
def main(_):
    """Train the selected network on the spectrogram data, log per-epoch
    training/validation summaries, then run a final per-track evaluation
    on the test set."""
    # get_data() returns train/test features, labels and track ids; the
    # train track ids (third element) are unused here.
    (
        train_set_data,
        train_set_labels,
        _,
        test_set_data,
        test_set_labels,
        test_set_track_ids,
    ) = get_data()
    print("Making TF graph")
    start = time.time()
    # Defaults to False so evaluation runs need no feed; training feeds True.
    is_training_placeholder = tf.placeholder_with_default(False, shape=())
    features_placeholder = tf.placeholder(
        tf.float32, (None, np.shape(train_set_data)[1])
    )
    # NOTE(review): (None) is just None here (not a 1-tuple), i.e. the
    # label shape is left fully unspecified.
    labels_placeholder = tf.placeholder(tf.uint8, (None))
    track_ids_placeholder = tf.placeholder(tf.uint8, (None))
    shuffle_buffer_size = len(train_set_data)
    # Shared shuffle/one-hot/batch pipeline; fed with either the train or
    # the test arrays through the two initializable iterators below.
    dataset = tf.data.Dataset.from_tensor_slices(
        (features_placeholder, labels_placeholder)
    )
    dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            _preprocess, FLAGS.batch_size, num_parallel_calls=FLAGS.num_parallel_calls)
    )
    dataset = dataset.prefetch(1)
    train_iterator = dataset.make_initializable_iterator()
    # NOTE(review): the test iterator shares the shuffled training pipeline,
    # so validation batches are shuffled too — confirm this is intended.
    test_iterator = dataset.make_initializable_iterator()
    # Separate evaluation pipeline: batch size 1, no shuffling, and it also
    # carries the track id of each sample.
    eval_dataset = tf.data.Dataset.from_tensor_slices(
        (features_placeholder, labels_placeholder, track_ids_placeholder)
    )
    eval_dataset = eval_dataset.map(
        lambda features, label, track_id: (
            features,
            tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8),
            track_id,
        )
    )
    eval_dataset = eval_dataset.batch(1)
    eval_iterator = eval_dataset.make_initializable_iterator()
    # Pick the network-builder function from the network/improve flags.
    if (FLAGS.network == 0):
        if (FLAGS.improve == 0):
            print("Using Shallow network")
            nn = shallow_nn
        else:
            print("Using Shallow Improved network")
            nn = shallow_nn_improve
    else:
        if (FLAGS.improve == 0):
            print("Using Deep Network")
            nn = deep_nn
        else:
            print("Using Deep Improved Network")
            nn = deep_nn_improve
    # Training graph: regularised loss plus a streaming train-accuracy metric.
    loss, _, train_acc, train_acc_op = model(
        train_iterator, is_training_placeholder, nn)
    global_step = tf.Variable(0, trainable=False)
    # Optional exponential learning-rate decay over 15000-step periods.
    if (FLAGS.decay > 0):
        learning_rate = tf.train.exponential_decay(
            FLAGS.learning_rate, global_step, 15000, FLAGS.decay)
    else:
        learning_rate = FLAGS.learning_rate
    # Adam Optimiser
    # default values match that in paper
    # UPDATE_OPS (e.g. batch-norm statistics) must run with each train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimiser = tf.train.AdamOptimizer(
            learning_rate, name="AdamOpt").minimize(loss, global_step=global_step)
    validation_accuracy, acc_op = calc_accuracy(
        test_iterator, is_training_placeholder, nn)
    evaluator = accumulate_results(
        eval_iterator, is_training_placeholder, nn)
    loss_summary = tf.summary.scalar("Loss", loss)
    acc_summary = tf.summary.scalar("Accuracy", validation_accuracy)
    train_acc_summary = tf.summary.scalar("Accuracy", train_acc)
    training_summary = tf.summary.merge([loss_summary, train_acc_summary])
    validation_summary = tf.summary.merge([acc_summary])
    # Isolate the variables stored behind the scenes by the metric operation
    running_vars = tf.get_collection(
        tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_test")
    train_running_vars = tf.get_collection(
        tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_train")
    # Define initializer to initialize/reset running variables
    running_vars_initializer = tf.variables_initializer(
        var_list=running_vars)
    train_running_vars_initializer = tf.variables_initializer(
        var_list=train_running_vars)
    end = time.time()
    print("Time to prep TF ops: {:.2f}s".format(end - start))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Freeze the graph so accidental op creation in the loops raises.
        sess.graph.finalize()
        summary_writer = tf.summary.FileWriter(
            run_log_dir + "_train", sess.graph)
        summary_writer_validation = tf.summary.FileWriter(
            run_log_dir + "_validate", sess.graph
        )
        for epoch in range(FLAGS.epochs):
            # Reset the streaming accuracy accumulators each epoch.
            sess.run(running_vars_initializer)
            sess.run(train_running_vars_initializer)
            sess.run(train_iterator.initializer, feed_dict={
                     features_placeholder: train_set_data, labels_placeholder: train_set_labels})
            # Run until all samples done
            while True:
                try:
                    _, acc_train, summary_str = sess.run([optimiser, train_acc_op, training_summary], feed_dict={
                        is_training_placeholder: True})
                except tf.errors.OutOfRangeError:
                    break
            summary_writer.add_summary(summary_str, epoch)
            # Validation pass over the test set (is_training defaults False).
            sess.run(test_iterator.initializer, feed_dict={
                     features_placeholder: test_set_data, labels_placeholder: test_set_labels})
            while True:
                try:
                    acc, acc_summary_str = sess.run(
                        [acc_op, validation_summary])
                except tf.errors.OutOfRangeError:
                    break
            summary_writer_validation.add_summary(acc_summary_str, epoch)
            print("Accuracy after epoch {} - Training: {:.2f}% Validation: {:.2f}%".format(
                str(epoch), acc_train * 100.0, acc * 100.0))
        # Final per-sample evaluation pass collecting predictions per track.
        sess.run(eval_iterator.initializer, feed_dict={
                 features_placeholder: test_set_data, labels_placeholder: test_set_labels, track_ids_placeholder: test_set_track_ids})
        results = [None] * np.shape(test_set_data)[0]
        count = 0
        while True:
            try:
                evaluated = sess.run(evaluator)
                results[count] = evaluated
                count += 1
            except tf.errors.OutOfRangeError:
                break
        raw_probability, maximum_probability, majority_vote = evaluate(results)
        print("-----===== Summary =====-----")
        print("Raw Probability: {:.2f}%".format(raw_probability * 100.0))
        print("Maximum Probability: {:.2f}%".format(
            maximum_probability * 100.0))
        print("Majority Vote: {:.2f}%".format(majority_vote * 100))
if __name__ == "__main__":
    # Entry point: tf.app.run parses the command-line flags, then calls main.
    tf.app.run(main=main)
| [
"utils.get_data",
"tensorflow.control_dependencies",
"tensorflow.variables_initializer",
"tensorflow.app.run",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.exponential_decay",
"tensorflow.summary.scalar",
"tensorflow.train.AdamOpti... | [((461, 569), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(100)', '"""Number of mini-batches to train on. (default: %(default)d)"""'], {}), "('epochs', 100,\n 'Number of mini-batches to train on. (default: %(default)d)')\n", (488, 569), True, 'import tensorflow as tf\n'), ((594, 721), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""network"""', '(0)', '"""Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)"""'], {}), "('network', 0,\n 'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)'\n )\n", (621, 721), True, 'import tensorflow as tf\n'), ((741, 880), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""improve"""', '(0)', '"""Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)"""'], {}), "('improve', 0,\n 'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)'\n )\n", (768, 880), True, 'import tensorflow as tf\n'), ((900, 1034), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""decay"""', '(0)', '"""Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d"""'], {}), "('decay', 0,\n 'Amount to decay learning rate. 
Greater than 0 enables decaying (default: %(default)d'\n )\n", (925, 1034), True, 'import tensorflow as tf\n'), ((1052, 1212), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log_frequency"""', '(100)', '"""Number of steps between logging results to the console and saving summaries (default: %(default)d)"""'], {}), "('log_frequency', 100,\n 'Number of steps between logging results to the console and saving summaries (default: %(default)d)'\n )\n", (1079, 1212), True, 'import tensorflow as tf\n'), ((1232, 1341), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""augment"""', '(0)', '"""Use augmentation, 0 for off, 1 for on (default: %(default)d)"""'], {}), "('augment', 0,\n 'Use augmentation, 0 for off, 1 for on (default: %(default)d)')\n", (1259, 1341), True, 'import tensorflow as tf\n'), ((1366, 1471), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_parallel_calls"""', '(1)', '"""Number of cpu cores to use to preprocess data"""'], {}), "('num_parallel_calls', 1,\n 'Number of cpu cores to use to preprocess data')\n", (1393, 1471), True, 'import tensorflow as tf\n'), ((1496, 1609), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_model"""', '(1000)', '"""Number of steps between model saves (default: %(default)d)"""'], {}), "('save_model', 1000,\n 'Number of steps between model saves (default: %(default)d)')\n", (1523, 1609), True, 'import tensorflow as tf\n'), ((1634, 1776), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_images"""', '(0)', '"""Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)"""'], {}), "('save_images', 0,\n 'Whether to save spectrogram images, 0 to not save, 1 to save. 
(default: %(default)d)'\n )\n", (1661, 1776), True, 'import tensorflow as tf\n'), ((1828, 1937), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(16)', '"""Number of examples per mini-batch (default: %(default)d)"""'], {}), "('batch_size', 16,\n 'Number of examples per mini-batch (default: %(default)d)')\n", (1855, 1937), True, 'import tensorflow as tf\n'), ((1939, 2032), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(5e-05)', '"""Learning rate (default: %(default)d)"""'], {}), "('learning_rate', 5e-05,\n 'Learning rate (default: %(default)d)')\n", (1964, 2032), True, 'import tensorflow as tf\n'), ((2033, 2121), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_width"""', '(80)', '"""Input width (default: %(default)d)"""'], {}), "('input_width', 80,\n 'Input width (default: %(default)d)')\n", (2060, 2121), True, 'import tensorflow as tf\n'), ((2123, 2213), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_height"""', '(80)', '"""Input height (default: %(default)d)"""'], {}), "('input_height', 80,\n 'Input height (default: %(default)d)')\n", (2150, 2213), True, 'import tensorflow as tf\n'), ((2215, 2308), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_channels"""', '(1)', '"""Input channels (default: %(default)d)"""'], {}), "('input_channels', 1,\n 'Input channels (default: %(default)d)')\n", (2242, 2308), True, 'import tensorflow as tf\n'), ((2311, 2405), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(10)', '"""Number of classes (default: %(default)d)"""'], {}), "('num_classes', 10,\n 'Number of classes (default: %(default)d)')\n", (2338, 2405), True, 'import tensorflow as tf\n'), ((3423, 3490), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {'name': '"""total_regularization_loss"""'}), 
"(name='total_regularization_loss')\n", (3456, 3490), True, 'import tensorflow as tf\n'), ((4385, 4451), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (4395, 4451), True, 'import tensorflow as tf\n'), ((4646, 4656), 'utils.get_data', 'get_data', ([], {}), '()\n', (4654, 4656), False, 'from utils import get_data, tf_melspectogram\n'), ((4699, 4710), 'time.time', 'time.time', ([], {}), '()\n', (4708, 4710), False, 'import time\n'), ((4742, 4786), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()'}), '(False, shape=())\n', (4769, 4786), True, 'import tensorflow as tf\n'), ((4918, 4948), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4932, 4948), True, 'import tensorflow as tf\n'), ((4979, 5009), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4993, 5009), True, 'import tensorflow as tf\n'), ((5073, 5151), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder)'], {}), '((features_placeholder, labels_placeholder))\n', (5107, 5151), True, 'import tensorflow as tf\n'), ((5568, 5673), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder, track_ids_placeholder)'], {}), '((features_placeholder,\n labels_placeholder, track_ids_placeholder))\n', (5602, 5673), True, 'import tensorflow as tf\n'), ((6568, 6599), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6579, 6599), True, 'import tensorflow as tf\n'), ((6877, 6919), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (6894, 6919), True, 'import tensorflow as tf\n'), ((7304, 7335), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (7321, 7335), True, 'import tensorflow as tf\n'), ((7354, 7404), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'validation_accuracy'], {}), "('Accuracy', validation_accuracy)\n", (7371, 7404), True, 'import tensorflow as tf\n'), ((7429, 7469), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'train_acc'], {}), "('Accuracy', train_acc)\n", (7446, 7469), True, 'import tensorflow as tf\n'), ((7494, 7545), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, train_acc_summary]'], {}), '([loss_summary, train_acc_summary])\n', (7510, 7545), True, 'import tensorflow as tf\n'), ((7571, 7602), 'tensorflow.summary.merge', 'tf.summary.merge', (['[acc_summary]'], {}), '([acc_summary])\n', (7587, 7602), True, 'import tensorflow as tf\n'), ((7700, 7770), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_test"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_test')\n", (7717, 7770), True, 'import tensorflow as tf\n'), ((7805, 7876), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_train"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_train')\n", (7822, 7876), True, 'import tensorflow as tf\n'), ((7981, 8028), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'running_vars'}), '(var_list=running_vars)\n', (8005, 8028), True, 'import tensorflow as tf\n'), ((8075, 8128), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'train_running_vars'}), '(var_list=train_running_vars)\n', (8099, 8128), True, 'import tensorflow as tf\n'), ((8149, 8160), 'time.time', 'time.time', ([], {}), '()\n', (8158, 8160), False, 'import time\n'), ((10739, 10760), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main'}), '(main=main)\n', (10749, 10760), True, 
'import tensorflow as tf\n'), ((3043, 3090), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3060, 3090), True, 'import tensorflow as tf\n'), ((3186, 3220), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_entropy"""'], {}), "('cross_entropy')\n", (3203, 3220), True, 'import tensorflow as tf\n'), ((3625, 3650), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (3634, 3650), True, 'import tensorflow as tf\n'), ((3652, 3676), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (3661, 3676), True, 'import tensorflow as tf\n'), ((3865, 3912), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3882, 3912), True, 'import tensorflow as tf\n'), ((4015, 4040), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (4024, 4040), True, 'import tensorflow as tf\n'), ((4042, 4066), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (4051, 4066), True, 'import tensorflow as tf\n'), ((4221, 4268), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (4238, 4268), True, 'import tensorflow as tf\n'), ((5266, 5380), 'tensorflow.data.experimental.map_and_batch', 'tf.data.experimental.map_and_batch', (['_preprocess', 'FLAGS.batch_size'], {'num_parallel_calls': 'FLAGS.num_parallel_calls'}), '(_preprocess, FLAGS.batch_size,\n num_parallel_calls=FLAGS.num_parallel_calls)\n', (5300, 5380), True, 'import tensorflow as tf\n'), ((6650, 6735), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['FLAGS.learning_rate', 'global_step', '(15000)', 'FLAGS.decay'], {}), '(FLAGS.learning_rate, global_step, 15000, FLAGS.decay\n )\n', (6676, 6735), True, 'import tensorflow as tf\n'), ((6929, 
6964), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (6952, 6964), True, 'import tensorflow as tf\n'), ((8233, 8245), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8243, 8245), True, 'import tensorflow as tf\n'), ((8363, 8420), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_train')", 'sess.graph'], {}), "(run_log_dir + '_train', sess.graph)\n", (8384, 8420), True, 'import tensorflow as tf\n'), ((8470, 8530), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_validate')", 'sess.graph'], {}), "(run_log_dir + '_validate', sess.graph)\n", (8491, 8530), True, 'import tensorflow as tf\n'), ((10403, 10420), 'evaluate.evaluate', 'evaluate', (['results'], {}), '(results)\n', (10411, 10420), False, 'from evaluate import evaluate\n'), ((2480, 2491), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2489, 2491), False, 'import os\n'), ((3274, 3345), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'next_y', 'logits': 'y_out'}), '(labels=next_y, logits=y_out)\n', (3316, 3345), True, 'import tensorflow as tf\n'), ((8272, 8305), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8303, 8305), True, 'import tensorflow as tf\n'), ((4858, 4882), 'numpy.shape', 'np.shape', (['train_set_data'], {}), '(train_set_data)\n', (4866, 4882), True, 'import numpy as np\n'), ((5799, 5865), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (5809, 5865), True, 'import tensorflow as tf\n'), ((6986, 7039), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""AdamOpt"""'}), "(learning_rate, name='AdamOpt')\n", (7008, 7039), True, 'import tensorflow as tf\n'), ((10071, 10094), 'numpy.shape', 'np.shape', 
(['test_set_data'], {}), '(test_set_data)\n', (10079, 10094), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy as np
import pytest
from tensorflow.keras.models import Model
from imagededup.methods.cnn import CNN
from imagededup.utils.image_utils import load_image
# Paths to the image fixtures, resolved relative to this test module.
p = Path(__file__)
TEST_IMAGE = p.parent / 'data' / 'base_images' / 'ukbench00120.jpg'
TEST_IMAGE_DIR = p.parent / 'data' / 'base_images'
TEST_IMAGE_FORMATS_DIR = p.parent / 'data' / 'formats_images'
TEST_IMAGE_DIR_MIXED = p.parent / 'data' / 'mixed_images'
# Defaults the CNN method under test is expected to initialise with.
TEST_BATCH_SIZE = 64
TEST_TARGET_SIZE = (224, 224)
def data_encoding_map():
    """Return a small filename -> encoding mapping used as test input.

    The first and third entries are identical on purpose so the duplicate
    detection tests have a guaranteed match.
    """
    raw_encodings = {
        'ukbench00002.jpg': [1, 0, 0, 1],
        'ukbench00003.jpg': [1, 1, 0, 1],
        'ukbench00002_dup.jpg': [1, 0, 0, 1],
    }
    return {name: np.array(vector) for name, vector in raw_encodings.items()}
@pytest.fixture(scope='module')
def cnn():
    """Module-scoped CNN instance so the model is loaded only once."""
    return CNN()
@pytest.fixture
def mocker_save_json(mocker):
    """Patch save_json so tests can assert on persistence without disk I/O."""
    return mocker.patch('imagededup.methods.cnn.save_json')
def test__init(cnn):
    """CNN initialises with the expected batch size, target size and model."""
    assert cnn.batch_size == TEST_BATCH_SIZE
    assert cnn.target_size == TEST_TARGET_SIZE
    assert isinstance(cnn.model, Model)
def test__get_cnn_features_single(cnn):
    """A single image is encoded as a (1, 1024) numpy array."""
    img = load_image(TEST_IMAGE, target_size=(224, 224))
    result = cnn._get_cnn_features_single(img)
    assert isinstance(result, np.ndarray)
    assert result.shape == (1, 1024)
def test__get_cnn_features_batch(cnn):
    """_get_cnn_features_batch maps each image file to a 1024-d encoding.

    The same checks apply to both fixture directories, so the previously
    copy-pasted assertion blocks are collapsed into a single loop.
    """
    expected_base_files = [
        'ukbench00120.jpg',
        'ukbench01380.jpg',
        'ukbench08976.jpg',
        'ukbench08996.jpg',
        'ukbench09012.jpg',
        'ukbench09040.jpg',
        'ukbench09060.jpg',
        'ukbench09268.jpg',
        'ukbench09348.jpg',
        'ukbench09380.jpg',
    ]
    expected_format_files = [
        'ukbench09380.bmp',
        'ukbench09380.jpeg',
        'ukbench09380.png',
        'ukbench09380.svg',
    ]
    for image_dir, expected_files in (
        (TEST_IMAGE_DIR, expected_base_files),
        (TEST_IMAGE_FORMATS_DIR, expected_format_files),
    ):
        result = cnn._get_cnn_features_batch(image_dir)
        # Keys must come back in the expected (sorted) order.
        assert list(result.keys()) == expected_files
        for encoding in result.values():
            assert isinstance(encoding, np.ndarray)
            assert encoding.shape == (1024,)
def test_encode_image(cnn):
    """encode_image accepts a Path, a str path, or a preloaded array, and
    raises ValueError when neither a valid path nor an array is given."""
    result = cnn.encode_image(TEST_IMAGE)
    assert isinstance(result, np.ndarray)
    assert result.shape == (1, 1024)  # 1024 = 3*3*1024*2
    result = cnn.encode_image(str(TEST_IMAGE))
    assert isinstance(result, np.ndarray)
    assert result.shape == (1, 1024)  # 1024 = 3*3*1024*2
    with pytest.raises(ValueError):
        cnn.encode_image("")
    image_array = load_image(TEST_IMAGE)
    result = cnn.encode_image(image_array=image_array)
    assert result.shape == (1, 1024)  # 1024 = 3*3*1024*2
def test_encode_images(cnn):
    """encode_images maps each image in a directory to a 1024-d encoding,
    accepting both Path and str directories, and rejects non-existent paths.

    The three previously copy-pasted assertion blocks are collapsed into a
    single loop over (directory, expected file list) pairs.
    """
    expected_base_files = [
        'ukbench00120.jpg',
        'ukbench01380.jpg',
        'ukbench08976.jpg',
        'ukbench08996.jpg',
        'ukbench09012.jpg',
        'ukbench09040.jpg',
        'ukbench09060.jpg',
        'ukbench09268.jpg',
        'ukbench09348.jpg',
        'ukbench09380.jpg',
    ]
    expected_format_files = [
        'ukbench09380.bmp',
        'ukbench09380.jpeg',
        'ukbench09380.png',
        'ukbench09380.svg',
    ]
    for image_dir, expected_files in (
        (TEST_IMAGE_DIR, expected_base_files),
        (TEST_IMAGE_FORMATS_DIR, expected_format_files),
        (str(TEST_IMAGE_FORMATS_DIR), expected_format_files),  # str path accepted
    ):
        result = cnn.encode_images(image_dir)
        assert list(result.keys()) == expected_files
        for encoding in result.values():
            assert isinstance(encoding, np.ndarray)
            assert encoding.shape == (1024,)
    # A path that is not a directory must be rejected.
    with pytest.raises(ValueError):
        cnn.encode_images('abc')
def test__check_threshold_bounds_input_not_float(cnn):
    """A non-float threshold raises TypeError."""
    with pytest.raises(TypeError):
        cnn._check_threshold_bounds(thresh=1)
def test__check_threshold_bounds_input_out_of_range(cnn):
    """A threshold above 1.0 raises ValueError."""
    with pytest.raises(ValueError):
        cnn._check_threshold_bounds(thresh=1.1)
def test__find_duplicates_dict_scores_false(cnn):
    """With scores=False, duplicates are plain filenames (no score tuples)."""
    # check correctness
    encoding_map = data_encoding_map()
    dict_ret = cnn._find_duplicates_dict(
        encoding_map, min_similarity_threshold=0.9, scores=False
    )
    assert isinstance(dict_ret['ukbench00002.jpg'], list)
    assert len(dict_ret['ukbench00002.jpg']) == 1
    assert not isinstance(dict_ret['ukbench00002.jpg'][0], tuple)
    assert dict_ret['ukbench00002.jpg'][0] == 'ukbench00002_dup.jpg'
def test__find_duplicates_dict_scores_true(cnn, mocker_save_json):
    """With scores=True, duplicates are (filename, similarity) tuples and no
    file is written when outfile is left at its default."""
    # check correctness, also check that saving file is not triggered as outfile default value is False
    encoding_map = data_encoding_map()
    dict_ret = cnn._find_duplicates_dict(
        encoding_map, min_similarity_threshold=0.9, scores=True
    )
    assert isinstance(dict_ret['ukbench00002.jpg'], list)
    assert len(dict_ret['ukbench00002.jpg']) == 1
    assert isinstance(dict_ret['ukbench00002.jpg'][0], tuple)
    assert dict_ret['ukbench00002.jpg'][0][0] == 'ukbench00002_dup.jpg'
    assert isinstance(dict_ret['ukbench00002.jpg'][0][1], float)
    # Identical encodings must score a similarity of exactly 1.0.
    np.testing.assert_almost_equal(dict_ret['ukbench00002.jpg'][0][1], 1.0)
    mocker_save_json.assert_not_called()
def test__find_duplicates_dict_outfile_true(cnn, mocker_save_json):
    """When outfile is truthy, results must be persisted via save_json."""
    cnn._find_duplicates_dict(
        encoding_map=data_encoding_map(),
        min_similarity_threshold=0.8,
        scores=True,
        outfile=True,
    )
    mocker_save_json.assert_called_once_with(cnn.results, True)
# _find_duplicates_dir
def test__find_duplicates_dir(cnn, mocker):
    """_find_duplicates_dir encodes the directory once, then delegates to
    _find_duplicates_dict with the resulting encoding map."""
    encoding_map = data_encoding_map()
    threshold = 0.8
    scores = True
    outfile = True
    ret_val_find_dup_dict = {
        'filename1.jpg': [('dup1.jpg', 0.82)],
        'filename2.jpg': [('dup2.jpg', 0.90)],
    }
    encode_images_mocker = mocker.patch('imagededup.methods.cnn.CNN.encode_images')
    cnn.encoding_map = encoding_map
    find_dup_dict_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN._find_duplicates_dict',
        return_value=ret_val_find_dup_dict,
    )
    cnn._find_duplicates_dir(
        image_dir=TEST_IMAGE_DIR,
        min_similarity_threshold=threshold,
        scores=scores,
        outfile=outfile,
    )
    encode_images_mocker.assert_called_once_with(image_dir=TEST_IMAGE_DIR)
    find_dup_dict_mocker.assert_called_once_with(
        encoding_map=cnn.encoding_map,
        min_similarity_threshold=threshold,
        scores=scores,
        outfile=outfile,
    )
# find_duplicates
def test_find_duplicates_dir(cnn, mocker):
    """find_duplicates with image_dir forwards all arguments to
    _find_duplicates_dir unchanged."""
    threshold = 0.9
    scores = True
    outfile = True
    find_dup_dir_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN._find_duplicates_dir'
    )
    cnn.find_duplicates(
        image_dir=TEST_IMAGE_DIR,
        min_similarity_threshold=threshold,
        outfile=outfile,
        scores=scores,
    )
    find_dup_dir_mocker.assert_called_once_with(
        image_dir=TEST_IMAGE_DIR,
        min_similarity_threshold=threshold,
        scores=scores,
        outfile=outfile,
    )
def test_find_duplicates_dict(cnn, mocker):
    """find_duplicates with encoding_map forwards all arguments to
    _find_duplicates_dict unchanged."""
    encoding_map = data_encoding_map()
    threshold = 0.9
    scores = True
    outfile = True
    find_dup_dict_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN._find_duplicates_dict'
    )
    cnn.find_duplicates(
        encoding_map=encoding_map,
        min_similarity_threshold=threshold,
        outfile=outfile,
        scores=scores,
    )
    find_dup_dict_mocker.assert_called_once_with(
        encoding_map=encoding_map,
        min_similarity_threshold=threshold,
        scores=scores,
        outfile=outfile,
    )
# NOTE(review): "threhsold" is a typo for "threshold" in the test name;
# kept as-is to avoid changing the public identifier.
def test_find_duplicates_wrong_threhsold_input(cnn):
    """An out-of-range similarity threshold raises ValueError."""
    with pytest.raises(ValueError):
        cnn.find_duplicates(min_similarity_threshold=1.3)
def test_find_duplicates_wrong_input(cnn):
    """Calling find_duplicates with neither image_dir nor encoding_map raises."""
    with pytest.raises(ValueError):
        cnn.find_duplicates()
# find_duplicates_to_remove
def test_find_duplicates_to_remove_outfile_false(cnn, mocker, mocker_save_json):
    """With outfile=False, the removal list is derived from find_duplicates
    without persisting anything to disk."""
    threshold = 0.9
    outfile = False
    ret_val_find_dup_dict = {
        'filename.jpg': [('dup1.jpg', 3)],
        'filename2.jpg': [('dup2.jpg', 10)],
    }
    find_duplicates_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN.find_duplicates', return_value=ret_val_find_dup_dict
    )
    get_files_to_remove_mocker = mocker.patch(
        'imagededup.methods.cnn.get_files_to_remove'
    )
    cnn.find_duplicates_to_remove(
        image_dir=TEST_IMAGE_DIR, min_similarity_threshold=threshold, outfile=outfile
    )
    find_duplicates_mocker.assert_called_once_with(
        image_dir=TEST_IMAGE_DIR,
        encoding_map=None,
        min_similarity_threshold=threshold,
        scores=False,
    )
    get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)
    mocker_save_json.assert_not_called()
def test_find_duplicates_to_remove_outfile_true(cnn, mocker, mocker_save_json):
    """With outfile set, the removal list is persisted via save_json."""
    threshold = 0.9
    outfile = True
    ret_val_find_dup_dict = {
        'filename.jpg': ['dup1.jpg'],
        'filename2.jpg': ['dup2.jpg'],
    }
    ret_val_get_files_to_remove = ['1.jpg', '2.jpg']
    find_duplicates_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN.find_duplicates', return_value=ret_val_find_dup_dict
    )
    get_files_to_remove_mocker = mocker.patch(
        'imagededup.methods.cnn.get_files_to_remove',
        return_value=ret_val_get_files_to_remove,
    )
    cnn.find_duplicates_to_remove(
        image_dir=TEST_IMAGE_DIR, min_similarity_threshold=threshold, outfile=outfile
    )
    find_duplicates_mocker.assert_called_once_with(
        image_dir=TEST_IMAGE_DIR,
        encoding_map=None,
        min_similarity_threshold=threshold,
        scores=False,
    )
    get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)
    mocker_save_json.assert_called_once_with(ret_val_get_files_to_remove, outfile)
def test_find_duplicates_to_remove_encoding_map(cnn, mocker, mocker_save_json):
    """find_duplicates_to_remove also accepts an encoding_map instead of a
    directory, forwarding it with image_dir=None."""
    threshold = 0.9
    outfile = True
    ret_val_find_dup_dict = {
        'filename.jpg': ['dup1.jpg'],
        'filename2.jpg': ['dup2.jpg'],
    }
    ret_val_get_files_to_remove = ['1.jpg', '2.jpg']
    encoding_map = data_encoding_map()
    find_duplicates_mocker = mocker.patch(
        'imagededup.methods.cnn.CNN.find_duplicates', return_value=ret_val_find_dup_dict
    )
    get_files_to_remove_mocker = mocker.patch(
        'imagededup.methods.cnn.get_files_to_remove',
        return_value=ret_val_get_files_to_remove,
    )
    cnn.find_duplicates_to_remove(
        encoding_map=encoding_map, min_similarity_threshold=threshold, outfile=outfile
    )
    find_duplicates_mocker.assert_called_once_with(
        encoding_map=encoding_map,
        image_dir=None,
        min_similarity_threshold=threshold,
        scores=False,
    )
    get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)
    mocker_save_json.assert_called_once_with(ret_val_get_files_to_remove, outfile)
# Integration tests
# test find_duplicates with directory path
def test_find_duplicates_dir_integration(cnn):
    """End-to-end run of find_duplicates over a directory of mixed images:
    flipped/resized copies are found, rotated and unrelated images are not."""
    expected_duplicates = {
        'ukbench00120.jpg': [
            ('ukbench00120_hflip.jpg', 0.9672552),
            ('ukbench00120_resize.jpg', 0.98120844),
        ],
        'ukbench00120_hflip.jpg': [
            ('ukbench00120.jpg', 0.9672552),
            ('ukbench00120_resize.jpg', 0.95676106),
        ],
        'ukbench00120_resize.jpg': [
            ('ukbench00120.jpg', 0.98120844),
            ('ukbench00120_hflip.jpg', 0.95676106),
        ],
        'ukbench00120_rotation.jpg': [],
        'ukbench09268.jpg': [],
    }
    duplicates = cnn.find_duplicates(
        image_dir=TEST_IMAGE_DIR_MIXED,
        min_similarity_threshold=0.9,
        scores=True,
        outfile=False,
    )
    # verify variable type
    assert isinstance(duplicates['ukbench00120.jpg'][0][1], np.float32)
    # verify that all files have been considered for deduplication
    assert len(duplicates) == len(expected_duplicates)
    # verify for each file that expected files have been received as duplicates
    for k in duplicates.keys():
        dup_val = duplicates[k]
        expected_val = expected_duplicates[k]
        dup_ret = set(map(lambda x: x[0], dup_val))
        expected_ret = set(map(lambda x: x[0], expected_val))
        assert dup_ret == expected_ret
# test find_duplicates with encoding map
def test_find_duplicates_encoding_integration(cnn):
    """Same end-to-end check as the directory variant, but via a precomputed
    encoding map; results must be identical."""
    expected_duplicates = {
        'ukbench00120.jpg': [
            ('ukbench00120_hflip.jpg', 0.9672552),
            ('ukbench00120_resize.jpg', 0.98120844),
        ],
        'ukbench00120_hflip.jpg': [
            ('ukbench00120.jpg', 0.9672552),
            ('ukbench00120_resize.jpg', 0.95676106),
        ],
        'ukbench00120_resize.jpg': [
            ('ukbench00120.jpg', 0.98120844),
            ('ukbench00120_hflip.jpg', 0.95676106),
        ],
        'ukbench00120_rotation.jpg': [],
        'ukbench09268.jpg': [],
    }
    encodings = cnn.encode_images(TEST_IMAGE_DIR_MIXED)
    duplicates = cnn.find_duplicates(
        encoding_map=encodings, min_similarity_threshold=0.9, scores=True, outfile=False
    )
    # verify variable type
    assert isinstance(duplicates['ukbench00120.jpg'][0][1], np.float32)
    # verify that all files have been considered for deduplication
    assert len(duplicates) == len(expected_duplicates)
    # verify for each file that expected files have been received as duplicates
    for k in duplicates.keys():
        dup_val = duplicates[k]
        expected_val = expected_duplicates[k]
        dup_ret = set(map(lambda x: x[0], dup_val))
        expected_ret = set(map(lambda x: x[0], expected_val))
        assert dup_ret == expected_ret
# test find_duplicates_to_remove with directory path
def test_find_duplicates_to_remove_dir_integration(cnn):
    """End-to-end: the flipped and resized copies are marked for removal."""
    duplicates_list = cnn.find_duplicates_to_remove(
        image_dir=TEST_IMAGE_DIR_MIXED, min_similarity_threshold=0.9, outfile=False
    )
    assert set(duplicates_list) == set(
        ['ukbench00120_resize.jpg', 'ukbench00120_hflip.jpg']
    )
# test find_duplicates_to_remove with encoding map
def test_find_duplicates_to_remove_encoding_integration(cnn):
    """End-to-end via an encoding map: same removal set as the dir variant."""
    encodings = cnn.encode_images(TEST_IMAGE_DIR_MIXED)
    duplicates_list = cnn.find_duplicates_to_remove(
        encoding_map=encodings, min_similarity_threshold=0.9, outfile=False
    )
    assert set(duplicates_list) == set(
        ['ukbench00120_resize.jpg', 'ukbench00120_hflip.jpg']
    )
# test to fix float32 not json serializable bug in find_duplicates
def test_find_duplicates_to_fix_not_json_serializable(cnn):
    """Regression test: np.float32 scores must be JSON serializable on save.

    Before the fix, calling find_duplicates with scores=True and an outfile
    raised a TypeError during JSON serialization. The original test made no
    assertions and leaked 'duplicates.json' into the working directory; it
    now verifies the outfile is written and always cleans it up.
    """
    outfile = Path('duplicates.json')
    try:
        duplicates = cnn.find_duplicates(
            image_dir=TEST_IMAGE_DIR_MIXED,
            min_similarity_threshold=0.8,
            scores=True,
            outfile=str(outfile),
        )
        # Reaching here means serialization succeeded; also check the file
        # exists and that every image in the directory was considered.
        assert outfile.is_file()
        assert len(duplicates) == 5
    finally:
        if outfile.exists():
            outfile.unlink()
| [
"pathlib.Path",
"numpy.testing.assert_almost_equal",
"numpy.array",
"pytest.raises",
"pytest.fixture",
"imagededup.methods.cnn.CNN",
"imagededup.utils.image_utils.load_image"
] | [((199, 213), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'from pathlib import Path\n'), ((714, 744), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (728, 744), False, 'import pytest\n'), ((767, 772), 'imagededup.methods.cnn.CNN', 'CNN', ([], {}), '()\n', (770, 772), False, 'from imagededup.methods.cnn import CNN\n'), ((1088, 1134), 'imagededup.utils.image_utils.load_image', 'load_image', (['TEST_IMAGE'], {'target_size': '(224, 224)'}), '(TEST_IMAGE, target_size=(224, 224))\n', (1098, 1134), False, 'from imagededup.utils.image_utils import load_image\n'), ((2640, 2662), 'imagededup.utils.image_utils.load_image', 'load_image', (['TEST_IMAGE'], {}), '(TEST_IMAGE)\n', (2650, 2662), False, 'from imagededup.utils.image_utils import load_image\n'), ((5554, 5625), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["dict_ret['ukbench00002.jpg'][0][1]", '(1.0)'], {}), "(dict_ret['ukbench00002.jpg'][0][1], 1.0)\n", (5584, 5625), True, 'import numpy as np\n'), ((573, 595), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (581, 595), True, 'import numpy as np\n'), ((625, 647), 'numpy.array', 'np.array', (['[1, 1, 0, 1]'], {}), '([1, 1, 0, 1])\n', (633, 647), True, 'import numpy as np\n'), ((681, 703), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (689, 703), True, 'import numpy as np\n'), ((2565, 2590), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2578, 2590), False, 'import pytest\n'), ((4105, 4130), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4118, 4130), False, 'import pytest\n'), ((4231, 4255), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4244, 4255), False, 'import pytest\n'), ((4372, 4397), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4385, 4397), False, 'import pytest\n'), ((8273, 8298), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8286, 8298), False, 'import pytest\n'), ((8412, 8437), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8425, 8437), False, 'import pytest\n')] |
try:
from osgeo import gdal, ogr, gdal_array # I/O image data
import numpy as np # math and array handling
import matplotlib.pyplot as plt # plot figures
import pandas as pd # handling large data as table sheets
from joblib import dump, load
from operator import itemgetter
import sys, os
from sklearn.model_selection import train_test_split
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_predict
# sys.path.append(r"F:\Work\Maptor\venv\Model")
from ReportModule import ReportModule
from sklearn import preprocessing
import joblib
except Exception as e:
print('Can not import files:' + str(e))
input("Press Enter to exit!")
sys.exit(0)
class PLSR_NSS_Model():
    def ComponentRegressor(self,features,X,y):
        """Score PLSR models with 1 .. n_features-1 components.

        For each candidate component count, compute 10-fold cross-validated
        predictions and record the mean squared error.

        :param features: 2-D array; only its column count is used.
        :param X: predictor matrix passed to cross_val_predict.
        :param y: target values.
        :return: [mse, component] - list of MSEs and the matching
            component counts (np.arange(1, n_features)).
        """
        mse = []
        component = np.arange(1, features.shape[1])
        for i in component:
            pls = PLSRegression(n_components=i)
            y_cv = cross_val_predict(pls, X, y, cv=10,n_jobs=-1,verbose=2)
            mse.append(mean_squared_error(y, y_cv))
        return [mse,component]
    def Regressor(self, suggested_comp, train_features, train_labels):
        """Fit a PLSRegression with the chosen number of components.

        Prints the R^2 score on the training data as a side effect.

        :param suggested_comp: number of PLS components to use.
        :param train_features: training predictor matrix.
        :param train_labels: training targets.
        :return: the fitted PLSRegression instance.
        """
        plsr = PLSRegression(n_components=suggested_comp,
                             scale=True,
                             max_iter=500,
                             tol=1e-06,
                             copy=True)
        plsr.fit(train_features, train_labels)
        print(plsr.score(train_features, train_labels))
        return plsr
def bandimportance(self,train_features,plsr):
importance = self.vip(plsr)
return importance
def testpredict(self, plsr,test_features):
predictions_test_ds = plsr.predict(test_features)
return predictions_test_ds
    def finaltraining(self,plsr,features,labels):
        """Refit the model on the full dataset and return the fitted model."""
        plsr.fit(features, labels)
        return plsr
def finalprediction(self,plsr,img_as_array):
prediction_ = plsr.predict(img_as_array)
return prediction_
def vip(self,model):
t = model.x_scores_
w = model.x_weights_
q = model.y_loadings_
p, h = w.shape
vips = np.zeros((p,))
s = np.diag(t.T @ t @ q.T @ q).reshape(h, -1)
total_s = np.sum(s)
for i in range(p):
weight = np.array([(w[i, j] / np.linalg.norm(w[:, j])) ** 2 for j in range(h)])
vips[i] = np.sqrt(p * (s.T @ weight) / total_s)
return vips
    def saveimage(self,img,prediction_,prediction_map,img_ds):
        """Write a single-band prediction array to a GeoTIFF.

        The output raster copies the geotransform and projection from the
        source dataset so the prediction overlays the input exactly.

        :param img: source image array; only its (rows, cols) shape is used.
        :param prediction_: prediction values written as band 1.
        :param prediction_map: output GeoTIFF file path.
        :param img_ds: source GDAL dataset providing georeferencing.
        """
        cols = img.shape[1]
        rows = img.shape[0]
        # NOTE(review): astype returns a new array, so this line has no
        # effect unless assigned back - presumably intended as a cast.
        prediction_.astype(np.float32)
        driver = gdal.GetDriverByName("gtiff")
        outdata = driver.Create(prediction_map, cols, rows, 1, gdal.GDT_Float32)
        outdata.SetGeoTransform(img_ds.GetGeoTransform())  ##sets same geotransform as input
        outdata.SetProjection(img_ds.GetProjection())  ##sets same projection as input
        outdata.GetRasterBand(1).WriteArray(prediction_)
        outdata.FlushCache()  ##saves to disk!!
        print('Image saved to: {}'.format(prediction_map))
def savemodel(self, model, file_name):
try:
joblib.dump(model, file_name)
except ValueError as e:
print(e)
logging.error("Exception occurred", exc_info=True)
print("Could not save image")
sys.exit(0) | [
"osgeo.gdal.GetDriverByName",
"numpy.sqrt",
"joblib.dump",
"numpy.linalg.norm",
"sklearn.metrics.mean_squared_error",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"sklearn.model_selection.cross_val_predict",
"sys.exit",
"sklearn.cross_decomposition.PLSRegression",
"numpy.arange"
] | [((811, 822), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (819, 822), False, 'import sys, os\n'), ((932, 963), 'numpy.arange', 'np.arange', (['(1)', 'features.shape[1]'], {}), '(1, features.shape[1])\n', (941, 963), True, 'import numpy as np\n'), ((1286, 1381), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': 'suggested_comp', 'scale': '(True)', 'max_iter': '(500)', 'tol': '(1e-06)', 'copy': '(True)'}), '(n_components=suggested_comp, scale=True, max_iter=500, tol=\n 1e-06, copy=True)\n', (1299, 1381), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((2254, 2268), 'numpy.zeros', 'np.zeros', (['(p,)'], {}), '((p,))\n', (2262, 2268), True, 'import numpy as np\n'), ((2341, 2350), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (2347, 2350), True, 'import numpy as np\n'), ((2728, 2757), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""gtiff"""'], {}), "('gtiff')\n", (2748, 2757), False, 'from osgeo import gdal, ogr, gdal_array\n'), ((1010, 1039), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': 'i'}), '(n_components=i)\n', (1023, 1039), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((1059, 1116), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['pls', 'X', 'y'], {'cv': '(10)', 'n_jobs': '(-1)', 'verbose': '(2)'}), '(pls, X, y, cv=10, n_jobs=-1, verbose=2)\n', (1076, 1116), False, 'from sklearn.model_selection import cross_val_predict\n'), ((2492, 2529), 'numpy.sqrt', 'np.sqrt', (['(p * (s.T @ weight) / total_s)'], {}), '(p * (s.T @ weight) / total_s)\n', (2499, 2529), True, 'import numpy as np\n'), ((3252, 3281), 'joblib.dump', 'joblib.dump', (['model', 'file_name'], {}), '(model, file_name)\n', (3263, 3281), False, 'import joblib\n'), ((1138, 1165), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'y_cv'], {}), '(y, y_cv)\n', (1156, 1165), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), 
((2281, 2307), 'numpy.diag', 'np.diag', (['(t.T @ t @ q.T @ q)'], {}), '(t.T @ t @ q.T @ q)\n', (2288, 2307), True, 'import numpy as np\n'), ((3452, 3463), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3460, 3463), False, 'import sys, os\n'), ((2420, 2443), 'numpy.linalg.norm', 'np.linalg.norm', (['w[:, j]'], {}), '(w[:, j])\n', (2434, 2443), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for SpotExtraction class"""
import unittest
from datetime import datetime as dt
from datetime import timedelta
import iris
import numpy as np
from iris.tests import IrisTest
from improver.metadata.constants.mo_attributes import MOSG_GRID_ATTRIBUTES
from improver.metadata.constants.time_types import TIME_COORDS
from improver.metadata.utilities import create_coordinate_hash
from improver.spotdata import UNIQUE_ID_ATTRIBUTE
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.spotdata.spot_extraction import SpotExtraction
from improver.synthetic_data.set_up_test_cubes import (
_create_time_point,
set_up_variable_cube,
)
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
class Test_SpotExtraction(IrisTest):
    """Test class for the SpotExtraction tests, setting up inputs."""
    def setUp(self):
        """
        Set up cubes and sitelists for use in testing SpotExtraction.
        The envisaged scenario is an island (1) surrounded by water (0).
           Land-sea              Orography             Diagnostic
          0 0 0 0 0             0 0 0 0 0           0  1  2  3  4
          0 1 1 1 0             0 1 2 1 0           5  6  7  8  9
          0 1 1 1 0             0 2 3 2 0          10 11 12 13 14
          0 1 1 1 0             0 1 2 1 0          15 16 17 18 19
          0 0 0 0 0             0 0 0 0 0          20 21 22 23 24
        """
        # Set up diagnostic data cube and neighbour cubes.
        diagnostic_data = np.arange(25).reshape(5, 5)
        # Grid attributes must be included in diagnostic cubes so their removal
        # can be tested
        attributes = {"mosg__grid_domain": "global", "mosg__grid_type": "standard"}
        self.cell_methods = (
            iris.coords.CellMethod("maximum", coords="time", intervals="1 hour"),
        )
        time = dt(2020, 6, 15, 12)
        frt = time - timedelta(hours=6)
        # y-x ordered diagnostic cube; an x-y ordered copy is made below so
        # both orderings can be exercised.
        diagnostic_cube_yx = set_up_variable_cube(
            diagnostic_data.T,
            name="air_temperature",
            units="K",
            attributes=attributes,
            domain_corner=(0, 0),
            grid_spacing=10,
            time=time,
            frt=frt,
        )
        diagnostic_cube_yx.cell_methods = self.cell_methods
        diagnostic_cube_xy = diagnostic_cube_yx.copy()
        enforce_coordinate_ordering(
            diagnostic_cube_xy,
            [
                diagnostic_cube_xy.coord(axis="x").name(),
                diagnostic_cube_xy.coord(axis="y").name(),
            ],
            anchor_start=False,
        )
        times = np.array([time + timedelta(hours=i) for i in range(-3, 2)])
        # Create as int64 values
        time_points = np.array([_create_time_point(time) for time in times])
        bounds = np.array(
            [
                [
                    _create_time_point(time - timedelta(hours=1)),
                    _create_time_point(time),
                ]
                for time in times
            ]
        )
        # Broadcast the times to a 2-dimensional grid that matches the diagnostic
        # data grid
        time_points = np.broadcast_to(time_points, (5, 5))
        bounds = np.broadcast_to(bounds, (5, 5, 2))
        # Create a 2-dimensional auxiliary time coordinate
        self.time_aux_coord = iris.coords.AuxCoord(
            time_points, "time", bounds=bounds, units=TIME_COORDS["time"].units
        )
        diagnostic_cube_2d_time = diagnostic_cube_yx.copy()
        diagnostic_cube_2d_time.coord("time").rename("time_in_local_timezone")
        diagnostic_cube_2d_time.add_aux_coord(self.time_aux_coord, data_dims=(0, 1))
        # Times expected to be extracted at the spot site grid indices.
        expected_indices = [[0, 0], [0, 0], [2, 2], [2, 2]]
        points = [self.time_aux_coord.points[y, x] for y, x in expected_indices]
        bounds = [self.time_aux_coord.bounds[y, x] for y, x in expected_indices]
        self.expected_spot_time_coord = self.time_aux_coord.copy(
            points=points, bounds=bounds
        )
        diagnostic_cube_hash = create_coordinate_hash(diagnostic_cube_yx)
        # neighbours, each group is for a point under two methods, e.g.
        # [ 0. 0. 0.] is the nearest point to the first spot site, whilst
        # [ 1. 1. -1.] is the nearest land point to the same site.
        neighbours = np.array(
            [
                [[0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 2.0, 2.0], [0.0, -1.0, 0.0, 1.0]],
                [[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [-1.0, 0.0, 0.0, 1.0]],
            ]
        )
        self.altitudes = np.array([0, 1, 3, 2])
        self.latitudes = np.array([10, 10, 20, 20])
        self.longitudes = np.array([10, 10, 20, 20])
        self.wmo_ids = np.arange(4)
        self.unique_site_id = np.arange(4)
        self.unique_site_id_key = "met_office_site_id"
        grid_attributes = ["x_index", "y_index", "vertical_displacement"]
        neighbour_methods = ["nearest", "nearest_land"]
        neighbour_cube = build_spotdata_cube(
            neighbours,
            "grid_neighbours",
            1,
            self.altitudes,
            self.latitudes,
            self.longitudes,
            self.wmo_ids,
            unique_site_id=self.unique_site_id,
            unique_site_id_key=self.unique_site_id_key,
            grid_attributes=grid_attributes,
            neighbour_methods=neighbour_methods,
        )
        neighbour_cube.attributes["model_grid_hash"] = diagnostic_cube_hash
        # x/y index slices for the "nearest" method, cast to int for indexing.
        coordinate_cube = neighbour_cube.extract(
            iris.Constraint(neighbour_selection_method_name="nearest")
            & iris.Constraint(grid_attributes_key=["x_index", "y_index"])
        )
        coordinate_cube.data = np.rint(coordinate_cube.data).astype(int)
        self.diagnostic_cube_xy = diagnostic_cube_xy
        self.diagnostic_cube_yx = diagnostic_cube_yx
        self.diagnostic_cube_2d_time = diagnostic_cube_2d_time
        self.neighbours = neighbours
        self.neighbour_cube = neighbour_cube
        self.coordinate_cube = coordinate_cube
        # Grid-specific attributes are expected to be stripped from spot output.
        self.expected_attributes = self.diagnostic_cube_xy.attributes
        for attr in MOSG_GRID_ATTRIBUTES:
            self.expected_attributes.pop(attr, None)
        self.expected_attributes["title"] = "unknown"
        self.expected_attributes["model_grid_hash"] = self.neighbour_cube.attributes[
            "model_grid_hash"
        ]
class Test__repr__(IrisTest):
    """Tests the class __repr__ function."""

    def test_basic(self):
        """The default plugin representation reports the default method."""
        expected = "<SpotExtraction: neighbour_selection_method: nearest>"
        self.assertEqual(str(SpotExtraction()), expected)

    def test_non_default(self):
        """A non-default selection method appears in the representation."""
        expected = "<SpotExtraction: neighbour_selection_method: nearest_land>"
        self.assertEqual(
            str(SpotExtraction(neighbour_selection_method="nearest_land")), expected
        )
class Test_extract_coordinates(Test_SpotExtraction):
    """Test the extraction of x and y coordinate indices from a neighbour
    cube for a given neighbour_selection_method."""

    def test_nearest(self):
        """Extraction of nearest neighbour x and y indices."""
        extractor = SpotExtraction(neighbour_selection_method="nearest")
        coordinates = extractor.extract_coordinates(self.neighbour_cube)
        self.assertArrayEqual(
            coordinates.data, self.neighbours[0, 0:2, :].astype(int)
        )

    def test_nearest_land(self):
        """Extraction of nearest land neighbour x and y indices."""
        extractor = SpotExtraction(neighbour_selection_method="nearest_land")
        coordinates = extractor.extract_coordinates(self.neighbour_cube)
        self.assertArrayEqual(
            coordinates.data, self.neighbours[1, 0:2, :].astype(int)
        )

    def test_invalid_method(self):
        """Requesting a method absent from the neighbour cube raises an
        exception."""
        extractor = SpotExtraction(neighbour_selection_method="furthest")
        expected_msg = 'The requested neighbour_selection_method "furthest" is not'
        with self.assertRaisesRegex(ValueError, expected_msg):
            extractor.extract_coordinates(self.neighbour_cube)
class Test_check_for_unique_id(Test_SpotExtraction):
    """Test identification of unique site ID coordinates from coordinate
    attributes."""

    def test_unique_is_present(self):
        """IDs and coordinate name are returned when a unique site ID
        coordinate exists on the neighbour cube."""
        ids_and_key = SpotExtraction().check_for_unique_id(self.neighbour_cube)
        self.assertArrayEqual(ids_and_key[0], self.unique_site_id)
        self.assertEqual(ids_and_key[1], self.unique_site_id_key)

    def test_unique_is_not_present(self):
        """None is returned when no unique site ID coordinate exists on the
        neighbour cube."""
        self.neighbour_cube.remove_coord("met_office_site_id")
        outcome = SpotExtraction().check_for_unique_id(self.neighbour_cube)
        self.assertIsNone(outcome)
class Test_get_aux_coords(Test_SpotExtraction):
    """Test the extraction of scalar and non-scalar auxiliary coordinates
    from a cube."""

    def test_only_scalar_coords(self):
        """An input cube with only scalar auxiliary coordinates yields no
        non-scalar coordinates."""
        x_indices, y_indices = self.coordinate_cube.data
        scalar_coords, nonscalar_coords = SpotExtraction().get_aux_coords(
            self.diagnostic_cube_yx, x_indices, y_indices
        )
        self.assertArrayEqual(scalar_coords, self.diagnostic_cube_yx.aux_coords)
        self.assertArrayEqual(nonscalar_coords, [])

    def test_scalar_and_nonscalar_coords(self):
        """An input cube with scalar and non-scalar auxiliary coordinates
        returns a 1D spot-site representation of the 2D coordinate."""
        retained_names = [
            "time_in_local_timezone",
            "forecast_reference_time",
            "forecast_period",
        ]
        expected_scalar = [
            crd
            for crd in self.diagnostic_cube_2d_time.aux_coords
            if crd.name() in retained_names
        ]
        x_indices, y_indices = self.coordinate_cube.data
        scalar_coords, nonscalar_coords = SpotExtraction().get_aux_coords(
            self.diagnostic_cube_2d_time, x_indices, y_indices
        )
        self.assertArrayEqual(scalar_coords, expected_scalar)
        self.assertArrayEqual(nonscalar_coords, [self.expected_spot_time_coord])

    def test_multiple_nonscalar_coords(self):
        """Multiple 2D auxiliary coordinates each return a 1D spot-site
        representation."""
        extra_coord = self.time_aux_coord.copy()
        extra_coord.rename("kittens")
        self.diagnostic_cube_2d_time.add_aux_coord(extra_coord, data_dims=(0, 1))
        extra_expected = self.expected_spot_time_coord.copy()
        extra_expected.rename("kittens")
        x_indices, y_indices = self.coordinate_cube.data
        _, nonscalar_coords = SpotExtraction().get_aux_coords(
            self.diagnostic_cube_2d_time, x_indices, y_indices
        )
        self.assertArrayEqual(
            nonscalar_coords, [extra_expected, self.expected_spot_time_coord]
        )
class Test_get_coordinate_data(Test_SpotExtraction):
    """Test the extraction of data from the provided coordinates."""

    def test_coordinate_with_bounds_extraction(self):
        """Extraction from a 2-dimensional auxiliary coordinate that carries
        bounds returns both points and bounds."""
        x_indices, y_indices = self.coordinate_cube.data
        points, bounds = SpotExtraction().get_coordinate_data(
            self.diagnostic_cube_2d_time, x_indices, y_indices, coordinate="time"
        )
        self.assertArrayEqual(points, self.expected_spot_time_coord.points)
        self.assertArrayEqual(bounds, self.expected_spot_time_coord.bounds)

    def test_coordinate_without_bounds_extraction(self):
        """Extraction from a 2-dimensional auxiliary coordinate without
        bounds returns points and a None bounds value."""
        x_indices, y_indices = self.coordinate_cube.data
        self.diagnostic_cube_2d_time.coord("time").bounds = None
        points, bounds = SpotExtraction().get_coordinate_data(
            self.diagnostic_cube_2d_time, x_indices, y_indices, coordinate="time"
        )
        self.assertArrayEqual(points, self.expected_spot_time_coord.points)
        self.assertArrayEqual(bounds, None)
class Test_build_diagnostic_cube(Test_SpotExtraction):
    """Test the building of a spot data cube with given inputs."""

    def test_building_cube(self):
        """Test that a cube is built as expected."""
        spot_values = np.array([0, 0, 12, 12])
        spot_cube = SpotExtraction().build_diagnostic_cube(
            self.neighbour_cube,
            self.diagnostic_cube_2d_time,
            spot_values,
            unique_site_id=self.unique_site_id,
            unique_site_id_key=self.unique_site_id_key,
            auxiliary_coords=[self.expected_spot_time_coord],
        )
        # Check each site-describing coordinate carries the expected points.
        coordinate_expectations = [
            ("latitude", self.latitudes),
            ("longitude", self.longitudes),
            ("altitude", self.altitudes),
            ("wmo_id", self.wmo_ids),
            (self.unique_site_id_key, self.unique_site_id),
            ("time", self.expected_spot_time_coord.points),
        ]
        for coord_name, expected_points in coordinate_expectations:
            self.assertArrayEqual(spot_cube.coord(coord_name).points, expected_points)
        self.assertArrayEqual(spot_cube.data, spot_values)
class Test_process(Test_SpotExtraction):
    """Test the process method which extracts data and builds cubes with
    metadata added."""

    def _check_spot_cube(self, spot_cube, expected_data, source_cube):
        # Shared checks on data, identity metadata, site coordinates and
        # attributes for a returned spot cube.
        self.assertArrayEqual(spot_cube.data, expected_data)
        self.assertEqual(spot_cube.name(), source_cube.name())
        self.assertEqual(spot_cube.units, source_cube.units)
        self.assertArrayEqual(spot_cube.coord("latitude").points, self.latitudes)
        self.assertArrayEqual(spot_cube.coord("longitude").points, self.longitudes)
        self.assertDictEqual(spot_cube.attributes, self.expected_attributes)

    def test_unmatched_cube_error(self):
        """An error is raised if the neighbour cube and diagnostic cube do
        not have matching grids."""
        self.neighbour_cube.attributes["model_grid_hash"] = "123"
        msg = (
            "Cubes do not share or originate from the same grid, so cannot "
            "be used together."
        )
        with self.assertRaisesRegex(ValueError, msg):
            SpotExtraction().process(self.neighbour_cube, self.diagnostic_cube_xy)

    def test_returned_cube_nearest(self):
        """Data within the returned cube is as expected for the nearest
        neighbours."""
        spot_cube = SpotExtraction().process(
            self.neighbour_cube, self.diagnostic_cube_xy
        )
        self._check_spot_cube(spot_cube, [0, 0, 12, 12], self.diagnostic_cube_xy)

    def test_returned_cube_nearest_land(self):
        """Data within the returned cube is as expected for the nearest land
        neighbours."""
        extractor = SpotExtraction(neighbour_selection_method="nearest_land")
        spot_cube = extractor.process(self.neighbour_cube, self.diagnostic_cube_xy)
        self._check_spot_cube(spot_cube, [6, 6, 12, 12], self.diagnostic_cube_xy)

    def test_new_title(self):
        """The title attribute is updated as expected."""
        # Deliberately mutate the shared expected attributes via the alias.
        expected_attributes = self.expected_attributes
        expected_attributes["title"] = "IMPROVER Spot Forecast"
        spot_cube = SpotExtraction(neighbour_selection_method="nearest_land").process(
            self.neighbour_cube,
            self.diagnostic_cube_xy,
            new_title="IMPROVER Spot Forecast",
        )
        self.assertDictEqual(spot_cube.attributes, expected_attributes)

    def test_cube_with_leading_dimensions(self):
        """A cube with a leading dimension such as realization or probability
        results in a spotdata cube with the same leading dimension."""
        realization_members = []
        for index in (0, 1):
            member = self.diagnostic_cube_xy.copy()
            member.add_aux_coord(
                iris.coords.DimCoord([index], standard_name="realization", units=1)
            )
            realization_members.append(member)
        merged_cube = iris.cube.CubeList(realization_members).merge_cube()
        expected_coord = iris.coords.DimCoord(
            [0, 1], standard_name="realization", units=1
        )
        spot_cube = SpotExtraction().process(self.neighbour_cube, merged_cube)
        self.assertArrayEqual(spot_cube.data, [[0, 0, 12, 12], [0, 0, 12, 12]])
        self.assertEqual(spot_cube.name(), merged_cube.name())
        self.assertEqual(spot_cube.units, merged_cube.units)
        self.assertArrayEqual(spot_cube.coord("latitude").points, self.latitudes)
        self.assertArrayEqual(spot_cube.coord("longitude").points, self.longitudes)
        self.assertEqual(spot_cube.coord("realization"), expected_coord)
        self.assertDictEqual(spot_cube.attributes, self.expected_attributes)

    def test_cell_methods(self):
        """Cell methods from the gridded input cube are retained on the
        spotdata cube."""
        spot_cube = SpotExtraction(neighbour_selection_method="nearest_land").process(
            self.neighbour_cube,
            self.diagnostic_cube_xy,
            new_title="IMPROVER Spot Forecast",
        )
        self.assertEqual(spot_cube.cell_methods, self.cell_methods)

    def test_2d_aux_coords(self):
        """2D auxiliary coordinates from the gridded input cube are retained
        as 1D coordinates associated with the spot-index."""
        spot_cube = SpotExtraction().process(
            self.neighbour_cube,
            self.diagnostic_cube_2d_time,
            new_title="IMPROVER Spot Forecast",
        )
        self.assertEqual(spot_cube.coord("time"), self.expected_spot_time_coord)

    def test_removal_of_internal_metadata(self):
        """Internal metadata used to identify the unique id coordinate is
        removed from the resulting spot diagnostic cube."""
        spot_cube = SpotExtraction().process(
            self.neighbour_cube, self.diagnostic_cube_xy
        )
        self.assertNotIn(
            UNIQUE_ID_ATTRIBUTE,
            list(spot_cube.coord(self.unique_site_id_key).attributes),
        )

    def test_yx_ordered_cube(self):
        """Extraction of diagnostic data that is natively ordered yx."""
        spot_cube = SpotExtraction().process(
            self.coordinate_cube, self.diagnostic_cube_yx
        )
        self.assertArrayEqual(spot_cube.data, [0, 0, 12, 12])
# Allow the test module to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"datetime.datetime",
"improver.metadata.utilities.create_coordinate_hash",
"iris.cube.CubeList",
"improver.synthetic_data.set_up_test_cubes.set_up_variable_cube",
"improver.spotdata.spot_extraction.SpotExtraction",
"iris.coords.DimCoord",
"iris.Constraint",
"iris.coords.AuxCoord",
"numpy.array",
"... | [((21957, 21972), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21970, 21972), False, 'import unittest\n'), ((3489, 3508), 'datetime.datetime', 'dt', (['(2020)', '(6)', '(15)', '(12)'], {}), '(2020, 6, 15, 12)\n', (3491, 3508), True, 'from datetime import datetime as dt\n'), ((3579, 3743), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['diagnostic_data.T'], {'name': '"""air_temperature"""', 'units': '"""K"""', 'attributes': 'attributes', 'domain_corner': '(0, 0)', 'grid_spacing': '(10)', 'time': 'time', 'frt': 'frt'}), "(diagnostic_data.T, name='air_temperature', units='K',\n attributes=attributes, domain_corner=(0, 0), grid_spacing=10, time=time,\n frt=frt)\n", (3599, 3743), False, 'from improver.synthetic_data.set_up_test_cubes import _create_time_point, set_up_variable_cube\n'), ((4777, 4813), 'numpy.broadcast_to', 'np.broadcast_to', (['time_points', '(5, 5)'], {}), '(time_points, (5, 5))\n', (4792, 4813), True, 'import numpy as np\n'), ((4831, 4865), 'numpy.broadcast_to', 'np.broadcast_to', (['bounds', '(5, 5, 2)'], {}), '(bounds, (5, 5, 2))\n', (4846, 4865), True, 'import numpy as np\n'), ((4955, 5049), 'iris.coords.AuxCoord', 'iris.coords.AuxCoord', (['time_points', '"""time"""'], {'bounds': 'bounds', 'units': "TIME_COORDS['time'].units"}), "(time_points, 'time', bounds=bounds, units=TIME_COORDS[\n 'time'].units)\n", (4975, 5049), False, 'import iris\n'), ((5664, 5706), 'improver.metadata.utilities.create_coordinate_hash', 'create_coordinate_hash', (['diagnostic_cube_yx'], {}), '(diagnostic_cube_yx)\n', (5686, 5706), False, 'from improver.metadata.utilities import create_coordinate_hash\n'), ((5945, 6098), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 2.0, 2.0], [0.0, -1.0, 0.0, 1.0]], [[1.0,\n 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [-1.0, 0.0, 0.0, 1.0]]]'], {}), '([[[0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 2.0, 2.0], [0.0, -1.0, 0.0, 1.0\n ]], [[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], 
[-1.0, 0.0, 0.0, 1.0]]])\n', (5953, 6098), True, 'import numpy as np\n'), ((6189, 6211), 'numpy.array', 'np.array', (['[0, 1, 3, 2]'], {}), '([0, 1, 3, 2])\n', (6197, 6211), True, 'import numpy as np\n'), ((6237, 6263), 'numpy.array', 'np.array', (['[10, 10, 20, 20]'], {}), '([10, 10, 20, 20])\n', (6245, 6263), True, 'import numpy as np\n'), ((6290, 6316), 'numpy.array', 'np.array', (['[10, 10, 20, 20]'], {}), '([10, 10, 20, 20])\n', (6298, 6316), True, 'import numpy as np\n'), ((6340, 6352), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6349, 6352), True, 'import numpy as np\n'), ((6383, 6395), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6392, 6395), True, 'import numpy as np\n'), ((6606, 6886), 'improver.spotdata.build_spotdata_cube.build_spotdata_cube', 'build_spotdata_cube', (['neighbours', '"""grid_neighbours"""', '(1)', 'self.altitudes', 'self.latitudes', 'self.longitudes', 'self.wmo_ids'], {'unique_site_id': 'self.unique_site_id', 'unique_site_id_key': 'self.unique_site_id_key', 'grid_attributes': 'grid_attributes', 'neighbour_methods': 'neighbour_methods'}), "(neighbours, 'grid_neighbours', 1, self.altitudes, self.\n latitudes, self.longitudes, self.wmo_ids, unique_site_id=self.\n unique_site_id, unique_site_id_key=self.unique_site_id_key,\n grid_attributes=grid_attributes, neighbour_methods=neighbour_methods)\n", (6625, 6886), False, 'from improver.spotdata.build_spotdata_cube import build_spotdata_cube\n'), ((8218, 8234), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (8232, 8234), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((8521, 8578), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest_land"""'}), "(neighbour_selection_method='nearest_land')\n", (8535, 8578), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((9017, 9069), 'improver.spotdata.spot_extraction.SpotExtraction', 
'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest"""'}), "(neighbour_selection_method='nearest')\n", (9031, 9069), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((9370, 9427), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest_land"""'}), "(neighbour_selection_method='nearest_land')\n", (9384, 9427), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((9804, 9857), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""furthest"""'}), "(neighbour_selection_method='furthest')\n", (9818, 9857), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((10389, 10405), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (10403, 10405), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((10834, 10850), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (10848, 10850), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((11244, 11260), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (11258, 11260), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((11933, 11949), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (11947, 11949), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((12831, 12847), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (12845, 12847), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((13805, 13821), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (13819, 13821), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((14479, 14495), 'improver.spotdata.spot_extraction.SpotExtraction', 
'SpotExtraction', ([], {}), '()\n', (14493, 14495), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((15199, 15215), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (15213, 15215), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((15238, 15262), 'numpy.array', 'np.array', (['[0, 0, 12, 12]'], {}), '([0, 0, 12, 12])\n', (15246, 15262), True, 'import numpy as np\n'), ((16580, 16596), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (16594, 16596), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((17023, 17039), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (17037, 17039), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((17756, 17813), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest_land"""'}), "(neighbour_selection_method='nearest_land')\n", (17770, 17813), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((18570, 18627), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest_land"""'}), "(neighbour_selection_method='nearest_land')\n", (18584, 18627), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((19098, 19161), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['[0]'], {'standard_name': '"""realization"""', 'units': '(1)'}), "([0], standard_name='realization', units=1)\n", (19118, 19161), False, 'import iris\n'), ((19185, 19248), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['[1]'], {'standard_name': '"""realization"""', 'units': '(1)'}), "([1], standard_name='realization', units=1)\n", (19205, 19248), False, 'import iris\n'), ((19444, 19478), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube0, cube1]'], {}), '([cube0, cube1])\n', (19462, 19478), False, 
'import iris\n'), ((19531, 19547), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (19545, 19547), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((19625, 19691), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['[0, 1]'], {'standard_name': '"""realization"""', 'units': '(1)'}), "([0, 1], standard_name='realization', units=1)\n", (19645, 19691), False, 'import iris\n'), ((20388, 20445), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {'neighbour_selection_method': '"""nearest_land"""'}), "(neighbour_selection_method='nearest_land')\n", (20402, 20445), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((20896, 20912), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (20910, 20912), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((21366, 21382), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (21380, 21382), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((21741, 21757), 'improver.spotdata.spot_extraction.SpotExtraction', 'SpotExtraction', ([], {}), '()\n', (21755, 21757), False, 'from improver.spotdata.spot_extraction import SpotExtraction\n'), ((3393, 3461), 'iris.coords.CellMethod', 'iris.coords.CellMethod', (['"""maximum"""'], {'coords': '"""time"""', 'intervals': '"""1 hour"""'}), "('maximum', coords='time', intervals='1 hour')\n", (3415, 3461), False, 'import iris\n'), ((3530, 3548), 'datetime.timedelta', 'timedelta', ([], {'hours': '(6)'}), '(hours=6)\n', (3539, 3548), False, 'from datetime import timedelta\n'), ((3133, 3146), 'numpy.arange', 'np.arange', (['(25)'], {}), '(25)\n', (3142, 3146), True, 'import numpy as np\n'), ((4360, 4384), 'improver.synthetic_data.set_up_test_cubes._create_time_point', '_create_time_point', (['time'], {}), '(time)\n', (4378, 4384), False, 'from 
improver.synthetic_data.set_up_test_cubes import _create_time_point, set_up_variable_cube\n'), ((7155, 7213), 'iris.Constraint', 'iris.Constraint', ([], {'neighbour_selection_method_name': '"""nearest"""'}), "(neighbour_selection_method_name='nearest')\n", (7170, 7213), False, 'import iris\n'), ((7228, 7287), 'iris.Constraint', 'iris.Constraint', ([], {'grid_attributes_key': "['x_index', 'y_index']"}), "(grid_attributes_key=['x_index', 'y_index'])\n", (7243, 7287), False, 'import iris\n'), ((7329, 7358), 'numpy.rint', 'np.rint', (['coordinate_cube.data'], {}), '(coordinate_cube.data)\n', (7336, 7358), True, 'import numpy as np\n'), ((4251, 4269), 'datetime.timedelta', 'timedelta', ([], {'hours': 'i'}), '(hours=i)\n', (4260, 4269), False, 'from datetime import timedelta\n'), ((4551, 4575), 'improver.synthetic_data.set_up_test_cubes._create_time_point', '_create_time_point', (['time'], {}), '(time)\n', (4569, 4575), False, 'from improver.synthetic_data.set_up_test_cubes import _create_time_point, set_up_variable_cube\n'), ((4510, 4528), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4519, 4528), False, 'from datetime import timedelta\n')] |
import os
import numpy as np
import autoarray as aa
path = "{}/".format(os.path.dirname(os.path.realpath(__file__)))
class TestArray2DEuclid:
    """Checks that Euclid arrays load with the correct shape for every
    quadrant orientation."""

    def test__euclid_array_for_four_quandrants__loads_data_and_dimensions(
        self, euclid_data
    ):
        # Each quadrant constructor must produce the same native shape and
        # zeroed data for the supplied test array.
        quadrant_loaders = (
            aa.euclid.Array2DEuclid.top_left,
            aa.euclid.Array2DEuclid.top_right,
            aa.euclid.Array2DEuclid.bottom_left,
            aa.euclid.Array2DEuclid.bottom_right,
        )
        for load_quadrant in quadrant_loaders:
            quadrant_array = load_quadrant(array_electrons=euclid_data)
            assert quadrant_array.shape_native == (2086, 2128)
            assert (quadrant_array.native == np.zeros((2086, 2128))).all()
class TestLayout2DEuclid:
    """Checks Euclid layout geometry for every quadrant orientation and the
    CCD/quadrant-id lookup of the read-out-electronics corner."""

    def test__euclid_layout_for_four_quandrants__loads_data_and_dimensions(
        self, euclid_data
    ):
        # Each factory is paired with the read-out-electronics corner it
        # should report; two prescan/overscan configurations are checked.
        factories_and_corners = (
            (aa.euclid.Layout2DEuclid.top_left, (0, 0)),
            (aa.euclid.Layout2DEuclid.top_right, (0, 1)),
            (aa.euclid.Layout2DEuclid.bottom_left, (1, 0)),
            (aa.euclid.Layout2DEuclid.bottom_right, (1, 1)),
        )
        for factory, roe_corner in factories_and_corners:
            layout = factory(
                parallel_size=2086,
                serial_size=2128,
                serial_prescan_size=51,
                serial_overscan_size=29,
                parallel_overscan_size=20,
            )
            assert layout.original_roe_corner == roe_corner
            assert layout.shape_2d == (2086, 2128)
            assert layout.parallel_overscan == (2066, 2086, 51, 2099)
            assert layout.serial_prescan == (0, 2086, 0, 51)
            # Top quadrants place the serial overscan offset from row 0.
            if roe_corner[0] == 0:
                assert layout.serial_overscan == (20, 2086, 2099, 2128)
            else:
                assert layout.serial_overscan == (0, 2066, 2099, 2128)

            layout = factory(
                parallel_size=2086,
                serial_size=2128,
                serial_prescan_size=41,
                serial_overscan_size=10,
                parallel_overscan_size=15,
            )
            assert layout.original_roe_corner == roe_corner
            assert layout.shape_2d == (2086, 2128)
            assert layout.parallel_overscan == (2071, 2086, 41, 2118)
            assert layout.serial_prescan == (0, 2086, 0, 41)
            if roe_corner[0] == 0:
                assert layout.serial_overscan == (15, 2086, 2118, 2128)
            else:
                assert layout.serial_overscan == (0, 2071, 2118, 2128)

    def test__left_side__chooses_correct_layout_given_input(self, euclid_data):
        # Left-side CCDs (ids 1-3) map quadrant letters to these corners.
        quadrant_corners = (
            ("E", (1, 0)),
            ("F", (1, 1)),
            ("G", (0, 1)),
            ("H", (0, 0)),
        )
        for quadrant_id, expected_corner in quadrant_corners:
            for ccd_id in ("text1", "text2", "text3"):
                layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
                    ccd_id=ccd_id, quadrant_id=quadrant_id
                )
                assert layout.original_roe_corner == expected_corner

    def test__right_side__chooses_correct_layout_given_input(self, euclid_data):
        # Right-side CCDs (ids 4-6) use the mirrored corner mapping.
        quadrant_corners = (
            ("E", (0, 1)),
            ("F", (0, 0)),
            ("G", (1, 0)),
            ("H", (1, 1)),
        )
        for quadrant_id, expected_corner in quadrant_corners:
            for ccd_id in ("text4", "text5", "text6"):
                layout = aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id(
                    ccd_id=ccd_id, quadrant_id=quadrant_id
                )
                assert layout.original_roe_corner == expected_corner
| [
"autoarray.euclid.Layout2DEuclid.top_right",
"autoarray.euclid.Array2DEuclid.top_right",
"autoarray.euclid.Layout2DEuclid.bottom_left",
"autoarray.euclid.Array2DEuclid.bottom_left",
"os.path.realpath",
"autoarray.euclid.Array2DEuclid.top_left",
"autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id",... | [((97, 123), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (113, 123), False, 'import os\n'), ((293, 354), 'autoarray.euclid.Array2DEuclid.top_left', 'aa.euclid.Array2DEuclid.top_left', ([], {'array_electrons': 'euclid_data'}), '(array_electrons=euclid_data)\n', (325, 354), True, 'import autoarray as aa\n'), ((511, 573), 'autoarray.euclid.Array2DEuclid.top_right', 'aa.euclid.Array2DEuclid.top_right', ([], {'array_electrons': 'euclid_data'}), '(array_electrons=euclid_data)\n', (544, 573), True, 'import autoarray as aa\n'), ((730, 794), 'autoarray.euclid.Array2DEuclid.bottom_left', 'aa.euclid.Array2DEuclid.bottom_left', ([], {'array_electrons': 'euclid_data'}), '(array_electrons=euclid_data)\n', (765, 794), True, 'import autoarray as aa\n'), ((951, 1016), 'autoarray.euclid.Array2DEuclid.bottom_right', 'aa.euclid.Array2DEuclid.bottom_right', ([], {'array_electrons': 'euclid_data'}), '(array_electrons=euclid_data)\n', (987, 1016), True, 'import autoarray as aa\n'), ((1310, 1461), 'autoarray.euclid.Layout2DEuclid.top_left', 'aa.euclid.Layout2DEuclid.top_left', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(51)', 'serial_overscan_size': '(29)', 'parallel_overscan_size': '(20)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=51, serial_overscan_size=29, parallel_overscan_size=20)\n', (1343, 1461), True, 'import autoarray as aa\n'), ((1848, 1999), 'autoarray.euclid.Layout2DEuclid.top_left', 'aa.euclid.Layout2DEuclid.top_left', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(41)', 'serial_overscan_size': '(10)', 'parallel_overscan_size': '(15)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=41, serial_overscan_size=10, parallel_overscan_size=15)\n', (1881, 1999), True, 'import autoarray as aa\n'), ((2386, 2538), 'autoarray.euclid.Layout2DEuclid.top_right', 
'aa.euclid.Layout2DEuclid.top_right', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(51)', 'serial_overscan_size': '(29)', 'parallel_overscan_size': '(20)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=51, serial_overscan_size=29, parallel_overscan_size=20)\n', (2420, 2538), True, 'import autoarray as aa\n'), ((2925, 3077), 'autoarray.euclid.Layout2DEuclid.top_right', 'aa.euclid.Layout2DEuclid.top_right', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(41)', 'serial_overscan_size': '(10)', 'parallel_overscan_size': '(15)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=41, serial_overscan_size=10, parallel_overscan_size=15)\n', (2959, 3077), True, 'import autoarray as aa\n'), ((3464, 3618), 'autoarray.euclid.Layout2DEuclid.bottom_left', 'aa.euclid.Layout2DEuclid.bottom_left', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(51)', 'serial_overscan_size': '(29)', 'parallel_overscan_size': '(20)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=51, serial_overscan_size=29, parallel_overscan_size=20)\n', (3500, 3618), True, 'import autoarray as aa\n'), ((4004, 4158), 'autoarray.euclid.Layout2DEuclid.bottom_left', 'aa.euclid.Layout2DEuclid.bottom_left', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(41)', 'serial_overscan_size': '(10)', 'parallel_overscan_size': '(15)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=41, serial_overscan_size=10, parallel_overscan_size=15)\n', (4040, 4158), True, 'import autoarray as aa\n'), ((4544, 4699), 'autoarray.euclid.Layout2DEuclid.bottom_right', 'aa.euclid.Layout2DEuclid.bottom_right', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(51)', 'serial_overscan_size': '(29)', 'parallel_overscan_size': '(20)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=51, 
serial_overscan_size=29, parallel_overscan_size=20)\n', (4581, 4699), True, 'import autoarray as aa\n'), ((5085, 5240), 'autoarray.euclid.Layout2DEuclid.bottom_right', 'aa.euclid.Layout2DEuclid.bottom_right', ([], {'parallel_size': '(2086)', 'serial_size': '(2128)', 'serial_prescan_size': '(41)', 'serial_overscan_size': '(10)', 'parallel_overscan_size': '(15)'}), '(parallel_size=2086, serial_size=2128,\n serial_prescan_size=41, serial_overscan_size=10, parallel_overscan_size=15)\n', (5122, 5240), True, 'import autoarray as aa\n'), ((5709, 5795), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text1"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text1',\n quadrant_id='E')\n", (5758, 5795), True, 'import autoarray as aa\n'), ((5891, 5977), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text2"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text2',\n quadrant_id='E')\n", (5940, 5977), True, 'import autoarray as aa\n'), ((6073, 6159), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text3"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text3',\n quadrant_id='E')\n", (6122, 6159), True, 'import autoarray as aa\n'), ((6255, 6341), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text1"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text1',\n quadrant_id='F')\n", (6304, 6341), True, 'import autoarray as aa\n'), ((6437, 6523), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text2"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text2',\n quadrant_id='F')\n", (6486, 6523), True, 'import autoarray as aa\n'), ((6619, 6705), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 
'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text3"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text3',\n quadrant_id='F')\n", (6668, 6705), True, 'import autoarray as aa\n'), ((6801, 6887), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text1"""', 'quadrant_id': '"""G"""'}), "(ccd_id='text1',\n quadrant_id='G')\n", (6850, 6887), True, 'import autoarray as aa\n'), ((6983, 7069), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text2"""', 'quadrant_id': '"""G"""'}), "(ccd_id='text2',\n quadrant_id='G')\n", (7032, 7069), True, 'import autoarray as aa\n'), ((7165, 7251), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text3"""', 'quadrant_id': '"""G"""'}), "(ccd_id='text3',\n quadrant_id='G')\n", (7214, 7251), True, 'import autoarray as aa\n'), ((7347, 7433), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text1"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text1',\n quadrant_id='H')\n", (7396, 7433), True, 'import autoarray as aa\n'), ((7529, 7615), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text2"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text2',\n quadrant_id='H')\n", (7578, 7615), True, 'import autoarray as aa\n'), ((7711, 7797), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text3"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text3',\n quadrant_id='H')\n", (7760, 7797), True, 'import autoarray as aa\n'), ((7975, 8061), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], 
{'ccd_id': '"""text4"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text4',\n quadrant_id='E')\n", (8024, 8061), True, 'import autoarray as aa\n'), ((8157, 8243), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text5"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text5',\n quadrant_id='E')\n", (8206, 8243), True, 'import autoarray as aa\n'), ((8339, 8425), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text6"""', 'quadrant_id': '"""E"""'}), "(ccd_id='text6',\n quadrant_id='E')\n", (8388, 8425), True, 'import autoarray as aa\n'), ((8521, 8607), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text4"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text4',\n quadrant_id='F')\n", (8570, 8607), True, 'import autoarray as aa\n'), ((8703, 8789), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text5"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text5',\n quadrant_id='F')\n", (8752, 8789), True, 'import autoarray as aa\n'), ((8885, 8971), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text6"""', 'quadrant_id': '"""F"""'}), "(ccd_id='text6',\n quadrant_id='F')\n", (8934, 8971), True, 'import autoarray as aa\n'), ((9067, 9153), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text4"""', 'quadrant_id': '"""G"""'}), "(ccd_id='text4',\n quadrant_id='G')\n", (9116, 9153), True, 'import autoarray as aa\n'), ((9249, 9335), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text5"""', 'quadrant_id': '"""G"""'}), 
"(ccd_id='text5',\n quadrant_id='G')\n", (9298, 9335), True, 'import autoarray as aa\n'), ((9431, 9517), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text6"""', 'quadrant_id': '"""G"""'}), "(ccd_id='text6',\n quadrant_id='G')\n", (9480, 9517), True, 'import autoarray as aa\n'), ((9613, 9699), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text4"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text4',\n quadrant_id='H')\n", (9662, 9699), True, 'import autoarray as aa\n'), ((9795, 9881), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text5"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text5',\n quadrant_id='H')\n", (9844, 9881), True, 'import autoarray as aa\n'), ((9977, 10063), 'autoarray.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', 'aa.euclid.Layout2DEuclid.from_ccd_and_quadrant_id', ([], {'ccd_id': '"""text6"""', 'quadrant_id': '"""H"""'}), "(ccd_id='text6',\n quadrant_id='H')\n", (10026, 10063), True, 'import autoarray as aa\n'), ((455, 477), 'numpy.zeros', 'np.zeros', (['(2086, 2128)'], {}), '((2086, 2128))\n', (463, 477), True, 'import numpy as np\n'), ((674, 696), 'numpy.zeros', 'np.zeros', (['(2086, 2128)'], {}), '((2086, 2128))\n', (682, 696), True, 'import numpy as np\n'), ((895, 917), 'numpy.zeros', 'np.zeros', (['(2086, 2128)'], {}), '((2086, 2128))\n', (903, 917), True, 'import numpy as np\n'), ((1117, 1139), 'numpy.zeros', 'np.zeros', (['(2086, 2128)'], {}), '((2086, 2128))\n', (1125, 1139), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.