code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#
# Plasma
# Copyright (c) 2021 <NAME>.
#
from imageio import imwrite
from numpy import linspace, tile, uint16
from pytest import fixture, mark
from .common import tensorread, tensorwrite
from torchplasma.curves import discrete_curve_1d, discrete_curve_3d, cuberead, lutread
IMAGE_PATHS = [
"test/media/filter/1.jpg",
"test/media/filter/2.jpg",
"test/media/filter/3.jpg",
"test/media/filter/4.jpg",
"test/media/filter/10.jpg",
]
def test_create_identity_lut ():
    """Build a 16x4096 identity ramp LUT and write it as a 16-bit TIFF."""
    ramp = linspace(0., 1., num=4096)
    grid = tile(ramp, (16, 1))
    imwrite("identity.tif", (grid * 65535).astype(uint16))
@mark.parametrize("image_path", IMAGE_PATHS)
def test_lut (image_path):
    """Apply the 1D ramp LUT to each test image and save the result."""
    source = tensorread(image_path)
    ramp_lut = lutread("test/media/lut/ramp.tif")
    filtered = discrete_curve_1d(source, ramp_lut)
    tensorwrite("lut.jpg", filtered)
@mark.parametrize("image_path", IMAGE_PATHS)
def test_load_cube (image_path):
    """Apply the identity 3D CUBE LUT to each test image and save the result."""
    source = tensorread(image_path)
    cube_lut = cuberead("test/media/lut/identity.cube")
    filtered = discrete_curve_3d(source, cube_lut)
    tensorwrite("cube.jpg", filtered)
| [
"torchplasma.curves.discrete_curve_3d",
"numpy.tile",
"torchplasma.curves.cuberead",
"imageio.imwrite",
"pytest.mark.parametrize",
"numpy.linspace",
"torchplasma.curves.lutread",
"torchplasma.curves.discrete_curve_1d"
] | [((631, 674), 'pytest.mark.parametrize', 'mark.parametrize', (['"""image_path"""', 'IMAGE_PATHS'], {}), "('image_path', IMAGE_PATHS)\n", (647, 674), False, 'from pytest import fixture, mark\n'), ((862, 905), 'pytest.mark.parametrize', 'mark.parametrize', (['"""image_path"""', 'IMAGE_PATHS'], {}), "('image_path', IMAGE_PATHS)\n", (878, 905), False, 'from pytest import fixture, mark\n'), ((501, 529), 'numpy.linspace', 'linspace', (['(0.0)', '(1.0)'], {'num': '(4096)'}), '(0.0, 1.0, num=4096)\n', (509, 529), False, 'from numpy import linspace, tile, uint16\n'), ((538, 556), 'numpy.tile', 'tile', (['lut', '(16, 1)'], {}), '(lut, (16, 1))\n', (542, 556), False, 'from numpy import linspace, tile, uint16\n'), ((600, 628), 'imageio.imwrite', 'imwrite', (['"""identity.tif"""', 'lut'], {}), "('identity.tif', lut)\n", (607, 628), False, 'from imageio import imwrite\n'), ((747, 781), 'torchplasma.curves.lutread', 'lutread', (['"""test/media/lut/ramp.tif"""'], {}), "('test/media/lut/ramp.tif')\n", (754, 781), False, 'from torchplasma.curves import discrete_curve_1d, discrete_curve_3d, cuberead, lutread\n'), ((795, 824), 'torchplasma.curves.discrete_curve_1d', 'discrete_curve_1d', (['image', 'lut'], {}), '(image, lut)\n', (812, 824), False, 'from torchplasma.curves import discrete_curve_1d, discrete_curve_3d, cuberead, lutread\n'), ((985, 1025), 'torchplasma.curves.cuberead', 'cuberead', (['"""test/media/lut/identity.cube"""'], {}), "('test/media/lut/identity.cube')\n", (993, 1025), False, 'from torchplasma.curves import discrete_curve_1d, discrete_curve_3d, cuberead, lutread\n'), ((1039, 1069), 'torchplasma.curves.discrete_curve_3d', 'discrete_curve_3d', (['image', 'cube'], {}), '(image, cube)\n', (1056, 1069), False, 'from torchplasma.curves import discrete_curve_1d, discrete_curve_3d, cuberead, lutread\n')] |
# Copyright 2016 the gpflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.from __future__ import print_function
import unittest
import gpflow
import numpy as np
import tensorflow as tf
from testing.gpflow_testcase import GPflowTestCase
class PriorModeTests(GPflowTestCase):
    """
    These tests optimize the prior to find the mode numerically, and check
    that the numerical mode matches the analytically known mode.
    """
    def setUp(self):
        # A model with a constant likelihood, so optimizing the model is
        # equivalent to optimizing the prior alone.
        class FlatModel(gpflow.model.Model):
            def build_likelihood(self):
                return 0
        self.m = FlatModel()

    def testGaussianMode(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(1.0)
            self.m.x.prior = gpflow.priors.Gaussian(3, 1)
            self.m.optimize(disp=0)
            xmax = self.m.get_free_state()
            # The mode of N(3, 1) is its mean.
            self.assertTrue(np.allclose(xmax, 3))

    def testGaussianModeMatrix(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(np.random.randn(4, 4))
            self.m.x.prior = gpflow.priors.Gaussian(-1, 10)
            self.m.optimize(disp=0)
            xmax = self.m.get_free_state()
            self.assertTrue(np.allclose(xmax, -1))

    def testGammaMode(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(1.0)
            shape, scale = 4., 5.
            self.m.x.prior = gpflow.priors.Gamma(shape, scale)
            self.m.optimize(disp=0)
            # Mode of Gamma(shape, scale) is (shape - 1) * scale for shape >= 1.
            true_mode = (shape - 1.) * scale
            self.assertTrue(np.allclose(self.m.x.value, true_mode, 1e-3))

    def testLaplaceMode(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(1.0)
            self.m.x.prior = gpflow.priors.Laplace(3, 10)
            self.m.optimize(disp=0)
            xmax = self.m.get_free_state()
            self.assertTrue(np.allclose(xmax, 3))

    def testLogNormalMode(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(1.0)
            self.m.x.prior = gpflow.priors.LogNormal(3, 10)
            # With an Exp transform the free state is log(x); its prior is
            # the underlying normal, whose mode is 3.
            self.m.x.transform = gpflow.transforms.Exp()
            self.m.optimize(disp=0)
            xmax = self.m.get_free_state()
            self.assertTrue(np.allclose(xmax, 3))

    def testBetaMode(self):
        # Fixed for consistency with the other tests: run inside a test
        # session rather than on the default graph.
        with self.test_session():
            self.m.x = gpflow.param.Param(0.1)
            self.m.x.prior = gpflow.priors.Beta(3., 3.)
            self.m.x.transform = gpflow.transforms.Logistic()
            self.m.optimize(disp=0, tol=1e-8)
            xmax = self.m.get_free_state()
            # Beta(3, 3) has mode 0.5, which is 0.0 in logistic free space.
            self.assertTrue(np.allclose(0.0, xmax))

    def testUniform(self):
        with self.test_session():
            self.m.x = gpflow.param.Param(1.0)
            self.m.x.prior = gpflow.priors.Uniform(-2, 3)
            self.m.x.transform = gpflow.transforms.Logistic(-2, 3)
            self.m.set_state(np.random.randn(1))
            p1 = self.m.compute_log_prior()
            self.m.set_state(np.random.randn(1))
            p2 = self.m.compute_log_prior()
            # The log prior should differ between the two random free states
            # because a transformation has been applied.
            self.assertFalse(p1 == p2)
if __name__ == "__main__":
unittest.main()
| [
"gpflow.transforms.Exp",
"gpflow.priors.LogNormal",
"numpy.allclose",
"gpflow.priors.Uniform",
"gpflow.priors.Laplace",
"numpy.random.randn",
"gpflow.priors.Gaussian",
"gpflow.transforms.Logistic",
"gpflow.param.Param",
"gpflow.priors.Beta",
"unittest.main",
"gpflow.priors.Gamma"
] | [((3615, 3630), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3628, 3630), False, 'import unittest\n'), ((2783, 2806), 'gpflow.param.Param', 'gpflow.param.Param', (['(0.1)'], {}), '(0.1)\n', (2801, 2806), False, 'import gpflow\n'), ((2832, 2860), 'gpflow.priors.Beta', 'gpflow.priors.Beta', (['(3.0)', '(3.0)'], {}), '(3.0, 3.0)\n', (2850, 2860), False, 'import gpflow\n'), ((2888, 2916), 'gpflow.transforms.Logistic', 'gpflow.transforms.Logistic', ([], {}), '()\n', (2914, 2916), False, 'import gpflow\n'), ((1167, 1190), 'gpflow.param.Param', 'gpflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (1185, 1190), False, 'import gpflow\n'), ((1220, 1248), 'gpflow.priors.Gaussian', 'gpflow.priors.Gaussian', (['(3)', '(1)'], {}), '(3, 1)\n', (1242, 1248), False, 'import gpflow\n'), ((1546, 1576), 'gpflow.priors.Gaussian', 'gpflow.priors.Gaussian', (['(-1)', '(10)'], {}), '(-1, 10)\n', (1568, 1576), False, 'import gpflow\n'), ((1795, 1818), 'gpflow.param.Param', 'gpflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (1813, 1818), False, 'import gpflow\n'), ((1882, 1915), 'gpflow.priors.Gamma', 'gpflow.priors.Gamma', (['shape', 'scale'], {}), '(shape, scale)\n', (1901, 1915), False, 'import gpflow\n'), ((2161, 2184), 'gpflow.param.Param', 'gpflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (2179, 2184), False, 'import gpflow\n'), ((2214, 2242), 'gpflow.priors.Laplace', 'gpflow.priors.Laplace', (['(3)', '(10)'], {}), '(3, 10)\n', (2235, 2242), False, 'import gpflow\n'), ((2464, 2487), 'gpflow.param.Param', 'gpflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (2482, 2487), False, 'import gpflow\n'), ((2517, 2547), 'gpflow.priors.LogNormal', 'gpflow.priors.LogNormal', (['(3)', '(10)'], {}), '(3, 10)\n', (2540, 2547), False, 'import gpflow\n'), ((2581, 2604), 'gpflow.transforms.Exp', 'gpflow.transforms.Exp', ([], {}), '()\n', (2602, 2604), False, 'import gpflow\n'), ((3024, 3046), 'numpy.allclose', 'np.allclose', (['(0.0)', 'xmax'], {}), '(0.0, xmax)\n', (3035, 3046), True, 'import 
numpy as np\n'), ((3133, 3156), 'gpflow.param.Param', 'gpflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (3151, 3156), False, 'import gpflow\n'), ((3186, 3214), 'gpflow.priors.Uniform', 'gpflow.priors.Uniform', (['(-2)', '(3)'], {}), '(-2, 3)\n', (3207, 3214), False, 'import gpflow\n'), ((3248, 3281), 'gpflow.transforms.Logistic', 'gpflow.transforms.Logistic', (['(-2)', '(3)'], {}), '(-2, 3)\n', (3274, 3281), False, 'import gpflow\n'), ((1357, 1377), 'numpy.allclose', 'np.allclose', (['xmax', '(3)'], {}), '(xmax, 3)\n', (1368, 1377), True, 'import numpy as np\n'), ((1494, 1515), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (1509, 1515), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.allclose', 'np.allclose', (['xmax', '(-1)'], {}), '(xmax, -1)\n', (1696, 1706), True, 'import numpy as np\n'), ((2026, 2071), 'numpy.allclose', 'np.allclose', (['self.m.x.value', 'true_mode', '(0.001)'], {}), '(self.m.x.value, true_mode, 0.001)\n', (2037, 2071), True, 'import numpy as np\n'), ((2351, 2371), 'numpy.allclose', 'np.allclose', (['xmax', '(3)'], {}), '(xmax, 3)\n', (2362, 2371), True, 'import numpy as np\n'), ((2713, 2733), 'numpy.allclose', 'np.allclose', (['xmax', '(3)'], {}), '(xmax, 3)\n', (2724, 2733), True, 'import numpy as np\n'), ((3312, 3330), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (3327, 3330), True, 'import numpy as np\n'), ((3405, 3423), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (3420, 3423), True, 'import numpy as np\n')] |
'''
init: Jan2019
Author: LAZ
Goals:
- Test OO's implementation of REINFORCE
His claim of testing his own algorithm is a LIE! tsk tsk
- Hey! depends on how you define "testing"...
- Test of running performance passe; this was true of the original code
(see mingame-explore-v2.ipynb)
- Test of learning performance actually fails... the RF trained net performs worse after training
- Performance metric: how long pole stays up (episode length)
- (see test_RF.ipynb)
'''
# from tensorflow.python import debug as tf_debug # debug
import importlib # debug
import itertools
import gym
import minority_agent
import numpy as np
import tensorflow as tf
# --- Module-level script setup: environment, agent, and TF session ---
importlib.reload(minority_agent) # debug
tf.reset_default_graph()
env = gym.make('CartPole-v0')
learning_rate = 0.01
num_episodes = 500
# One sub-list per transition field, shared across ALL episodes.
rollout = [[] for i in range(4)]
# rollout is [states | actions | rewards | next_states]
episode_rewards = np.zeros(num_episodes)
episode_lengths = np.zeros(num_episodes)
# sess = tf.Session()
# REINFORCE agent over CartPole's 4-dim observation with one action output.
REINFORCE = minority_agent.REINFORCE_MG(name='Tester',
s_size=env.observation_space.shape[0],
a_size=1,
trainer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate),
)
# The agent builds its own graph; the session must be created on it.
sess = tf.Session(graph=REINFORCE.graph)
# sess = tf_debug.LocalCLIDebugWrapperSession(sess) # debug
REINFORCE.init_graph(sess)
# sess.run(tf.global_variables_initializer())
# sess.run(REINFORCE.init_var())
def process_state(state):
    """Prepend a batch axis so TensorFlow sees shape (1, 4) instead of (4,)."""
    return np.asarray(state)[np.newaxis]
# --- Training: roll out episodes and apply policy-gradient updates ---
for i_episode in range(num_episodes):
    state = env.reset()
    # NOTE(review): 'episode' is assigned but never used.
    episode = []
    # One step in the environment
    for t in itertools.count():
        state = process_state(state)
        # Take a step
        # tensor_states = tf.get_variable('Tester/states:0')
        # tensor_actions = tf.get_variable('Tester/output_action:0')
        # action = sess.run(tensor_actions, feed_dict={tensor_states: state})
        # action = sess.run(REINFORCE.a, feed_dict={REINFORCE.states: state})
        action = REINFORCE.generate_action(sess, state)
        next_state, reward, done, _ = env.step(np.squeeze(action))
        # Keep track of the transition
        # NOTE(review): rollout is never cleared between episodes, so every
        # train() call below sees all transitions collected so far — confirm
        # this accumulation is intended.
        rollout[0].append(state)
        rollout[1].append(action)
        rollout[2].append(reward)
        rollout[3].append(next_state)
        # Update statistics
        episode_rewards[i_episode] += reward
        episode_lengths[i_episode] = t
        # Print out which step we're on, useful for debugging.
        # (Displays the PREVIOUS episode's total reward.)
        print("\rStep {} @ Episode {}/{} ({})".format(
            t, i_episode + 1, num_episodes, episode_rewards[i_episode - 1]), end="")
        # sys.stdout.flush()
        if done:
            break
        state = next_state
    # Go through the episode and make policy updates
    REINFORCE.train(rollout, sess, 1.0)
# --- Evaluation: run one greedy episode with the trained policy ---
# Now test it!
print('Testing...')
state = env.reset()
done = False
rewards = []
while done is False:
    # env.render() # cannot use this on OSX because OpenAI GYM causes segfaults
    state = process_state(state)
    action = REINFORCE.generate_action(sess, state)
    next_state, reward, done, _ = env.step(np.squeeze(action))
    rewards.append(reward)
    state = next_state
# CartPole-v0 gives reward 1.0 per surviving step, so this holds whenever
# every collected reward equals 1.
assert sum(rewards) == len(rewards), "Test Failed!"
print("Test Succeeded!")
| [
"tensorflow.reset_default_graph",
"tensorflow.Session",
"numpy.squeeze",
"tensorflow.train.GradientDescentOptimizer",
"numpy.zeros",
"itertools.count",
"importlib.reload",
"numpy.expand_dims",
"gym.make"
] | [((717, 749), 'importlib.reload', 'importlib.reload', (['minority_agent'], {}), '(minority_agent)\n', (733, 749), False, 'import importlib\n'), ((759, 783), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (781, 783), True, 'import tensorflow as tf\n'), ((790, 813), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (798, 813), False, 'import gym\n'), ((963, 985), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (971, 985), True, 'import numpy as np\n'), ((1004, 1026), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (1012, 1026), True, 'import numpy as np\n'), ((1395, 1428), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'REINFORCE.graph'}), '(graph=REINFORCE.graph)\n', (1405, 1428), True, 'import tensorflow as tf\n'), ((1723, 1747), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (1737, 1747), True, 'import numpy as np\n'), ((1877, 1894), 'itertools.count', 'itertools.count', ([], {}), '()\n', (1892, 1894), False, 'import itertools\n'), ((1282, 1344), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1315, 1344), True, 'import tensorflow as tf\n'), ((3358, 3376), 'numpy.squeeze', 'np.squeeze', (['action'], {}), '(action)\n', (3368, 3376), True, 'import numpy as np\n'), ((2344, 2362), 'numpy.squeeze', 'np.squeeze', (['action'], {}), '(action)\n', (2354, 2362), True, 'import numpy as np\n')] |
from subprocess import check_call
import os
import shutil as sh
from glob import glob
import nbformat as nbf
from nbclean import NotebookCleaner
from tqdm import tqdm
import numpy as np
SITE_ROOT = os.path.expanduser('~/github/forks/python/teaching/dsep/jupyterhub-for-education-template')
SITE_NAVIGATION = os.path.join(SITE_ROOT, '_data', 'navigation.yml')
TEMPLATE_PATH = os.path.expanduser('~/github/forks/python/teaching/dsep/jupyterhub-for-education-template/assets/templates/jekyllmd.tpl')
TEXTBOOK_FOLDER_NAME = 'textbook'
NOTEBOOKS_FOLDER_NAME = 'notebooks'
TEXTBOOK_FOLDER = os.path.join(SITE_ROOT, TEXTBOOK_FOLDER_NAME)
NOTEBOOKS_FOLDER = os.path.join(SITE_ROOT, NOTEBOOKS_FOLDER_NAME)
IMAGES_FOLDER = os.path.join(SITE_ROOT, 'images')
MARKDOWN_FILE = os.path.join(SITE_ROOT, 'SUMMARY.md')
def _markdown_to_files(path_markdown, indent=2):
    """Parse a SUMMARY-style markdown file into (title, link, level) tuples.

    Every bullet line of the form '* [Title](link)' produces one tuple;
    *level* is the bullet's leading-space count divided by *indent*.
    """
    with open(path_markdown, 'r') as handle:
        content = handle.readlines()
    entries = []
    for raw in content:
        if not raw.strip().startswith('* '):
            continue
        name = _between_symbols(raw, '[', ']')
        target = _between_symbols(raw, '(', ')')
        leading = len(raw) - len(raw.lstrip(' '))
        entries.append((name, target, leading / indent))
    return entries
def _between_symbols(string, c1, c2):
"""Will return empty string if nothing is between c1 and c2."""
for char in [c1, c2]:
if char not in string:
raise ValueError("Couldn't find charachter {} in string {}".format(
char, string))
return string[string.index(c1)+1:string.index(c2)]
def _clean_notebook(notebook):
    """Clean a notebook file in place and return its path.

    Removes empty cells and cells tagged with '# HIDDEN', clears stderr
    output, and overwrites the original file.
    """
    cleaner = NotebookCleaner(notebook)
    cleaner.remove_cells(empty=True)
    cleaner.remove_cells(search_text="# HIDDEN")
    cleaner.clear('stderr')
    # Save back over the input path so callers can keep using the same name.
    cleaner.save(notebook)
    return notebook
if __name__ == '__main__':
    # --- Collect the files we'll convert over ---
    files = _markdown_to_files(MARKDOWN_FILE)
    for ix_file, (title, link, level) in tqdm(list(enumerate(files))):
        # Entries without a link are section headers only.
        if len(link) == 0:
            continue
        if not os.path.exists(link):
            raise ValueError("Could not find file {}".format(link))
        # Collecting and renaming files/folders
        filename = os.path.basename(link)
        new_folder = os.path.dirname(link).replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME)
        new_file_path = os.path.join(new_folder, filename.replace('.ipynb', '.md'))
        # Collect previous/next md file for pagination
        if ix_file == 0:
            prev_file_link = ''
            prev_file_title = ''
        else:
            prev_file_title, prev_file_link, _ = files[ix_file-1]
            prev_file_link = prev_file_link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '')
        if ix_file == len(files) - 1:
            next_file_link = ''
            next_file_title = ''
        else:
            next_file_title, next_file_link, _ = files[ix_file+1]
            next_file_link = next_file_link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '')
        if not os.path.isdir(new_folder):
            os.makedirs(new_folder)
        # Create a temporary version of the notebook we can modify
        tmp_notebook = link + '_TMP'
        sh.copy2(link, tmp_notebook)
        # Clean up the file before converting
        _clean_notebook(tmp_notebook)
        # Run nbconvert moving it to the output folder
        build_call = '--FilesWriter.build_directory={}'.format(new_folder)
        images_call = '--NbConvertApp.output_files_dir={}'.format(
            os.path.join(IMAGES_FOLDER, new_folder))
        check_call(['jupyter', 'nbconvert', '--log-level="CRITICAL"',
                    '--to', 'markdown', '--template', TEMPLATE_PATH,
                    images_call, build_call, tmp_notebook])
        # Images: replace relative image paths to baseurl paths
        IMG_STRINGS = [ii*'../' + IMAGES_FOLDER for ii in range(4)]
        with open(new_file_path, 'r') as ff:
            lines = ff.readlines()
        for ii, line in enumerate(lines):
            for IMG_STRING in IMG_STRINGS:
                line = line.replace(IMG_STRING, '{{ site.baseurl }}/images')
            lines[ii] = line
        # Front-matter YAML
        yaml = []
        yaml += ['---']
        yaml += ['layout: textbook']
        yaml += ['interact_link: {}'.format(link.lstrip('./'))]
        yaml += ['previous:']
        yaml += ['  url: {}'.format(prev_file_link.lstrip('.'))]
        yaml += ['  title: {}'.format(prev_file_title)]
        yaml += ['next:']
        yaml += ['  url: {}'.format(next_file_link.lstrip('.'))]
        yaml += ['  title: {}'.format(next_file_title)]
        yaml += ['sidebar:']
        yaml += ['  nav: sidebar-textbook']
        yaml += ['---']
        yaml = [ii + '\n' for ii in yaml]
        lines = yaml + lines
        # Add an extra slash to the inline math before `#` since Jekyll strips it
        inline_replace_chars = ['#']
        for ii, line in enumerate(lines):
            dollars = np.where(['$' == char for char in line])[0]
            # Make sure we have at least two dollar signs and they
            # Aren't right next to each other
            # NOTE(review): 'dollars[1:] - dollars[:1]' compares against only
            # the FIRST dollar sign; 'dollars[:-1]' (consecutive gaps) looks
            # intended — confirm. Also 'ii' here shadows the outer loop index.
            if len(dollars) > 2 and all(ii > 1 for ii in (dollars[1:] - dollars[:1])):
                for char in inline_replace_chars:
                    lines[ii] = line.replace('\\#', '\\\\#')
        # Write the result
        with open(new_file_path, 'w') as ff:
            ff.writelines(lines)
        os.remove(tmp_notebook)
    # Generate sidebar
    sidebar_text = ['sidebar-textbook:']
    sp = ' '
    chapter_ix = 1
    for ix_file, (title, link, level) in tqdm(list(enumerate(files))):
        if level > 0 and len(link) == 0:
            continue
        # Top-level entries are chapters and get numbered titles.
        if level == 0:
            title = '{}. {}'.format(chapter_ix, title)
            chapter_ix += 1
        new_link = link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '').strip('.')
        space = ' ' if level == 0 else '  '
        level = int(level)
        sidebar_text.append(space + '- title: {}'.format(title))
        sidebar_text.append(space + '  class: level_{}'.format(level))
        if len(link) > 0:
            sidebar_text.append(space + '  url: {}'.format(new_link))
        if ix_file != (len(files) - 1) and level < files[ix_file + 1][-1]:
            sidebar_text.append(space + '  children:')
    sidebar_text = [ii + '\n' for ii in sidebar_text]
    # Replace everything after the sidebar marker in the navigation file.
    with open(SITE_NAVIGATION, 'r') as ff:
        lines = ff.readlines()
    text_start = np.where(['# --- Textbook sidebar ---' in line for line in lines])[0][0]
    lines = lines[:text_start+1]
    lines += sidebar_text
    with open(SITE_NAVIGATION, 'w') as ff:
        ff.writelines(lines)
    print('Done!')
| [
"os.path.exists",
"nbclean.NotebookCleaner",
"os.makedirs",
"shutil.copy2",
"subprocess.check_call",
"numpy.where",
"os.path.join",
"os.path.dirname",
"os.path.isdir",
"os.path.basename",
"os.path.expanduser",
"os.remove"
] | [((199, 295), 'os.path.expanduser', 'os.path.expanduser', (['"""~/github/forks/python/teaching/dsep/jupyterhub-for-education-template"""'], {}), "(\n '~/github/forks/python/teaching/dsep/jupyterhub-for-education-template')\n", (217, 295), False, 'import os\n'), ((309, 359), 'os.path.join', 'os.path.join', (['SITE_ROOT', '"""_data"""', '"""navigation.yml"""'], {}), "(SITE_ROOT, '_data', 'navigation.yml')\n", (321, 359), False, 'import os\n'), ((376, 507), 'os.path.expanduser', 'os.path.expanduser', (['"""~/github/forks/python/teaching/dsep/jupyterhub-for-education-template/assets/templates/jekyllmd.tpl"""'], {}), "(\n '~/github/forks/python/teaching/dsep/jupyterhub-for-education-template/assets/templates/jekyllmd.tpl'\n )\n", (394, 507), False, 'import os\n'), ((586, 631), 'os.path.join', 'os.path.join', (['SITE_ROOT', 'TEXTBOOK_FOLDER_NAME'], {}), '(SITE_ROOT, TEXTBOOK_FOLDER_NAME)\n', (598, 631), False, 'import os\n'), ((651, 697), 'os.path.join', 'os.path.join', (['SITE_ROOT', 'NOTEBOOKS_FOLDER_NAME'], {}), '(SITE_ROOT, NOTEBOOKS_FOLDER_NAME)\n', (663, 697), False, 'import os\n'), ((714, 747), 'os.path.join', 'os.path.join', (['SITE_ROOT', '"""images"""'], {}), "(SITE_ROOT, 'images')\n", (726, 747), False, 'import os\n'), ((764, 801), 'os.path.join', 'os.path.join', (['SITE_ROOT', '"""SUMMARY.md"""'], {}), "(SITE_ROOT, 'SUMMARY.md')\n", (776, 801), False, 'import os\n'), ((1782, 1807), 'nbclean.NotebookCleaner', 'NotebookCleaner', (['notebook'], {}), '(notebook)\n', (1797, 1807), False, 'from nbclean import NotebookCleaner\n'), ((2386, 2408), 'os.path.basename', 'os.path.basename', (['link'], {}), '(link)\n', (2402, 2408), False, 'import os\n'), ((3429, 3457), 'shutil.copy2', 'sh.copy2', (['link', 'tmp_notebook'], {}), '(link, tmp_notebook)\n', (3437, 3457), True, 'import shutil as sh\n'), ((3802, 3960), 'subprocess.check_call', 'check_call', (['[\'jupyter\', \'nbconvert\', \'--log-level="CRITICAL"\', \'--to\', \'markdown\',\n \'--template\', TEMPLATE_PATH, 
images_call, build_call, tmp_notebook]'], {}), '([\'jupyter\', \'nbconvert\', \'--log-level="CRITICAL"\', \'--to\',\n \'markdown\', \'--template\', TEMPLATE_PATH, images_call, build_call,\n tmp_notebook])\n', (3812, 3960), False, 'from subprocess import check_call\n'), ((5689, 5712), 'os.remove', 'os.remove', (['tmp_notebook'], {}), '(tmp_notebook)\n', (5698, 5712), False, 'import os\n'), ((2228, 2248), 'os.path.exists', 'os.path.exists', (['link'], {}), '(link)\n', (2242, 2248), False, 'import os\n'), ((3253, 3278), 'os.path.isdir', 'os.path.isdir', (['new_folder'], {}), '(new_folder)\n', (3266, 3278), False, 'import os\n'), ((3292, 3315), 'os.makedirs', 'os.makedirs', (['new_folder'], {}), '(new_folder)\n', (3303, 3315), False, 'import os\n'), ((3753, 3792), 'os.path.join', 'os.path.join', (['IMAGES_FOLDER', 'new_folder'], {}), '(IMAGES_FOLDER, new_folder)\n', (3765, 3792), False, 'import os\n'), ((6743, 6811), 'numpy.where', 'np.where', (["[('# --- Textbook sidebar ---' in line) for line in lines]"], {}), "([('# --- Textbook sidebar ---' in line) for line in lines])\n", (6751, 6811), True, 'import numpy as np\n'), ((2430, 2451), 'os.path.dirname', 'os.path.dirname', (['link'], {}), '(link)\n', (2445, 2451), False, 'import os\n'), ((5219, 5261), 'numpy.where', 'np.where', (["[('$' == char) for char in line]"], {}), "([('$' == char) for char in line])\n", (5227, 5261), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 11:37:23 2017
@author: rjackson
"""
from matplotlib import use
use('agg')
import pyart
from netCDF4 import Dataset
import numpy as np
import math
import os.path
from glob import glob
from datetime import datetime, timedelta
from dask import bag as db
from distributed import Client, LocalCluster
map_path = '100km_cfradial_to_scrib_mapping.nc'
echo_top_data_path = '/lcrc/group/earthscience/rjackson/echo_tops/'
century_weight_file = '100km_weights.nc'
out_path = '/lcrc/group/earthscience/rjackson/echo_top_rrm_grid/'
def convert_echo_tops(echo_top_data_file):
    """Regrid one CPOL echo-top file onto the TWP RRM SCRIP grid.

    For each time step in *echo_top_data_file*, the 'cpol_T' echo-top field
    is mapped onto the destination grid using the precomputed sparse SCRIP
    weights and written to ``out_path`` as a one-record SCRIP-style netCDF
    file. Output files that already exist are skipped.

    Relies on module-level globals set up in ``__main__``: points, S, row,
    col, frac_b, dest_centers_*, dest_corners_*, dest_mask, grid_size,
    num_corners, grid_rank.

    Parameters
    ----------
    echo_top_data_file : str
        Path to an input netCDF file with 'time' and 'cpol_T' variables.
    """
    echo_top_dataset = Dataset(echo_top_data_file)
    # 'time' units are of the form 'seconds since YYYY-mm-dd HH:MM:SS'.
    time_units = echo_top_dataset['time'].units
    basetime = datetime.strptime(time_units, 'seconds since %Y-%m-%d %H:%M:%S')
    num_scans = echo_top_dataset['cpol_T'][:].shape[0]
    for i in range(num_scans):
        ctop = echo_top_dataset['cpol_T'][i]
        time_after = timedelta(seconds=float(echo_top_dataset['time'][i])) + basetime
        out_file_name = (out_path + 'cloudtop_twprrm' +
                         time_after.strftime('%Y%m%d.%H%M%S') + '.nc')
        if os.path.isfile(out_file_name):
            print(out_file_name + ' Already exists...skipping!')
            continue
        print('Processing scan ' + str(i))
        ctop_dest = np.nan*np.zeros(dest_centers_lat.shape)
        count = np.zeros(dest_centers_lat.shape)
        # Source-grid locations for each sparse weight entry.
        # Fixed: the original reused 'i' here, shadowing the scan index.
        equivalent_point = [np.where(points == col[j]-1) for j in range(0, len(S))]
        for j in range(0, len(S)):
            if len(equivalent_point[j][0]) == 1:
                ctop_src = ctop[equivalent_point[j][0], equivalent_point[j][1]]
                if ctop_src > -999.0:
                    # Lazily switch the destination cell from NaN to an
                    # accumulator the first time it receives data.
                    if not np.isfinite(ctop_dest[row[j]-1]):
                        ctop_dest[row[j]-1] = 0
                    ctop_dest[row[j]-1] = ctop_dest[row[j]-1] + S[j]*ctop_src
                    count[row[j]-1] += 1
        # Normalize by the destination-cell overlap fraction; mask cells
        # with too few contributing source points.
        ctop_dest[frac_b != 0] = ctop_dest[frac_b != 0]/frac_b[frac_b != 0]
        ctop_dest[count < 50] = np.nan
        # Create an output SCRIP file
        new_dataset = Dataset(out_file_name, mode='w')
        new_dataset.createDimension('grid_size', grid_size)
        new_dataset.createDimension('grid_corners', num_corners)
        new_dataset.createDimension('grid_rank', grid_rank)
        radar_est_cloud_top = new_dataset.createVariable(
            'radar_est_cloud_top', ctop_dest.dtype, 'grid_size')
        radar_est_cloud_top.units = 'm'
        radar_est_cloud_top.long_name = 'Radar estimated cloud top height'
        radar_est_cloud_top[:] = ctop_dest
        grid_center_lat = new_dataset.createVariable(
            'grid_center_lat', dest_corners_lat.dtype, 'grid_size')
        grid_center_lat.units = 'degrees'
        grid_center_lat[:] = dest_centers_lat
        grid_center_lon = new_dataset.createVariable(
            'grid_center_lon', dest_corners_lat.dtype, 'grid_size')
        # Fixed: the original re-set grid_center_lat.units here, leaving
        # grid_center_lon without a units attribute.
        grid_center_lon.units = 'degrees'
        grid_center_lon[:] = dest_centers_lon
        grid_corner_lat = new_dataset.createVariable(
            'grid_corner_lat', dest_corners_lat.dtype,
            ('grid_size', 'grid_corners'), fill_value=float(9.97e36))
        grid_corner_lat.units = 'degrees'
        grid_corner_lat[:] = dest_corners_lat
        grid_corner_lon = new_dataset.createVariable(
            'grid_corner_lon', dest_corners_lat.dtype,
            ('grid_size', 'grid_corners'), fill_value=float(9.97e36))
        grid_corner_lon.units = 'degrees'
        grid_corner_lon[:] = dest_corners_lon
        grid_dims = new_dataset.createVariable('grid_dims', float, 'grid_rank')
        grid_dims[:] = 1
        grid_imask = new_dataset.createVariable(
            'grid_imask', dest_corners_lat.dtype, 'grid_size',
            fill_value=float(9.97e36))
        grid_imask[:] = dest_mask
        new_dataset.close()
    # Fixed: close the input dataset instead of leaking the file handle.
    echo_top_dataset.close()
if __name__ == '__main__':
    # Load the precomputed SCRIP remapping weights and the cfradial->SCRIP
    # index mapping; these become the module-level globals that
    # convert_echo_tops() reads.
    century_weights_dataset = Dataset(century_weight_file)
    map_dataset = Dataset(map_path)
    points = map_dataset['scrib_index'][:]
    dest_corners_lat = century_weights_dataset['yv_b'][:]
    dest_centers_lat = century_weights_dataset['yc_b'][:]
    dest_corners_lon = century_weights_dataset['xv_b'][:]
    dest_centers_lon = century_weights_dataset['xc_b'][:]
    dest_mask = century_weights_dataset['mask_b'][:]
    dest_area = century_weights_dataset['area_b'][:]
    # Sparse remapping matrix in coordinate form: value S at (row, col).
    S = century_weights_dataset['S'][:]
    row = century_weights_dataset['row'][:]
    col = century_weights_dataset['col'][:]
    frac_b = century_weights_dataset['frac_b'][:]
    num_corners = dest_corners_lat.shape[1]
    grid_size = dest_corners_lat.shape[0]
    grid_rank = 1
    grid_dims = 1
    # Process every input file in parallel with a dask bag.
    file_list = glob(echo_top_data_path + '*.cdf')
    the_bag = db.from_sequence(file_list)
    the_bag.map(convert_echo_tops).compute()
| [
"matplotlib.use",
"datetime.datetime.strptime",
"numpy.where",
"netCDF4.Dataset",
"numpy.zeros",
"numpy.isfinite",
"dask.bag.from_sequence",
"glob.glob"
] | [((137, 147), 'matplotlib.use', 'use', (['"""agg"""'], {}), "('agg')\n", (140, 147), False, 'from matplotlib import use\n'), ((661, 688), 'netCDF4.Dataset', 'Dataset', (['echo_top_data_file'], {}), '(echo_top_data_file)\n', (668, 688), False, 'from netCDF4 import Dataset\n'), ((750, 812), 'datetime.datetime.strptime', 'datetime.strptime', (['basetime', '"""seconds since %Y-%m-%d %H:%M:%S"""'], {}), "(basetime, 'seconds since %Y-%m-%d %H:%M:%S')\n", (767, 812), False, 'from datetime import datetime, timedelta\n'), ((4859, 4887), 'netCDF4.Dataset', 'Dataset', (['century_weight_file'], {}), '(century_weight_file)\n', (4866, 4887), False, 'from netCDF4 import Dataset\n'), ((4906, 4923), 'netCDF4.Dataset', 'Dataset', (['map_path'], {}), '(map_path)\n', (4913, 4923), False, 'from netCDF4 import Dataset\n'), ((5630, 5664), 'glob.glob', 'glob', (["(echo_top_data_path + '*.cdf')"], {}), "(echo_top_data_path + '*.cdf')\n", (5634, 5664), False, 'from glob import glob\n'), ((5679, 5706), 'dask.bag.from_sequence', 'db.from_sequence', (['file_list'], {}), '(file_list)\n', (5695, 5706), True, 'from dask import bag as db\n'), ((1337, 1369), 'numpy.zeros', 'np.zeros', (['dest_centers_lat.shape'], {}), '(dest_centers_lat.shape)\n', (1345, 1369), True, 'import numpy as np\n'), ((2111, 2143), 'netCDF4.Dataset', 'Dataset', (['out_file_name'], {'mode': '"""w"""'}), "(out_file_name, mode='w')\n", (2118, 2143), False, 'from netCDF4 import Dataset\n'), ((1283, 1315), 'numpy.zeros', 'np.zeros', (['dest_centers_lat.shape'], {}), '(dest_centers_lat.shape)\n', (1291, 1315), True, 'import numpy as np\n'), ((1402, 1432), 'numpy.where', 'np.where', (['(points == col[i] - 1)'], {}), '(points == col[i] - 1)\n', (1410, 1432), True, 'import numpy as np\n'), ((1709, 1743), 'numpy.isfinite', 'np.isfinite', (['ctop_dest[row[i] - 1]'], {}), '(ctop_dest[row[i] - 1])\n', (1720, 1743), True, 'import numpy as np\n')] |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class LearningRate:
    """Exponential step-decay learning-rate schedule with optional warmup.

    The base rate is ``2 ** base_learning_rate_exponent * total_batch_size``.
    It is multiplied by ``lr_decay_rate`` every ``epochs / lr_drops`` epochs,
    and during the first ``warmup_epochs`` epochs it ramps up linearly from 0.
    """
    def __init__(self, opts, total_iterations):
        self.decay = opts["lr_decay_rate"]
        self.lr = opts["total_batch_size"] * 2 ** opts["base_learning_rate_exponent"]
        self.iterations_per_epoch = total_iterations / opts["epochs"]
        self.freq = opts["epochs"] / opts["lr_drops"]
        self.drops = 0
        # Warmup is disabled entirely when 'epochs' is falsy.
        self.warmup = opts["warmup_epochs"] if opts["epochs"] else False
        if self.warmup:
            self.warmup_length = opts["warmup_epochs"] * self.iterations_per_epoch

    def feed_dict_lr(self, iteration):
        """Return the learning rate to use at the given global iteration."""
        epoch = iteration / self.iterations_per_epoch
        pending = epoch / self.freq - self.drops
        if pending >= 1:
            # Apply every decay step that has elapsed since the last call.
            n_drops = int(np.floor(pending))
            assert n_drops > 0
            self.lr *= self.decay ** n_drops
            self.drops += n_drops
        if self.warmup and iteration < self.warmup_length:
            # Linear ramp from 0 to the current rate over the warmup period.
            return iteration * self.lr / self.warmup_length
        return self.lr
def add_arguments(parser):
    """Register the exponential-LR-decay command-line options on *parser*.

    Returns the parser to allow chaining.
    """
    specs = (
        ('--lr-decay-rate', float, None, "Learning rate rate"),
        ('--lr-drops', int, None, "Number of equally spaced learning rate drops"),
        ('--warmup-epochs', int, 5,
         "Warmup length in epochs (Default=5, set to 0 for no warmup)"),
    )
    group = parser.add_argument_group('Exponential Learning Rate Decay')
    for flag, arg_type, default, help_text in specs:
        # default=None matches argparse's implicit default for the first two.
        group.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser
def set_defaults(opts):
    """Append a human-readable description of the LR schedule to opts['summary_str'].

    Returns the (mutated) opts dict.
    """
    pieces = ["Exponential LR schedule\n"]
    if opts["warmup_epochs"] > 0:
        # '{warmup_epochs}' is left as a literal placeholder, filled in later.
        pieces.append(" Warmup: {} epochs\n".format('{warmup_epochs}'))
    else:
        pieces.append(" No warmup\n")
    pieces.append(" Decay Rate: {lr_decay_rate}\n")
    pieces.append(" Decayed {lr_drops} times)\n")
    opts['summary_str'] += ''.join(pieces)
    return opts
| [
"numpy.floor"
] | [((1384, 1424), 'numpy.floor', 'np.floor', (['(epoch / self.freq - self.drops)'], {}), '(epoch / self.freq - self.drops)\n', (1392, 1424), True, 'import numpy as np\n')] |
import math
import numpy as np
from numpy import linalg as LA
import numpy as np
import scipy
from scipy.sparse import *
from scipy.sparse.linalg import norm
import time
import nonnegfac
import importlib
importlib.reload(nonnegfac)
def claculate_norm(X, A, K, PARFOR_FLAG):
    """Compute the norms and total size used by the fit/RMSE calculations.

    Args:
        X: list of at least K scipy sparse matrices (one per subject).
        A: dense numpy matrix coupled with the subject mode.
        K: number of entries of X to include.
        PARFOR_FLAG: kept for interface compatibility; the "parallel" and
            serial branches of the original computed identical results, so a
            single loop is used for both values.

    Returns:
        normX: sum of squared Frobenius norms of X[0..K-1].
        normA: squared Frobenius norm of A.
        Size_input: total number of entries in A plus all X[k].
    """
    Size_input = A.shape[0] * A.shape[1]
    normA = np.sum(np.square(A))
    normX = 0
    # The original duplicated this loop under both PARFOR_FLAG branches and
    # also accumulated a non-zero count that was never returned; both the
    # duplication and the dead computation have been removed.
    for k in range(K):
        normX += scipy.sparse.linalg.norm(X[k], 'fro') ** 2
        Size_input += X[k].shape[0] * X[k].shape[1]
    return normX, normA, Size_input
def calculate_RMSE(X, A, U, W, V, F, normX, normA, Size_input, K, PARFOR_FLAG):
    """Return (tensor fit, matrix fit, overall RMSE) of the coupled factorization.

    Each fit value is 1 minus the ratio of the squared residual to the squared
    norm of the corresponding data; RMSE pools both residuals over Size_input
    entries. PARFOR_FLAG is accepted for interface compatibility.
    """
    # Squared Frobenius residual of each slice X[k] vs its reconstruction
    # U[k] diag(W[k]) V^T, accumulated over all K subjects.
    tensor_residual = sum(
        LA.norm(X[k] - U[k] @ np.diag(W[k, :]) @ V.T, 'fro') ** 2
        for k in range(K)
    )
    # Squared residual of the coupled matrix A vs W F^T.
    matrix_residual = LA.norm((A - (W @ F.T)), 'fro') ** 2
    fit_tensor = 1 - (tensor_residual / normX)
    fit_matrix = 1 - (matrix_residual / normA)
    RMSE = math.sqrt((tensor_residual + matrix_residual) / Size_input)
    return fit_tensor, fit_matrix, RMSE
def TASTE_BPP(X, A, R, conv_tol, seed, PARFOR_FLAG, normX, normA, Size_input, Constraints, mu, lambda_):
    """Fit a coupled PARAFAC2-style model by alternating block-pivot NNLS updates.

    Alternately updates the orthogonal factors Q[k], shared factor H, weights
    W, static factor F, feature factor V and per-subject factors U[k] until
    the change in RMSE between sweeps drops below conv_tol.

    Args:
        X: list of K matrices, one per subject; X[k] has shape (I_k, J).
        A: dense matrix coupled with the subject mode, shape (K, n_static).
        R: target rank of the factorization.
        conv_tol: stop when |RMSE - prev_RMSE| <= conv_tol.
        seed: RNG seed used to initialize all factor matrices.
        PARFOR_FLAG: selects the "parallel" code path; both branches below
            compute identical quantities.
        normX, normA, Size_input: precomputed norms/sizes (see claculate_norm),
            forwarded to calculate_RMSE.
        Constraints: unused by this implementation.
        mu: weight of the PARAFAC2 orthogonality coupling term.
        lambda_: weight of the coupled-matrix (A ~ W F^T) term.

    Returns:
        Tuple (TOTAL_running_TIME, RMSE, FIT_T, FIT_M, RMSE_TIME, U, Q, H, V, W, F).
    """
    tStart = time.time()
    RMSE_TIME = []  # (elapsed seconds, RMSE) recorded after every sweep
    ROOTPATH = ''
    J = X[0].shape[1] # number of features (variables)
    K = len(X) # number of subjects
    Q = [] # len(Q) = K
    U = [] # len(U) = K
    np.random.seed(seed) # initializing the modes based on some seed
    V = np.random.rand(J, R)
    W = np.random.rand(K, R)
    H = np.random.rand(R)
    F = np.random.rand(A.shape[1], R)
    for k in range(K):
        U.append(np.random.rand(X[k].shape[0], R))
    prev_RMSE = 0
    RMSE = 1
    itr = 0
    TOTAL_running_TIME = 0
    beta = 1
    alpha = 1
    while abs(RMSE - prev_RMSE) > conv_tol:
        itr = itr + 1
        t_tennn = time.time()
        # update Q_k: orthogonal Procrustes solution via the SVD of U[k] H.
        # NOTE(review): Q is never cleared between sweeps, so this append grows
        # Q by K entries per sweep while Q[k] below keeps reading the entries
        # produced by the first sweep - likely a bug; confirm against the
        # reference implementation.
        # if PARFOR_FLAG:
        for k in range(K):
            T1, _, T2 = np.linalg.svd(mu * (U[k] @ H.reshape(-1, 1)), full_matrices=False)
            Q.append(T1 @ T2)
        # update H as the average of Q[k]^T U[k] over subjects.
        Q_T_U = 0
        if (PARFOR_FLAG):
            for k in range(K):
                Q_T_U += (mu * np.transpose(Q[k]) @ U[k])
        else:
            for k in range(K):
                Q_T_U += (mu * np.transpose(Q[k]) @ U[k])
        H = Q_T_U / (K * mu)
        # update S_k (the rows of W) by NNLS on the normal equations,
        # coupling the tensor term with the static-matrix term via lambda_.
        V_T_V = V.T @ V
        f_t_f = F.T @ F
        # if PARFOR_FLAG:
        for k in range(K):
            k_hatrio_rao = np.diag(U[k].T @ X[k] @ V)
            W[k, :] = nonnegfac.nnlsm_blockpivot(((U[k].T @ U[k]) * V_T_V) + (lambda_ * f_t_f),
                                                   (k_hatrio_rao + lambda_ * F.T @ A[k, :].T).reshape(-1, 1), 1, W[k, :].T)[0].T
        # update F from the coupled-matrix term A ~ W F^T.
        F = nonnegfac.nnlsm_blockpivot(lambda_ * W.T @ W, lambda_ * W.T @ A, 1, F.T)[0].T
        U_S_T_U_S = 0
        U_S_T_X = 0
        # update V: accumulate the normal equations over all subjects.
        if PARFOR_FLAG:
            for k in range(K):
                U_S = U[k] * W[k, :] # element wise multiplication
                U_S_T_U_S = U_S_T_U_S + np.transpose(U_S) @ U_S
                U_S_T_X += np.transpose(U_S) @ X[k]
        else:
            for k in range(K):
                U_S = U[k] * W[k, :] # element wise multiplication
                U_S_T_U_S = U_S_T_U_S + np.transpose(U_S) @ U_S
                U_S_T_X += np.transpose(U_S) @ X[k]
        V = nonnegfac.nnlsm_blockpivot(U_S_T_U_S, U_S_T_X, 1, V.T)[0].T
        # update U_k by NNLS, regularized toward Q[k] H by mu.
        # if PARFOR_FLAG:
        for k in range(K):
            V_S = V * W[k, :] # element wise multiplication
            V_S_T_V_S = np.transpose(V_S) @ V_S + mu * np.eye(R)
            U_S_T_X = np.transpose(V_S) @ np.transpose(X[k]) + (mu * np.transpose(H) @ np.transpose(Q[k]))
            U[k] = nonnegfac.nnlsm_blockpivot(V_S_T_V_S, U_S_T_X, 1, np.transpose(U[k]))[0].T
        tEnd = time.time()
        # NOTE(review): tStart is never reset, so this adds the cumulative
        # time-since-start on every sweep rather than the per-sweep duration;
        # TOTAL_running_TIME therefore over-counts - verify intent.
        TOTAL_running_TIME = TOTAL_running_TIME + (tEnd - tStart)
        prev_RMSE = RMSE
        FIT_T, FIT_M, RMSE = calculate_RMSE(X, A, U, W, V, F, normX, normA, Size_input, K, PARFOR_FLAG)
        RMSE_TIME.append((TOTAL_running_TIME, RMSE))
    return TOTAL_running_TIME, RMSE, FIT_T, FIT_M, RMSE_TIME, U, Q, H, V, W, F
def PARACoupl2_BPP( X,A,V,F,H,R,conv_tol,seed,PARFOR_FLAG,normX,normA,Size_input,Constraints,mu,lambda_ ):
    """Fit the coupled PARAFAC2 model with V, F and H fixed from a prior solve.

    Unlike TASTE_BPP, the factors V, F and H are supplied by the caller and
    held fixed; only Q[k], W and U[k] are updated each sweep until the change
    in RMSE drops below conv_tol.

    Args:
        X: list of K matrices, one per subject; X[k] has shape (I_k, J).
        A: dense matrix coupled with the subject mode, shape (K, n_static).
        V, F, H: fixed factor matrices from a previous fit.
        R: target rank of the factorization.
        conv_tol: stop when |RMSE - prev_RMSE| <= conv_tol.
        seed: RNG seed used to initialize W and the U[k].
        PARFOR_FLAG: forwarded to calculate_RMSE; unused for the updates here.
        normX, normA, Size_input: precomputed norms/sizes (see claculate_norm).
        Constraints: unused by this implementation.
        mu: weight of the PARAFAC2 orthogonality coupling term.
        lambda_: weight of the coupled-matrix (A ~ W F^T) term.

    Returns:
        Tuple (TOTAL_running_TIME, RMSE, FIT_T, FIT_M, RMSE_TIME, U, Q, H, V, W, F).
    """
    tStart=time.time()
    RMSE_TIME=[]  # (elapsed seconds, RMSE) recorded after every sweep
    ROOTPATH = ''
    J=X[0].shape[1] # number of features (variables)
    K = len(X)# number of subjects
    Q = []
    U = []
    np.random.seed(seed) # initializing the modes based on some seed
    W = np.random.rand(K,R)
    for k in range(K):
        U.append(np.random.rand(X[k].shape[0],R))
    prev_RMSE=0
    RMSE=1
    itr=0
    TOTAL_running_TIME=0
    beta=1
    alpha=1
    while abs(RMSE - prev_RMSE) > conv_tol:
        itr = itr + 1
        t_tennn = time.time()
        # update Q_k: orthogonal Procrustes solution via the SVD of U[k] H.
        # NOTE(review): as in TASTE_BPP, Q is never cleared between sweeps, so
        # Q[k] below keeps reading the first sweep's entries while the list
        # grows by K per sweep - likely a bug; confirm against the reference.
        # if PARFOR_FLAG:
        for k in range(K):
            T1, _, T2 = np.linalg.svd(mu * (U[k] @ H.reshape(-1, 1)), full_matrices=False)
            Q.append(T1 @ T2)
        #update S_k (the rows of W) by NNLS, coupling tensor and matrix terms.
        V_T_V=V.T @ V
        F_T_F=F.T @ F
        # if (PARFOR_FLAG)
        for k in range(K):
            k_hatrio_rao = np.diag(U[k].T @ X[k] @ V)
            W[k, :] = nonnegfac.nnlsm_blockpivot(((U[k].T @ U[k]) * V_T_V) + (lambda_ * F_T_F),
                                                   (k_hatrio_rao + lambda_ * F.T @ A[k, :].T).reshape(-1, 1), 1, W[k, :].T)[0].T
        #update U_k by NNLS, regularized toward Q[k] H by mu.
        # if PARFOR_FLAG:
        for k in range(K):
            V_S = V * W[k, :] # element wise multiplication
            V_S_T_V_S = V_S.T @ V_S + mu * np.eye(R)
            # V_S_T_V_S=sparse(V_S_T_V_S)
            U_S_T_X = V_S.T @ X[k].T + (mu * H.T @ Q[k].T)
            # U_S_T_X=sparse(U_S_T_X)
            U[k] = nonnegfac.nnlsm_blockpivot(V_S_T_V_S, U_S_T_X, 1, U[k].T)[0].T
        tEnd = time.time()
        # NOTE(review): tStart is never reset, so this accumulates cumulative
        # time-since-start each sweep rather than per-sweep durations.
        TOTAL_running_TIME = TOTAL_running_TIME + (tEnd - tStart)
        prev_RMSE = RMSE
        FIT_T, FIT_M,RMSE = calculate_RMSE( X,A,U,W,V,F,normX,normA,Size_input,K,PARFOR_FLAG )
        RMSE_TIME.append((TOTAL_running_TIME, RMSE))
    return TOTAL_running_TIME,RMSE,FIT_T,FIT_M,RMSE_TIME,U,Q,H,V,W,F
"numpy.eye",
"numpy.random.rand",
"math.sqrt",
"numpy.square",
"numpy.count_nonzero",
"numpy.diag",
"scipy.sparse.linalg.norm",
"numpy.random.seed",
"importlib.reload",
"numpy.linalg.norm",
"nonnegfac.nnlsm_blockpivot",
"numpy.transpose",
"time.time"
] | [((205, 232), 'importlib.reload', 'importlib.reload', (['nonnegfac'], {}), '(nonnegfac)\n', (221, 232), False, 'import importlib\n'), ((438, 457), 'numpy.count_nonzero', 'np.count_nonzero', (['A'], {}), '(A)\n', (454, 457), True, 'import numpy as np\n'), ((1447, 1475), 'math.sqrt', 'math.sqrt', (['(RMSE / Size_input)'], {}), '(RMSE / Size_input)\n', (1456, 1475), False, 'import math\n'), ((1679, 1690), 'time.time', 'time.time', ([], {}), '()\n', (1688, 1690), False, 'import time\n'), ((1876, 1896), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1890, 1896), True, 'import numpy as np\n'), ((1949, 1969), 'numpy.random.rand', 'np.random.rand', (['J', 'R'], {}), '(J, R)\n', (1963, 1969), True, 'import numpy as np\n'), ((1978, 1998), 'numpy.random.rand', 'np.random.rand', (['K', 'R'], {}), '(K, R)\n', (1992, 1998), True, 'import numpy as np\n'), ((2007, 2024), 'numpy.random.rand', 'np.random.rand', (['R'], {}), '(R)\n', (2021, 2024), True, 'import numpy as np\n'), ((2033, 2062), 'numpy.random.rand', 'np.random.rand', (['A.shape[1]', 'R'], {}), '(A.shape[1], R)\n', (2047, 2062), True, 'import numpy as np\n'), ((4761, 4772), 'time.time', 'time.time', ([], {}), '()\n', (4770, 4772), False, 'import time\n'), ((4925, 4945), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4939, 4945), True, 'import numpy as np\n'), ((4998, 5018), 'numpy.random.rand', 'np.random.rand', (['K', 'R'], {}), '(K, R)\n', (5012, 5018), True, 'import numpy as np\n'), ((477, 489), 'numpy.square', 'np.square', (['A'], {}), '(A)\n', (486, 489), True, 'import numpy as np\n'), ((1372, 1399), 'numpy.linalg.norm', 'LA.norm', (['(A - W @ F.T)', '"""fro"""'], {}), "(A - W @ F.T, 'fro')\n", (1379, 1399), True, 'from numpy import linalg as LA\n'), ((2319, 2330), 'time.time', 'time.time', ([], {}), '()\n', (2328, 2330), False, 'import time\n'), ((4300, 4311), 'time.time', 'time.time', ([], {}), '()\n', (4309, 4311), False, 'import time\n'), ((5261, 5272), 'time.time', 
'time.time', ([], {}), '()\n', (5270, 5272), False, 'import time\n'), ((6289, 6300), 'time.time', 'time.time', ([], {}), '()\n', (6298, 6300), False, 'import time\n'), ((2103, 2135), 'numpy.random.rand', 'np.random.rand', (['X[k].shape[0]', 'R'], {}), '(X[k].shape[0], R)\n', (2117, 2135), True, 'import numpy as np\n'), ((2942, 2968), 'numpy.diag', 'np.diag', (['(U[k].T @ X[k] @ V)'], {}), '(U[k].T @ X[k] @ V)\n', (2949, 2968), True, 'import numpy as np\n'), ((5058, 5090), 'numpy.random.rand', 'np.random.rand', (['X[k].shape[0]', 'R'], {}), '(X[k].shape[0], R)\n', (5072, 5090), True, 'import numpy as np\n'), ((5614, 5640), 'numpy.diag', 'np.diag', (['(U[k].T @ X[k] @ V)'], {}), '(U[k].T @ X[k] @ V)\n', (5621, 5640), True, 'import numpy as np\n'), ((587, 624), 'scipy.sparse.linalg.norm', 'scipy.sparse.linalg.norm', (['X[k]', '"""fro"""'], {}), "(X[k], 'fro')\n", (611, 624), False, 'import scipy\n'), ((783, 820), 'scipy.sparse.linalg.norm', 'scipy.sparse.linalg.norm', (['X[k]', '"""fro"""'], {}), "(X[k], 'fro')\n", (807, 820), False, 'import scipy\n'), ((1198, 1214), 'numpy.diag', 'np.diag', (['W[k, :]'], {}), '(W[k, :])\n', (1205, 1214), True, 'import numpy as np\n'), ((1255, 1279), 'numpy.linalg.norm', 'LA.norm', (['(X[k] - M)', '"""fro"""'], {}), "(X[k] - M, 'fro')\n", (1262, 1279), True, 'from numpy import linalg as LA\n'), ((3223, 3295), 'nonnegfac.nnlsm_blockpivot', 'nonnegfac.nnlsm_blockpivot', (['(lambda_ * W.T @ W)', '(lambda_ * W.T @ A)', '(1)', 'F.T'], {}), '(lambda_ * W.T @ W, lambda_ * W.T @ A, 1, F.T)\n', (3249, 3295), False, 'import nonnegfac\n'), ((3843, 3897), 'nonnegfac.nnlsm_blockpivot', 'nonnegfac.nnlsm_blockpivot', (['U_S_T_U_S', 'U_S_T_X', '(1)', 'V.T'], {}), '(U_S_T_U_S, U_S_T_X, 1, V.T)\n', (3869, 3897), False, 'import nonnegfac\n'), ((3577, 3594), 'numpy.transpose', 'np.transpose', (['U_S'], {}), '(U_S)\n', (3589, 3594), True, 'import numpy as np\n'), ((3806, 3823), 'numpy.transpose', 'np.transpose', (['U_S'], {}), '(U_S)\n', (3818, 3823), 
True, 'import numpy as np\n'), ((4042, 4059), 'numpy.transpose', 'np.transpose', (['V_S'], {}), '(V_S)\n', (4054, 4059), True, 'import numpy as np\n'), ((4073, 4082), 'numpy.eye', 'np.eye', (['R'], {}), '(R)\n', (4079, 4082), True, 'import numpy as np\n'), ((4105, 4122), 'numpy.transpose', 'np.transpose', (['V_S'], {}), '(V_S)\n', (4117, 4122), True, 'import numpy as np\n'), ((4125, 4143), 'numpy.transpose', 'np.transpose', (['X[k]'], {}), '(X[k])\n', (4137, 4143), True, 'import numpy as np\n'), ((4170, 4188), 'numpy.transpose', 'np.transpose', (['Q[k]'], {}), '(Q[k])\n', (4182, 4188), True, 'import numpy as np\n'), ((6042, 6051), 'numpy.eye', 'np.eye', (['R'], {}), '(R)\n', (6048, 6051), True, 'import numpy as np\n'), ((6210, 6267), 'nonnegfac.nnlsm_blockpivot', 'nonnegfac.nnlsm_blockpivot', (['V_S_T_V_S', 'U_S_T_X', '(1)', 'U[k].T'], {}), '(V_S_T_V_S, U_S_T_X, 1, U[k].T)\n', (6236, 6267), False, 'import nonnegfac\n'), ((2633, 2651), 'numpy.transpose', 'np.transpose', (['Q[k]'], {}), '(Q[k])\n', (2645, 2651), True, 'import numpy as np\n'), ((2736, 2754), 'numpy.transpose', 'np.transpose', (['Q[k]'], {}), '(Q[k])\n', (2748, 2754), True, 'import numpy as np\n'), ((3526, 3543), 'numpy.transpose', 'np.transpose', (['U_S'], {}), '(U_S)\n', (3538, 3543), True, 'import numpy as np\n'), ((3755, 3772), 'numpy.transpose', 'np.transpose', (['U_S'], {}), '(U_S)\n', (3767, 3772), True, 'import numpy as np\n'), ((4152, 4167), 'numpy.transpose', 'np.transpose', (['H'], {}), '(H)\n', (4164, 4167), True, 'import numpy as np\n'), ((4259, 4277), 'numpy.transpose', 'np.transpose', (['U[k]'], {}), '(U[k])\n', (4271, 4277), True, 'import numpy as np\n')] |
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
def _gradient_magnitude(image):
    """Sobel gradient magnitude of *image* (constant boundary handling)."""
    gx = ndimage.sobel(image, axis=0, mode='constant')
    gy = ndimage.sobel(image, axis=1, mode='constant')
    return np.hypot(gx, gy)

def _show_panel(position, data, label, cmap=None):
    """Draw one axis-less subplot panel with a fixed-size title."""
    plt.subplot(position)
    plt.imshow(data, cmap=cmap)
    plt.axis('off')
    plt.title(label, fontsize=20)

# Build the test image: a rotated, Gaussian-smoothed square.
square = np.zeros((256, 256))
square[64:-64, 64:-64] = 1
square = ndimage.rotate(square, 15, mode='constant')
square = ndimage.gaussian_filter(square, 8)

grad_x = ndimage.sobel(square, axis=0, mode='constant')

plt.figure(figsize=(16, 5))
_show_panel(141, square, 'square', cmap=plt.cm.gray)
_show_panel(142, grad_x, 'Sobel (x direction)')
_show_panel(143, _gradient_magnitude(square), 'Sobel filter')

# Add uniform noise and recompute the edge map to show noise sensitivity.
noisy = square + 0.07*np.random.random(square.shape)
_show_panel(144, _gradient_magnitude(noisy), 'Sobel for noisy image')

plt.subplots_adjust(wspace=0.02, hspace=0.02, top=1, bottom=0, left=0, right=0.9)
plt.show()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.random.random",
"matplotlib.pyplot.axis",
"scipy.ndimage.sobel",
"numpy.zeros",
"matplotlib.pyplot.figure",
"scipy.ndimage.gaussian_filter",
"numpy.hypot",
"scipy.ndimage.rotate",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subp... | [((83, 103), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (91, 103), True, 'import numpy as np\n'), ((133, 172), 'scipy.ndimage.rotate', 'ndimage.rotate', (['im', '(15)'], {'mode': '"""constant"""'}), "(im, 15, mode='constant')\n", (147, 172), False, 'from scipy import ndimage\n'), ((178, 208), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im', '(8)'], {}), '(im, 8)\n', (201, 208), False, 'from scipy import ndimage\n'), ((215, 257), 'scipy.ndimage.sobel', 'ndimage.sobel', (['im'], {'axis': '(0)', 'mode': '"""constant"""'}), "(im, axis=0, mode='constant')\n", (228, 257), False, 'from scipy import ndimage\n'), ((263, 305), 'scipy.ndimage.sobel', 'ndimage.sobel', (['im'], {'axis': '(1)', 'mode': '"""constant"""'}), "(im, axis=1, mode='constant')\n", (276, 305), False, 'from scipy import ndimage\n'), ((312, 328), 'numpy.hypot', 'np.hypot', (['sx', 'sy'], {}), '(sx, sy)\n', (320, 328), True, 'import numpy as np\n'), ((330, 357), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)'}), '(figsize=(16, 5))\n', (340, 357), True, 'import matplotlib.pyplot as plt\n'), ((358, 374), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (369, 374), True, 'import matplotlib.pyplot as plt\n'), ((375, 407), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': 'plt.cm.gray'}), '(im, cmap=plt.cm.gray)\n', (385, 407), True, 'import matplotlib.pyplot as plt\n'), ((408, 423), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (416, 423), True, 'import matplotlib.pyplot as plt\n'), ((424, 456), 'matplotlib.pyplot.title', 'plt.title', (['"""square"""'], {'fontsize': '(20)'}), "('square', fontsize=20)\n", (433, 456), True, 'import matplotlib.pyplot as plt\n'), ((457, 473), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (468, 473), True, 'import matplotlib.pyplot as plt\n'), ((474, 488), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sx'], 
{}), '(sx)\n', (484, 488), True, 'import matplotlib.pyplot as plt\n'), ((489, 504), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (497, 504), True, 'import matplotlib.pyplot as plt\n'), ((505, 550), 'matplotlib.pyplot.title', 'plt.title', (['"""Sobel (x direction)"""'], {'fontsize': '(20)'}), "('Sobel (x direction)', fontsize=20)\n", (514, 550), True, 'import matplotlib.pyplot as plt\n'), ((551, 567), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (562, 567), True, 'import matplotlib.pyplot as plt\n'), ((568, 583), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sob'], {}), '(sob)\n', (578, 583), True, 'import matplotlib.pyplot as plt\n'), ((584, 599), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (592, 599), True, 'import matplotlib.pyplot as plt\n'), ((600, 638), 'matplotlib.pyplot.title', 'plt.title', (['"""Sobel filter"""'], {'fontsize': '(20)'}), "('Sobel filter', fontsize=20)\n", (609, 638), True, 'import matplotlib.pyplot as plt\n'), ((684, 726), 'scipy.ndimage.sobel', 'ndimage.sobel', (['im'], {'axis': '(0)', 'mode': '"""constant"""'}), "(im, axis=0, mode='constant')\n", (697, 726), False, 'from scipy import ndimage\n'), ((732, 774), 'scipy.ndimage.sobel', 'ndimage.sobel', (['im'], {'axis': '(1)', 'mode': '"""constant"""'}), "(im, axis=1, mode='constant')\n", (745, 774), False, 'from scipy import ndimage\n'), ((781, 797), 'numpy.hypot', 'np.hypot', (['sx', 'sy'], {}), '(sx, sy)\n', (789, 797), True, 'import numpy as np\n'), ((799, 815), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (810, 815), True, 'import matplotlib.pyplot as plt\n'), ((816, 831), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sob'], {}), '(sob)\n', (826, 831), True, 'import matplotlib.pyplot as plt\n'), ((832, 847), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (840, 847), True, 'import matplotlib.pyplot as plt\n'), ((848, 895), 'matplotlib.pyplot.title', 
'plt.title', (['"""Sobel for noisy image"""'], {'fontsize': '(20)'}), "('Sobel for noisy image', fontsize=20)\n", (857, 895), True, 'import matplotlib.pyplot as plt\n'), ((899, 984), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.02)', 'hspace': '(0.02)', 'top': '(1)', 'bottom': '(0)', 'left': '(0)', 'right': '(0.9)'}), '(wspace=0.02, hspace=0.02, top=1, bottom=0, left=0,\n right=0.9)\n', (918, 984), True, 'import matplotlib.pyplot as plt\n'), ((982, 992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (990, 992), True, 'import matplotlib.pyplot as plt\n'), ((651, 677), 'numpy.random.random', 'np.random.random', (['im.shape'], {}), '(im.shape)\n', (667, 677), True, 'import numpy as np\n')] |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import warnings
warnings.simplefilter('ignore')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
import re
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer
import pickle
def load_data(database_filepath):
    """Load the cleaned messages table from a SQLite database.

    Args:
        database_filepath: string. Filename of the SQLite database; also used
            as the name of the table read from it.

    Returns:
        X: series. Message texts (features).
        Y: dataframe. Category labels.
        category_names: index. Names of the label columns.
    """
    engine = create_engine('sqlite:///%s' % database_filepath)
    df = pd.read_sql_table(database_filepath, engine)
    print(engine.table_names())  # debug: list the tables present in the DB
    # Everything except the text columns is a label.
    features = df['message']
    labels = df.drop(['message', 'original', 'genre'], axis=1)
    return features, labels, labels.columns
def tokenize(text):
    """Normalize, tokenize and stem a message string.

    Args:
        text: string. Raw message text.

    Returns:
        List of lowercase, stemmed word tokens with English stop words removed.
    """
    # Lowercase and replace everything except letters and digits with spaces.
    normalized = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    stemmer = PorterStemmer()
    # Set membership keeps the filter O(1) per token.
    blacklist = set(stopwords.words("english"))
    return [stemmer.stem(token)
            for token in word_tokenize(normalized)
            if token not in blacklist]
def performance_metric(y_true, y_pred):
    """Median F1 score across all output columns.

    Args:
        y_true: array-like of shape (n_samples, n_outputs). Actual labels.
        y_pred: array of shape (n_samples, n_outputs). Predicted labels.

    Returns:
        float. Median of the per-column F1 scores.
    """
    truth = np.array(y_true)
    n_outputs = np.shape(y_pred)[1]
    scores = [f1_score(truth[:, col], y_pred[:, col]) for col in range(n_outputs)]
    return np.median(scores)
def build_model():
    """Build a grid-searched text-classification pipeline.

    Returns:
        cv: GridSearchCV object that vectorizes the messages, fits a
            multi-output logistic regression, and searches the parameter
            grid below, scored by the median F1 across categories.
    """
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize, min_df=1)),
        ('tfidf', TfidfTransformer(use_idf=False)),
        ('clf', MultiOutputClassifier(LogisticRegression())),
    ]
    parameters = {
        'vect__min_df': [1],
        'tfidf__use_idf': [False],
        'clf__estimator__random_state': [25],
        'clf__estimator__C': [1.0, 10],
        'clf__estimator__penalty': ["l1", "l2"],
        'clf__estimator__solver': ['liblinear'],
    }
    # Score each candidate by the median per-category F1.
    return GridSearchCV(Pipeline(steps), param_grid=parameters,
                        scoring=make_scorer(performance_metric),
                        verbose=10, n_jobs=1)
def get_eval_metrics(actual, predicted, col_names):
    """Compute accuracy, precision, recall and F1 per output column.

    Args:
        actual: array of shape (n_samples, n_outputs). Actual labels.
        predicted: array of shape (n_samples, n_outputs). Predicted labels.
        col_names: list of strings. Name for each output column.

    Returns:
        DataFrame indexed by col_names with columns
        ['Accuracy', 'Precision', 'Recall', 'F1'].
    """
    rows = []
    for col in range(len(col_names)):
        y_true = actual[:, col]
        y_pred = predicted[:, col]
        rows.append([accuracy_score(y_true, y_pred),
                     precision_score(y_true, y_pred),
                     recall_score(y_true, y_pred),
                     f1_score(y_true, y_pred)])
    return pd.DataFrame(data=np.array(rows), index=col_names,
                        columns=['Accuracy', 'Precision', 'Recall', 'F1'])
def evaluate_model(model, X_test, Y_test, col_names):
    """Print per-category test metrics and their summary statistics.

    Args:
        model: fitted model object exposing predict().
        X_test: dataframe. Test features.
        Y_test: dataframe. Test labels.
        col_names: list of strings. Label column names.

    Returns:
        None
    """
    predictions = model.predict(X_test)
    metrics = get_eval_metrics(np.array(Y_test), predictions, col_names)
    print(metrics)
    # Summary stats (count/mean/std/quartiles) across all categories.
    print(metrics.describe())
def save_model(model, model_filepath):
    """Pickle the fitted model to disk.

    Args:
        model: model object. Fitted model object.
        model_filepath: string. Destination path for the pickled model.

    Returns:
        None
    """
    # Context manager guarantees the file is flushed and closed even if
    # pickling raises; the original leaked an open file handle.
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    """Train and persist the disaster-response classifier.

    Usage: train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) != 3:
        # Wrong argument count: explain usage and bail out.
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    # Fixed seed so the grid search is reproducible run-to-run.
    np.random.seed(71)
    model.fit(X_train, Y_train)
    # Parameters for best mean test score
    print("Best params:\n%s" % model.best_params_)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model.best_estimator_, model_filepath)
    print('Trained model saved!')
if __name__ == '__main__':
main()
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.TfidfTransformer",
"nltk.download",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"pandas.read_sql_table",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
... | [((104, 135), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (125, 135), False, 'import warnings\n'), ((308, 330), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (321, 330), False, 'import nltk\n'), ((331, 355), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (344, 355), False, 'import nltk\n'), ((356, 382), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (369, 382), False, 'import nltk\n'), ((1285, 1334), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///%s' % database_filepath)"], {}), "('sqlite:///%s' % database_filepath)\n", (1298, 1334), False, 'from sqlalchemy import create_engine\n'), ((1344, 1388), 'pandas.read_sql_table', 'pd.read_sql_table', (['database_filepath', 'engine'], {}), '(database_filepath, engine)\n', (1361, 1388), True, 'import pandas as pd\n'), ((2051, 2070), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2064, 2070), False, 'from nltk.tokenize import word_tokenize\n'), ((2135, 2150), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2148, 2150), False, 'from nltk.stem import PorterStemmer\n'), ((2168, 2194), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2183, 2194), False, 'from nltk.corpus import stopwords\n'), ((2801, 2819), 'numpy.median', 'np.median', (['f1_list'], {}), '(f1_list)\n', (2810, 2819), True, 'import numpy as np\n'), ((3720, 3751), 'sklearn.metrics.make_scorer', 'make_scorer', (['performance_metric'], {}), '(performance_metric)\n', (3731, 3751), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\n'), ((3798, 3885), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipeline'], {'param_grid': 'parameters', 'scoring': 'scorer', 'verbose': '(10)', 'n_jobs': '(1)'}), '(pipeline, param_grid=parameters, scoring=scorer, verbose=10,\n 
n_jobs=1)\n', (3810, 3885), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4875, 4892), 'numpy.array', 'np.array', (['metrics'], {}), '(metrics)\n', (4883, 4892), True, 'import numpy as np\n'), ((4910, 5008), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'metrics', 'index': 'col_names', 'columns': "['Accuracy', 'Precision', 'Recall', 'F1']"}), "(data=metrics, index=col_names, columns=['Accuracy',\n 'Precision', 'Recall', 'F1'])\n", (4922, 5008), True, 'import pandas as pd\n'), ((4529, 4574), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['actual[:, i]', 'predicted[:, i]'], {}), '(actual[:, i], predicted[:, i])\n', (4543, 4574), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\n'), ((4595, 4641), 'sklearn.metrics.precision_score', 'precision_score', (['actual[:, i]', 'predicted[:, i]'], {}), '(actual[:, i], predicted[:, i])\n', (4610, 4641), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\n'), ((4659, 4702), 'sklearn.metrics.recall_score', 'recall_score', (['actual[:, i]', 'predicted[:, i]'], {}), '(actual[:, i], predicted[:, i])\n', (4671, 4702), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\n'), ((4716, 4755), 'sklearn.metrics.f1_score', 'f1_score', (['actual[:, i]', 'predicted[:, i]'], {}), '(actual[:, i], predicted[:, i])\n', (4724, 4755), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\n'), ((5587, 5603), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (5595, 5603), True, 'import numpy as np\n'), ((6296, 6333), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (6312, 6333), False, 'from sklearn.model_selection import train_test_split\n'), ((6460, 6478), 'numpy.random.seed', 'np.random.seed', (['(71)'], {}), '(71)\n', 
(6474, 6478), True, 'import numpy as np\n'), ((2671, 2687), 'numpy.shape', 'np.shape', (['y_pred'], {}), '(y_pred)\n', (2679, 2687), True, 'import numpy as np\n'), ((2715, 2731), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (2723, 2731), True, 'import numpy as np\n'), ((3140, 3185), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'tokenizer': 'tokenize', 'min_df': '(1)'}), '(tokenizer=tokenize, min_df=1)\n', (3155, 3185), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3208, 3239), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'use_idf': '(False)'}), '(use_idf=False)\n', (3224, 3239), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3280, 3300), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (3298, 3300), False, 'from sklearn.linear_model import LogisticRegression\n')] |
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
from dataclasses import is_dataclass
from dataclasses import replace as dc_replace
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
from smarts.core.controllers import ActionSpaceType
from smarts.core.coordinates import Heading
from smarts.core.plan import Goal, PositionalGoal, Via
from smarts.core.sensors import Observation, ViaPoint, Vias
from smarts.core.utils.math import (
position_to_ego_frame,
world_position_from_ego_frame,
wrap_value,
)
def _isnamedtupleinstance(x):
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n) == str for n in f)
def _replace(obj: Any, **kwargs):
if is_dataclass(obj):
return dc_replace(obj, **kwargs)
elif _isnamedtupleinstance(obj):
return obj._replace(**kwargs)
raise ValueError("Must be a namedtuple or dataclass.")
def ego_centric_observation_adapter(obs: Observation, *args: Any, **kwargs: Any) -> Any:
    """An observation adapter that converts the observation to an ego-centric perspective.

    Every world-frame position in the observation is re-expressed in the ego
    vehicle's frame (ego at the origin with zero heading), and every heading
    becomes ego-relative, wrapped into [-pi, pi). The observation's structure
    is preserved; only pose-like fields are rewritten.
    """
    # World-frame pose that defines the ego coordinate frame for this step.
    position = obs.ego_vehicle_state.position
    heading = obs.ego_vehicle_state.heading
    def ego_frame_dynamics(v):
        # Collapse the planar (x, y) magnitude onto the +X axis; any trailing
        # components (e.g. z) are carried through unchanged.
        return np.array([np.linalg.norm(v[:2]), 0, *v[2:]])  # point to X
    def transform(v):
        # World-frame point -> ego-frame point.
        return position_to_ego_frame(v, position, heading)
    def adjust_heading(h):
        # World-frame heading -> ego-relative heading, wrapped to [-pi, pi).
        return wrap_value(h - heading, -math.pi, math.pi)
    nvs = obs.neighborhood_vehicle_states or []
    wpps = obs.waypoint_paths or []
    def _replace_via(via: Union[Via, ViaPoint]):
        # Vias only need their position moved into the ego frame.
        return _replace(via, position=transform(via.position))
    vd = None
    if obs.via_data:
        rpvp = lambda vps: [_replace_via(vp) for vp in vps]
        vd = _replace(
            obs.via_data,
            near_via_points=rpvp(obs.via_data.near_via_points),
            hit_via_points=rpvp(obs.via_data.hit_via_points),
        )
    # Transform a list of waypoint paths: each 2D waypoint position goes into
    # the ego frame (lifted to 3D for the transform, then truncated back) and
    # each heading becomes ego-relative.
    replace_wps = lambda lwps: [
        [
            _replace(
                wp,
                pos=transform(np.append(wp.pos, [0]))[:2],
                heading=Heading(adjust_heading(wp.heading)),
            )
            for wp in wps
        ]
        for wps in lwps
    ]
    rwps = None
    if obs.road_waypoints:
        rwps = _replace(
            obs.road_waypoints,
            lanes={
                l_id: replace_wps(wps) for l_id, wps in obs.road_waypoints.lanes.items()
            },
        )
    # Camera sensors ride on the ego vehicle, so in the ego frame their pose
    # is the identity pose.
    # NOTE(review): this assumes the camera observation itself is not None —
    # confirm the relevant sensors are always enabled when this adapter runs.
    replace_metadata = lambda cam_obs: _replace(
        cam_obs,
        metadata=_replace(
            cam_obs.metadata, camera_pos=(0, 0, 0), camera_heading_in_degrees=0
        ),
    )
    def _optional_replace_goal(goal):
        # Only positional goals carry a transformable position; other goal
        # kinds are left untouched (empty kwargs).
        if isinstance(goal, PositionalGoal):
            return {"goal": _replace(goal, position=transform(tuple(goal.position)))}
        return {}
    def _replace_lidar(lidar):
        # lidar[0]: hit points (transformed); lidar[1]: passed through
        # unchanged; lidar[2]: (ray_start, ray_end) pairs (both transformed).
        if len(lidar) == 0:
            return []
        return [
            [transform(hit_point) for hit_point in lidar[0]],
            lidar[1],
            [
                [transform(ray_start), transform(ray_end)]
                for ray_start, ray_end in lidar[2]
            ],
        ]
    return _replace(
        obs,
        ego_vehicle_state=_replace(
            obs.ego_vehicle_state,
            # The ego vehicle is the origin of its own frame.
            position=np.array([0, 0, 0]),
            heading=Heading(0),
            linear_velocity=ego_frame_dynamics(obs.ego_vehicle_state.linear_velocity),
            linear_acceleration=ego_frame_dynamics(
                obs.ego_vehicle_state.linear_acceleration
            ),
            linear_jerk=ego_frame_dynamics(obs.ego_vehicle_state.linear_jerk),
            mission=_replace(
                obs.ego_vehicle_state.mission,
                start=_replace(
                    obs.ego_vehicle_state.mission.start,
                    position=transform(
                        np.append(obs.ego_vehicle_state.mission.start.position, [0])
                    )[:2],
                    heading=adjust_heading(obs.ego_vehicle_state.mission.start.heading),
                ),
                via=tuple(
                    _replace_via(via) for via in obs.ego_vehicle_state.mission.via
                ),
                **_optional_replace_goal(obs.ego_vehicle_state.mission.goal),
                # TODO??: `entry_tactic.zone` zone.position?
            ),
        ),
        neighborhood_vehicle_states=[
            _replace(
                nv,
                position=transform(nv.position),
                heading=Heading(adjust_heading(nv.heading)),
            )
            for nv in nvs
        ],
        lidar_point_cloud=_replace_lidar(obs.lidar_point_cloud),
        waypoint_paths=replace_wps(wpps),
        drivable_area_grid_map=replace_metadata(obs.drivable_area_grid_map),
        occupancy_grid_map=replace_metadata(obs.occupancy_grid_map),
        top_down_rgb=replace_metadata(obs.top_down_rgb),
        road_waypoints=rwps,
        via_data=vd,
    )
def _egocentric_continuous_action_adapter(act: Tuple[float, float, float], _=None):
    """Identity adapter: continuous actions need no frame conversion."""
    return act
def _egocentric_actuator_dynamic_adapter(act: Tuple[float, float, float], _=None):
    """Identity adapter: actuator-dynamic actions need no frame conversion."""
    return act
def _egocentric_lane_adapter(act: str, _=None):
    """Identity adapter: lane-change actions need no frame conversion."""
    return act
def _egocentric_lane_with_continous_speed_adapter(act: Tuple[int, float], _=None):
    """Identity adapter: lane+speed actions need no frame conversion."""
    return act
def _trajectory_adaption(act, last_obs):
    """Convert an ego-frame trajectory action back into world frame.

    ``act[0]``/``act[1]`` are the x/y point sequences and ``act[2]`` the
    heading sequence; any trailing components are passed through unchanged.
    """
    ego_pos = last_obs.ego_vehicle_state.position
    ego_heading = last_obs.ego_vehicle_state.heading
    # Lift each (x, y) to 3D for the transform, then drop z again.
    world_xy = [
        world_position_from_ego_frame([x, y, 0], ego_pos, ego_heading)[:2]
        for x, y in zip(*act[:2])
    ]
    # Transpose so the tuple unpacks into separate x and y arrays.
    new_pos = np.array(world_xy).T
    new_headings = np.array(
        [wrap_value(h + ego_heading, -math.pi, math.pi) for h in act[2]]
    )
    return (*new_pos, new_headings, *act[3:])
def _egocentric_trajectory_adapter(
    act: Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float]],
    last_obs: Optional[Observation] = None,
):
    """Convert an ego-frame trajectory to world frame when the last observation is known."""
    return _trajectory_adaption(act, last_obs) if last_obs else act
def _egocentric_trajectory_with_time_adapter(
    act: Tuple[
        Sequence[float],
        Sequence[float],
        Sequence[float],
        Sequence[float],
        Sequence[float],
    ],
    last_obs: Optional[Observation] = None,
):
    """Convert an ego-frame timed trajectory to world frame when the last observation is known."""
    return _trajectory_adaption(act, last_obs) if last_obs else act
def _egocentric_mpc_adapter(
    act: Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float]],
    last_obs: Optional[Observation] = None,
):
    """Convert an ego-frame MPC trajectory to world frame when the last observation is known."""
    return _trajectory_adaption(act, last_obs) if last_obs else act
def _egocentric_target_pose_adapter(
    act: Tuple[float, float, float, float], last_obs: Optional[Observation] = None
):
    """Convert an ego-frame target pose back to world frame.

    ``act[:2]`` is the target (x, y), ``act[2]`` the ego-relative heading;
    the trailing component ``act[3]`` is passed through unchanged. Without a
    previous observation the action is returned as-is.
    """
    if not last_obs:
        return act
    ego_state = last_obs.ego_vehicle_state
    # Lift the 2D target to 3D for the transform, then keep only (x, y).
    world_pos = world_position_from_ego_frame(
        np.append(act[:2], [0]),
        ego_state.position,
        ego_state.heading,
    )
    world_heading = wrap_value(ego_state.heading + act[2], -math.pi, math.pi)
    return np.array([*world_pos[:2], world_heading, act[3]])
def _egocentric_multi_target_pose_adapter(
    act: Dict[str, Tuple[float, float, float, float]],
    last_obs: Optional["Observation"] = None,
):
    """Reject multi-target-pose actions: unsupported in ego-centric mode.

    Raises:
        ValueError: always — the ego-centric adapters assume a single vehicle,
            so a multi-vehicle target-pose action is ambiguous.
    """
    # BUG FIX: the original used `assert ValueError(...)`, which always passes
    # (a ValueError instance is truthy), so the unsupported action space was
    # silently accepted. The intent is clearly to reject it.
    raise ValueError(
        "Ego-centric assumes single vehicle and is ambiguous with multi-target-pose."
    )
def _egocentric_imitation_adapter(
    act: Union[float, Tuple[float, float]], last_obs: Optional[Observation] = None
):
    """Identity adapter: imitation actions need no frame conversion."""
    return act
def _pair_adapters(
    ego_centric_observation_adapter: Callable[[Observation], Observation],
    ego_centric_action_adapter: Callable[[Any, Optional[Observation]], Any],
):
    """Wrapper that shares the state between both adapters.

    The observation wrapper records the raw observation before converting it;
    the action wrapper uses that recording to convert actions back to world
    frame.
    """
    # Shared mutable state between the two closures.
    state = {"last_obs": None}
    def oa_wrapper(obs: Observation):
        # Store the unmodified observation for the action adapter.
        state["last_obs"] = obs
        return ego_centric_observation_adapter(obs)
    def aa_wrapper(act: Any):
        # Pass the last unmodified obs to the action for conversion purposes.
        return ego_centric_action_adapter(act, state["last_obs"])
    return oa_wrapper, aa_wrapper
def get_egocentric_adapters(action_space: ActionSpaceType):
    """Provides a set of adapters that share state information of the unmodified observation.

    This will allow the action adapter to automatically convert back to world
    space for SMARTS.

    Returns:
        (obs_adapter, action_adapter)
    """
    # Dispatch table: action-space type -> matching ego-centric action adapter.
    action_adapters = {
        ActionSpaceType.Continuous: _egocentric_continuous_action_adapter,
        ActionSpaceType.ActuatorDynamic: _egocentric_actuator_dynamic_adapter,
        ActionSpaceType.Lane: _egocentric_lane_adapter,
        ActionSpaceType.LaneWithContinuousSpeed: _egocentric_lane_with_continous_speed_adapter,
        ActionSpaceType.Trajectory: _egocentric_trajectory_adapter,
        ActionSpaceType.TrajectoryWithTime: _egocentric_trajectory_with_time_adapter,
        ActionSpaceType.MPC: _egocentric_mpc_adapter,
        ActionSpaceType.TargetPose: _egocentric_target_pose_adapter,
        ActionSpaceType.MultiTargetPose: _egocentric_multi_target_pose_adapter,
        ActionSpaceType.Imitation: _egocentric_imitation_adapter,
    }
    return _pair_adapters(
        ego_centric_observation_adapter, action_adapters.get(action_space)
    )
| [
"smarts.core.coordinates.Heading",
"smarts.core.utils.math.wrap_value",
"smarts.core.utils.math.position_to_ego_frame",
"numpy.append",
"numpy.array",
"smarts.core.utils.math.world_position_from_ego_frame",
"dataclasses.replace",
"numpy.linalg.norm",
"dataclasses.is_dataclass"
] | [((1964, 1981), 'dataclasses.is_dataclass', 'is_dataclass', (['obj'], {}), '(obj)\n', (1976, 1981), False, 'from dataclasses import is_dataclass\n'), ((1998, 2023), 'dataclasses.replace', 'dc_replace', (['obj'], {}), '(obj, **kwargs)\n', (2008, 2023), True, 'from dataclasses import replace as dc_replace\n'), ((2579, 2622), 'smarts.core.utils.math.position_to_ego_frame', 'position_to_ego_frame', (['v', 'position', 'heading'], {}), '(v, position, heading)\n', (2600, 2622), False, 'from smarts.core.utils.math import position_to_ego_frame, world_position_from_ego_frame, wrap_value\n'), ((2666, 2708), 'smarts.core.utils.math.wrap_value', 'wrap_value', (['(h - heading)', '(-math.pi)', 'math.pi'], {}), '(h - heading, -math.pi, math.pi)\n', (2676, 2708), False, 'from smarts.core.utils.math import position_to_ego_frame, world_position_from_ego_frame, wrap_value\n'), ((6960, 7029), 'smarts.core.utils.math.wrap_value', 'wrap_value', (['(h + last_obs.ego_vehicle_state.heading)', '(-math.pi)', 'math.pi'], {}), '(h + last_obs.ego_vehicle_state.heading, -math.pi, math.pi)\n', (6970, 7029), False, 'from smarts.core.utils.math import position_to_ego_frame, world_position_from_ego_frame, wrap_value\n'), ((8146, 8169), 'numpy.append', 'np.append', (['act[:2]', '[0]'], {}), '(act[:2], [0])\n', (8155, 8169), True, 'import numpy as np\n'), ((2492, 2513), 'numpy.linalg.norm', 'np.linalg.norm', (['v[:2]'], {}), '(v[:2])\n', (2506, 2513), True, 'import numpy as np\n'), ((8363, 8437), 'smarts.core.utils.math.wrap_value', 'wrap_value', (['(last_obs.ego_vehicle_state.heading + act[2])', '(-math.pi)', 'math.pi'], {}), '(last_obs.ego_vehicle_state.heading + act[2], -math.pi, math.pi)\n', (8373, 8437), False, 'from smarts.core.utils.math import position_to_ego_frame, world_position_from_ego_frame, wrap_value\n'), ((4546, 4565), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4554, 4565), True, 'import numpy as np\n'), ((4587, 4597), 'smarts.core.coordinates.Heading', 
'Heading', (['(0)'], {}), '(0)\n', (4594, 4597), False, 'from smarts.core.coordinates import Heading\n'), ((6672, 6790), 'smarts.core.utils.math.world_position_from_ego_frame', 'world_position_from_ego_frame', (['[x, y, 0]', 'last_obs.ego_vehicle_state.position', 'last_obs.ego_vehicle_state.heading'], {}), '([x, y, 0], last_obs.ego_vehicle_state.\n position, last_obs.ego_vehicle_state.heading)\n', (6701, 6790), False, 'from smarts.core.utils.math import position_to_ego_frame, world_position_from_ego_frame, wrap_value\n'), ((3303, 3325), 'numpy.append', 'np.append', (['wp.pos', '[0]'], {}), '(wp.pos, [0])\n', (3312, 3325), True, 'import numpy as np\n'), ((5120, 5180), 'numpy.append', 'np.append', (['obs.ego_vehicle_state.mission.start.position', '[0]'], {}), '(obs.ego_vehicle_state.mission.start.position, [0])\n', (5129, 5180), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import division
from hoomd import *
from hoomd import hpmc
import numpy
import math
import sys
import os
import unittest
import tempfile
# Initialize the HOOMD execution context before any simulation objects exist.
context.initialize()
def create_empty(**kwargs):
    """Initialize a system from an empty snapshot (kwargs forwarded to data.make_snapshot)."""
    snapshot = data.make_snapshot(**kwargs)
    return init.read_snapshot(snapshot)
# This test checks that sdf produces correct results. A baseline script on a
# known-working version of SDF provides the reference average and error lines
# below. A short simulation is run, sdf is computed, and the result is compared
# to the reference.
# number of particles per box edge (N = l*l total)
l=32
# packing fraction
phi = 0.8
poly_A = 1; # particle area; assumes unit squares
aspect = 1.0;
N = l*l;
# total box area from the target packing fraction
A = N * poly_A / phi;
Lx = math.sqrt(A/aspect);
Ly = aspect * Lx;
# lattice spacing for the initial simple-cubic arrangement
ax = Lx / l;
ay = Ly / l;
# Reference SDF averages from a known-good version (one value per dx bin).
avg = numpy.array([ 55.20126953, 54.89853516, 54.77910156, 54.56660156,
        54.22255859, 53.83935547, 53.77617188, 53.42109375,
        53.05546875, 52.86376953, 52.65576172, 52.21240234,
        52.07402344, 51.88974609, 51.69990234, 51.32099609,
        51.09775391, 51.06533203, 50.61923828, 50.35566406,
        50.07197266, 49.92275391, 49.51914062, 49.39013672,
        49.17597656, 48.91982422, 48.64580078, 48.30712891,
        48.12207031, 47.815625 , 47.57744141, 47.37099609,
        47.14765625, 46.92382812, 46.6984375 , 46.66943359,
        46.18203125, 45.95615234, 45.66650391, 45.52714844,
        45.39951172, 45.04599609, 44.90908203, 44.62197266,
        44.37460937, 44.02998047, 43.84306641, 43.53310547,
        43.55 , 43.29589844, 43.06054688, 42.85097656,
        42.58837891, 42.39326172, 42.21152344, 41.91777344,
        41.71054687, 41.68232422, 41.42177734, 41.08085938,
        40.91435547, 40.76123047, 40.45380859, 40.178125 ,
        40.14853516, 39.81972656, 39.60585938, 39.44169922,
        39.34179688, 39.09541016, 38.78105469, 38.60087891,
        38.56572266, 38.27158203, 38.02011719, 37.865625 ,
        37.77851562, 37.51113281, 37.25615234, 37.23857422,
        36.91757812, 36.68486328, 36.57675781, 36.39140625,
        36.06240234, 36.01962891, 35.8375 , 35.51914062,
        35.3640625 , 35.29042969, 34.86337891, 34.72460938,
        34.73964844, 34.57871094, 34.32685547, 34.02607422,
        33.78271484, 33.82548828, 33.53808594, 33.40341797,
        33.17861328, 33.05439453, 32.80361328, 32.55478516,
        32.53759766, 32.28447266, 32.26513672, 32.05732422,
        31.82294922, 31.83535156, 31.56376953, 31.46337891,
        31.27431641, 30.88310547, 30.85107422, 30.63320313,
        30.57822266, 30.28886719, 30.28183594, 30.05927734,
        29.98896484, 29.690625 , 29.51816406, 29.40742188,
        29.2328125 , 29.19853516, 28.94599609, 28.80449219,
        28.47480469, 28.48476563, 28.31738281, 28.21455078,
        28.00878906, 27.90458984, 27.84970703, 27.54052734,
        27.43818359, 27.31064453, 27.12773437, 26.91464844,
        26.84511719, 26.78701172, 26.53603516, 26.39853516,
        26.13779297, 26.16269531, 25.92138672, 25.80244141,
        25.75234375, 25.49384766, 25.37197266, 25.26962891,
        25.14287109, 24.87558594, 24.778125 , 24.68320312,
        24.65957031, 24.44404297, 24.31621094, 24.203125 ,
        24.12402344, 23.89628906, 23.76621094, 23.56923828,
        23.38095703, 23.32724609, 23.25498047, 23.09697266,
        23.04716797, 22.90712891, 22.68662109, 22.59970703,
        22.54824219, 22.53632813, 22.29267578, 22.08613281,
        21.98398437, 21.89169922, 21.74550781, 21.75878906,
        21.45625 , 21.37529297, 21.1890625 , 21.18417969,
        21.0671875 , 20.95087891, 20.81650391, 20.60390625,
        20.66953125, 20.4640625 , 20.47021484, 20.12988281,
        20.17099609, 20.05224609, 19.89619141, 19.80859375,
        19.72558594, 19.64990234, 19.43525391, 19.38203125]);
# Reference standard errors per bin, used to set the comparison tolerance.
err = numpy.array([ 1.21368492, 1.07520243, 1.22496485, 1.07203861, 1.31918198,
        1.15482965, 1.11606943, 1.12342247, 1.1214123 , 1.2033176 ,
        1.14923442, 1.11741796, 1.08633901, 1.10809585, 1.13268611,
        1.17159683, 1.12298656, 1.27754418, 1.09430177, 1.08989947,
        1.051715 , 1.13990382, 1.16086636, 1.19538929, 1.09450355,
        1.10057404, 0.98204849, 1.02542969, 1.10736805, 1.18062055,
        1.12365972, 1.12265463, 1.06131492, 1.15169701, 1.13772836,
        1.03968987, 1.04348243, 1.00617502, 1.02450203, 1.08293272,
        1.02187476, 1.00072731, 1.0267637 , 1.08289546, 1.03696814,
        1.01035732, 1.05730499, 1.07088231, 1.00528653, 0.9195167 ,
        0.99235353, 1.00839744, 0.98700882, 0.87196929, 1.00124084,
        0.96481759, 0.9412312 , 1.04691734, 0.92419062, 0.89478269,
        0.85106599, 1.0143535 , 1.07011876, 0.88196475, 0.8708013 ,
        0.91838154, 0.9309356 , 0.97521482, 0.94277816, 0.86336248,
        0.8845162 , 1.00421706, 0.87940419, 0.85516477, 0.86071935,
        0.96725404, 0.87175829, 0.86386878, 0.96833751, 0.87554994,
        0.8449041 , 0.77404494, 0.92879454, 0.95780868, 0.84341047,
        0.88067771, 0.83393048, 0.94414754, 0.94671484, 0.84554255,
        0.8906436 , 0.84538732, 0.78517686, 0.89134056, 0.78446042,
        0.8952503 , 0.84624311, 0.79573064, 0.85422345, 0.88918562,
        0.75531048, 0.82884413, 0.83369698, 0.77627999, 0.84187759,
        0.87986859, 0.86356705, 0.90929237, 0.83017397, 0.86393341,
        0.81426374, 0.80991068, 0.86676111, 0.75232448, 0.8021119 ,
        0.68794232, 0.69039919, 0.71421068, 0.77667793, 0.82113389,
        0.70256397, 0.83293526, 0.69512453, 0.75148262, 0.7407287 ,
        0.74124134, 0.77846167, 0.7941425 , 0.81125561, 0.73334183,
        0.76452184, 0.71159507, 0.67302729, 0.66175046, 0.84778683,
        0.66273563, 0.76777339, 0.71355888, 0.74460445, 0.76623613,
        0.63883733, 0.6887326 , 0.74616778, 0.65223179, 0.76358086,
        0.68985286, 0.66273563, 0.72437662, 0.77382571, 0.66234322,
        0.74757211, 0.62809942, 0.75606851, 0.65375498, 0.65920693,
        0.64767863, 0.67683992, 0.63170556, 0.69891621, 0.70708048,
        0.64583276, 0.73903135, 0.60068155, 0.66055863, 0.69614341,
        0.61515868, 0.63001311, 0.68602529, 0.7014929 , 0.61950453,
        0.60049188, 0.6259654 , 0.55819764, 0.65039367, 0.67079534,
        0.60552195, 0.64864663, 0.59901689, 0.65517427, 0.55348699,
        0.57578738, 0.6253923 , 0.62679547, 0.61274744, 0.5681065 ,
        0.6065114 , 0.61170127, 0.60009145, 0.61583989, 0.63889728,
        0.66477228, 0.60133457, 0.56484264, 0.5676353 , 0.55359946,
        0.59000379, 0.60483562, 0.57305916, 0.57591598, 0.66462928]);
class sdf_test1 (unittest.TestCase):
    """Regression test: the measured SDF must match the stored reference curve."""

    def setUp(self):
        """Build an l x l square lattice and configure the HPMC integrator."""
        # setup the MC integration
        self.system = create_empty(N=l*l, box=data.boxdim(Lx=Lx, Ly=Ly, dimensions=2), particle_types=['A'])
        # initialize a simple cubic array of particles
        lox = - Lx / 2.0
        loy = - Ly / 2.0
        for p in self.system.particles:
            (i, j) = (p.tag % l, p.tag//l % l)
            p.position = (lox + i*ax + ax/2, loy + j*ay + ay/2, 0)
        self.mc = hpmc.integrate.convex_polygon(seed=10, d=0.1)
        self.mc.shape_param.set('A', vertices=[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)])
        # only rank 0 writes the output file
        if comm.get_rank() == 0:
            tmp = tempfile.mkstemp(suffix='.hpmc-test-sdf')
            # BUG FIX: mkstemp returns an *open* file descriptor; close it so
            # it is not leaked (only the path is needed).
            os.close(tmp[0])
            self.tmp_file = tmp[1]
        else:
            self.tmp_file = "invalid"

    def test_sdf(self):
        """Run a short simulation, then compare the logged SDF to the reference."""
        # setup SDF logging
        xmax=0.02
        dx=1e-4
        hpmc.analyze.sdf(mc=self.mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=200, period=10, phase=0)
        # run
        run(6000)
        # read in the output file and check it
        if comm.get_rank() == 0:
            r = numpy.loadtxt(self.tmp_file)
            self.assertEqual(r.shape[0], 3)
            self.assertEqual(r.shape[1]-1, avg.size)
            # skip the first frame in averaging, then check that all values are
            # within 8 error bars of the reference avg; this seems sufficient
            # to get good test results even with different seeds or GPU runs
            v = numpy.mean(r[1:, 1:], axis=0)
            invalid = numpy.abs(avg - v) > (8*err)
            self.assertEqual(numpy.sum(invalid), 0)

    def tearDown(self):
        """Release simulation objects and clean up the temporary file."""
        del self.mc
        del self.system
        context.initialize()
        if comm.get_rank() == 0:
            os.remove(self.tmp_file)
# Run the tests verbosely when executed directly; the fixed argv keeps
# unittest from parsing the real command-line arguments.
if __name__ == '__main__':
    unittest.main(argv = ['test.py', '-v'])
| [
"numpy.mean",
"numpy.abs",
"hoomd.hpmc.integrate.convex_polygon",
"math.sqrt",
"hoomd.hpmc.analyze.sdf",
"numpy.array",
"numpy.sum",
"unittest.main",
"numpy.loadtxt",
"tempfile.mkstemp",
"os.remove"
] | [((688, 709), 'math.sqrt', 'math.sqrt', (['(A / aspect)'], {}), '(A / aspect)\n', (697, 709), False, 'import math\n'), ((762, 3537), 'numpy.array', 'numpy.array', (['[55.20126953, 54.89853516, 54.77910156, 54.56660156, 54.22255859, \n 53.83935547, 53.77617188, 53.42109375, 53.05546875, 52.86376953, \n 52.65576172, 52.21240234, 52.07402344, 51.88974609, 51.69990234, \n 51.32099609, 51.09775391, 51.06533203, 50.61923828, 50.35566406, \n 50.07197266, 49.92275391, 49.51914062, 49.39013672, 49.17597656, \n 48.91982422, 48.64580078, 48.30712891, 48.12207031, 47.815625, \n 47.57744141, 47.37099609, 47.14765625, 46.92382812, 46.6984375, \n 46.66943359, 46.18203125, 45.95615234, 45.66650391, 45.52714844, \n 45.39951172, 45.04599609, 44.90908203, 44.62197266, 44.37460937, \n 44.02998047, 43.84306641, 43.53310547, 43.55, 43.29589844, 43.06054688,\n 42.85097656, 42.58837891, 42.39326172, 42.21152344, 41.91777344, \n 41.71054687, 41.68232422, 41.42177734, 41.08085938, 40.91435547, \n 40.76123047, 40.45380859, 40.178125, 40.14853516, 39.81972656, \n 39.60585938, 39.44169922, 39.34179688, 39.09541016, 38.78105469, \n 38.60087891, 38.56572266, 38.27158203, 38.02011719, 37.865625, \n 37.77851562, 37.51113281, 37.25615234, 37.23857422, 36.91757812, \n 36.68486328, 36.57675781, 36.39140625, 36.06240234, 36.01962891, \n 35.8375, 35.51914062, 35.3640625, 35.29042969, 34.86337891, 34.72460938,\n 34.73964844, 34.57871094, 34.32685547, 34.02607422, 33.78271484, \n 33.82548828, 33.53808594, 33.40341797, 33.17861328, 33.05439453, \n 32.80361328, 32.55478516, 32.53759766, 32.28447266, 32.26513672, \n 32.05732422, 31.82294922, 31.83535156, 31.56376953, 31.46337891, \n 31.27431641, 30.88310547, 30.85107422, 30.63320313, 30.57822266, \n 30.28886719, 30.28183594, 30.05927734, 29.98896484, 29.690625, \n 29.51816406, 29.40742188, 29.2328125, 29.19853516, 28.94599609, \n 28.80449219, 28.47480469, 28.48476563, 28.31738281, 28.21455078, \n 28.00878906, 27.90458984, 27.84970703, 27.54052734, 
27.43818359, \n 27.31064453, 27.12773437, 26.91464844, 26.84511719, 26.78701172, \n 26.53603516, 26.39853516, 26.13779297, 26.16269531, 25.92138672, \n 25.80244141, 25.75234375, 25.49384766, 25.37197266, 25.26962891, \n 25.14287109, 24.87558594, 24.778125, 24.68320312, 24.65957031, \n 24.44404297, 24.31621094, 24.203125, 24.12402344, 23.89628906, \n 23.76621094, 23.56923828, 23.38095703, 23.32724609, 23.25498047, \n 23.09697266, 23.04716797, 22.90712891, 22.68662109, 22.59970703, \n 22.54824219, 22.53632813, 22.29267578, 22.08613281, 21.98398437, \n 21.89169922, 21.74550781, 21.75878906, 21.45625, 21.37529297, \n 21.1890625, 21.18417969, 21.0671875, 20.95087891, 20.81650391, \n 20.60390625, 20.66953125, 20.4640625, 20.47021484, 20.12988281, \n 20.17099609, 20.05224609, 19.89619141, 19.80859375, 19.72558594, \n 19.64990234, 19.43525391, 19.38203125]'], {}), '([55.20126953, 54.89853516, 54.77910156, 54.56660156, \n 54.22255859, 53.83935547, 53.77617188, 53.42109375, 53.05546875, \n 52.86376953, 52.65576172, 52.21240234, 52.07402344, 51.88974609, \n 51.69990234, 51.32099609, 51.09775391, 51.06533203, 50.61923828, \n 50.35566406, 50.07197266, 49.92275391, 49.51914062, 49.39013672, \n 49.17597656, 48.91982422, 48.64580078, 48.30712891, 48.12207031, \n 47.815625, 47.57744141, 47.37099609, 47.14765625, 46.92382812, \n 46.6984375, 46.66943359, 46.18203125, 45.95615234, 45.66650391, \n 45.52714844, 45.39951172, 45.04599609, 44.90908203, 44.62197266, \n 44.37460937, 44.02998047, 43.84306641, 43.53310547, 43.55, 43.29589844,\n 43.06054688, 42.85097656, 42.58837891, 42.39326172, 42.21152344, \n 41.91777344, 41.71054687, 41.68232422, 41.42177734, 41.08085938, \n 40.91435547, 40.76123047, 40.45380859, 40.178125, 40.14853516, \n 39.81972656, 39.60585938, 39.44169922, 39.34179688, 39.09541016, \n 38.78105469, 38.60087891, 38.56572266, 38.27158203, 38.02011719, \n 37.865625, 37.77851562, 37.51113281, 37.25615234, 37.23857422, \n 36.91757812, 36.68486328, 36.57675781, 36.39140625, 
36.06240234, \n 36.01962891, 35.8375, 35.51914062, 35.3640625, 35.29042969, 34.86337891,\n 34.72460938, 34.73964844, 34.57871094, 34.32685547, 34.02607422, \n 33.78271484, 33.82548828, 33.53808594, 33.40341797, 33.17861328, \n 33.05439453, 32.80361328, 32.55478516, 32.53759766, 32.28447266, \n 32.26513672, 32.05732422, 31.82294922, 31.83535156, 31.56376953, \n 31.46337891, 31.27431641, 30.88310547, 30.85107422, 30.63320313, \n 30.57822266, 30.28886719, 30.28183594, 30.05927734, 29.98896484, \n 29.690625, 29.51816406, 29.40742188, 29.2328125, 29.19853516, \n 28.94599609, 28.80449219, 28.47480469, 28.48476563, 28.31738281, \n 28.21455078, 28.00878906, 27.90458984, 27.84970703, 27.54052734, \n 27.43818359, 27.31064453, 27.12773437, 26.91464844, 26.84511719, \n 26.78701172, 26.53603516, 26.39853516, 26.13779297, 26.16269531, \n 25.92138672, 25.80244141, 25.75234375, 25.49384766, 25.37197266, \n 25.26962891, 25.14287109, 24.87558594, 24.778125, 24.68320312, \n 24.65957031, 24.44404297, 24.31621094, 24.203125, 24.12402344, \n 23.89628906, 23.76621094, 23.56923828, 23.38095703, 23.32724609, \n 23.25498047, 23.09697266, 23.04716797, 22.90712891, 22.68662109, \n 22.59970703, 22.54824219, 22.53632813, 22.29267578, 22.08613281, \n 21.98398437, 21.89169922, 21.74550781, 21.75878906, 21.45625, \n 21.37529297, 21.1890625, 21.18417969, 21.0671875, 20.95087891, \n 20.81650391, 20.60390625, 20.66953125, 20.4640625, 20.47021484, \n 20.12988281, 20.17099609, 20.05224609, 19.89619141, 19.80859375, \n 19.72558594, 19.64990234, 19.43525391, 19.38203125])\n', (773, 3537), False, 'import numpy\n'), ((3927, 6467), 'numpy.array', 'numpy.array', (['[1.21368492, 1.07520243, 1.22496485, 1.07203861, 1.31918198, 1.15482965, \n 1.11606943, 1.12342247, 1.1214123, 1.2033176, 1.14923442, 1.11741796, \n 1.08633901, 1.10809585, 1.13268611, 1.17159683, 1.12298656, 1.27754418,\n 1.09430177, 1.08989947, 1.051715, 1.13990382, 1.16086636, 1.19538929, \n 1.09450355, 1.10057404, 0.98204849, 1.02542969, 
1.10736805, 1.18062055,\n 1.12365972, 1.12265463, 1.06131492, 1.15169701, 1.13772836, 1.03968987,\n 1.04348243, 1.00617502, 1.02450203, 1.08293272, 1.02187476, 1.00072731,\n 1.0267637, 1.08289546, 1.03696814, 1.01035732, 1.05730499, 1.07088231, \n 1.00528653, 0.9195167, 0.99235353, 1.00839744, 0.98700882, 0.87196929, \n 1.00124084, 0.96481759, 0.9412312, 1.04691734, 0.92419062, 0.89478269, \n 0.85106599, 1.0143535, 1.07011876, 0.88196475, 0.8708013, 0.91838154, \n 0.9309356, 0.97521482, 0.94277816, 0.86336248, 0.8845162, 1.00421706, \n 0.87940419, 0.85516477, 0.86071935, 0.96725404, 0.87175829, 0.86386878,\n 0.96833751, 0.87554994, 0.8449041, 0.77404494, 0.92879454, 0.95780868, \n 0.84341047, 0.88067771, 0.83393048, 0.94414754, 0.94671484, 0.84554255,\n 0.8906436, 0.84538732, 0.78517686, 0.89134056, 0.78446042, 0.8952503, \n 0.84624311, 0.79573064, 0.85422345, 0.88918562, 0.75531048, 0.82884413,\n 0.83369698, 0.77627999, 0.84187759, 0.87986859, 0.86356705, 0.90929237,\n 0.83017397, 0.86393341, 0.81426374, 0.80991068, 0.86676111, 0.75232448,\n 0.8021119, 0.68794232, 0.69039919, 0.71421068, 0.77667793, 0.82113389, \n 0.70256397, 0.83293526, 0.69512453, 0.75148262, 0.7407287, 0.74124134, \n 0.77846167, 0.7941425, 0.81125561, 0.73334183, 0.76452184, 0.71159507, \n 0.67302729, 0.66175046, 0.84778683, 0.66273563, 0.76777339, 0.71355888,\n 0.74460445, 0.76623613, 0.63883733, 0.6887326, 0.74616778, 0.65223179, \n 0.76358086, 0.68985286, 0.66273563, 0.72437662, 0.77382571, 0.66234322,\n 0.74757211, 0.62809942, 0.75606851, 0.65375498, 0.65920693, 0.64767863,\n 0.67683992, 0.63170556, 0.69891621, 0.70708048, 0.64583276, 0.73903135,\n 0.60068155, 0.66055863, 0.69614341, 0.61515868, 0.63001311, 0.68602529,\n 0.7014929, 0.61950453, 0.60049188, 0.6259654, 0.55819764, 0.65039367, \n 0.67079534, 0.60552195, 0.64864663, 0.59901689, 0.65517427, 0.55348699,\n 0.57578738, 0.6253923, 0.62679547, 0.61274744, 0.5681065, 0.6065114, \n 0.61170127, 0.60009145, 0.61583989, 0.63889728, 
0.66477228, 0.60133457,\n 0.56484264, 0.5676353, 0.55359946, 0.59000379, 0.60483562, 0.57305916, \n 0.57591598, 0.66462928]'], {}), '([1.21368492, 1.07520243, 1.22496485, 1.07203861, 1.31918198, \n 1.15482965, 1.11606943, 1.12342247, 1.1214123, 1.2033176, 1.14923442, \n 1.11741796, 1.08633901, 1.10809585, 1.13268611, 1.17159683, 1.12298656,\n 1.27754418, 1.09430177, 1.08989947, 1.051715, 1.13990382, 1.16086636, \n 1.19538929, 1.09450355, 1.10057404, 0.98204849, 1.02542969, 1.10736805,\n 1.18062055, 1.12365972, 1.12265463, 1.06131492, 1.15169701, 1.13772836,\n 1.03968987, 1.04348243, 1.00617502, 1.02450203, 1.08293272, 1.02187476,\n 1.00072731, 1.0267637, 1.08289546, 1.03696814, 1.01035732, 1.05730499, \n 1.07088231, 1.00528653, 0.9195167, 0.99235353, 1.00839744, 0.98700882, \n 0.87196929, 1.00124084, 0.96481759, 0.9412312, 1.04691734, 0.92419062, \n 0.89478269, 0.85106599, 1.0143535, 1.07011876, 0.88196475, 0.8708013, \n 0.91838154, 0.9309356, 0.97521482, 0.94277816, 0.86336248, 0.8845162, \n 1.00421706, 0.87940419, 0.85516477, 0.86071935, 0.96725404, 0.87175829,\n 0.86386878, 0.96833751, 0.87554994, 0.8449041, 0.77404494, 0.92879454, \n 0.95780868, 0.84341047, 0.88067771, 0.83393048, 0.94414754, 0.94671484,\n 0.84554255, 0.8906436, 0.84538732, 0.78517686, 0.89134056, 0.78446042, \n 0.8952503, 0.84624311, 0.79573064, 0.85422345, 0.88918562, 0.75531048, \n 0.82884413, 0.83369698, 0.77627999, 0.84187759, 0.87986859, 0.86356705,\n 0.90929237, 0.83017397, 0.86393341, 0.81426374, 0.80991068, 0.86676111,\n 0.75232448, 0.8021119, 0.68794232, 0.69039919, 0.71421068, 0.77667793, \n 0.82113389, 0.70256397, 0.83293526, 0.69512453, 0.75148262, 0.7407287, \n 0.74124134, 0.77846167, 0.7941425, 0.81125561, 0.73334183, 0.76452184, \n 0.71159507, 0.67302729, 0.66175046, 0.84778683, 0.66273563, 0.76777339,\n 0.71355888, 0.74460445, 0.76623613, 0.63883733, 0.6887326, 0.74616778, \n 0.65223179, 0.76358086, 0.68985286, 0.66273563, 0.72437662, 0.77382571,\n 0.66234322, 0.74757211, 
0.62809942, 0.75606851, 0.65375498, 0.65920693,\n 0.64767863, 0.67683992, 0.63170556, 0.69891621, 0.70708048, 0.64583276,\n 0.73903135, 0.60068155, 0.66055863, 0.69614341, 0.61515868, 0.63001311,\n 0.68602529, 0.7014929, 0.61950453, 0.60049188, 0.6259654, 0.55819764, \n 0.65039367, 0.67079534, 0.60552195, 0.64864663, 0.59901689, 0.65517427,\n 0.55348699, 0.57578738, 0.6253923, 0.62679547, 0.61274744, 0.5681065, \n 0.6065114, 0.61170127, 0.60009145, 0.61583989, 0.63889728, 0.66477228, \n 0.60133457, 0.56484264, 0.5676353, 0.55359946, 0.59000379, 0.60483562, \n 0.57305916, 0.57591598, 0.66462928])\n', (3938, 6467), False, 'import numpy\n'), ((8665, 8702), 'unittest.main', 'unittest.main', ([], {'argv': "['test.py', '-v']"}), "(argv=['test.py', '-v'])\n", (8678, 8702), False, 'import unittest\n'), ((7301, 7346), 'hoomd.hpmc.integrate.convex_polygon', 'hpmc.integrate.convex_polygon', ([], {'seed': '(10)', 'd': '(0.1)'}), '(seed=10, d=0.1)\n', (7330, 7346), False, 'from hoomd import hpmc\n'), ((7728, 7833), 'hoomd.hpmc.analyze.sdf', 'hpmc.analyze.sdf', ([], {'mc': 'self.mc', 'filename': 'self.tmp_file', 'xmax': 'xmax', 'dx': 'dx', 'navg': '(200)', 'period': '(10)', 'phase': '(0)'}), '(mc=self.mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg\n =200, period=10, phase=0)\n', (7744, 7833), False, 'from hoomd import hpmc\n'), ((7501, 7542), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".hpmc-test-sdf"""'}), "(suffix='.hpmc-test-sdf')\n", (7517, 7542), False, 'import tempfile\n'), ((7960, 7988), 'numpy.loadtxt', 'numpy.loadtxt', (['self.tmp_file'], {}), '(self.tmp_file)\n', (7973, 7988), False, 'import numpy\n'), ((8326, 8355), 'numpy.mean', 'numpy.mean', (['r[1:, 1:]'], {'axis': '(0)'}), '(r[1:, 1:], axis=0)\n', (8336, 8355), False, 'import numpy\n'), ((8607, 8631), 'os.remove', 'os.remove', (['self.tmp_file'], {}), '(self.tmp_file)\n', (8616, 8631), False, 'import os\n'), ((8379, 8397), 'numpy.abs', 'numpy.abs', (['(avg - v)'], {}), '(avg - v)\n', (8388, 
8397), False, 'import numpy\n'), ((8438, 8456), 'numpy.sum', 'numpy.sum', (['invalid'], {}), '(invalid)\n', (8447, 8456), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 22:43:17 2020
@author: Sam
"""
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cv
# Import the measurement data from the text files.
# NOTE(review): the five loads below are copy-pasted; a small loop/helper
# would remove the duplication.
# Columns appear to be: frequency, Re(V_out), Im(V_out) — confirm against the
# export format (3 header rows are skipped).
data10k = np.genfromtxt("ACresonance10kNew.txt",skip_header =3,delimiter=",")
frequency=data10k[:, 0]
rlV_out10k=data10k[:,1]
imV_out10k=data10k[:,2]
data1k = np.genfromtxt("ACresonance1kNew.txt",skip_header =3,delimiter=",")
rlV_out1k=data1k[:,1]
imV_out1k=data1k[:,2]
data100 = np.genfromtxt("ACresonance100New.txt",skip_header =3,delimiter=",")
rlV_out100=data100[:,1]
imV_out100=data100[:,2]
data10 = np.genfromtxt("ACresonance10New.txt",skip_header =3,delimiter=",")
rlV_out10=data10[:,1]
imV_out10=data10[:,2]
data2 = np.genfromtxt("ACresonance2New.txt",skip_header =3,delimiter=",")
rlV_out2=data2[:,1]
imV_out2=data2[:,2]
"""
plt.figure(figsize=(20, 10))
plt.xscale("log")
plt.xlabel("frequency / Hz")
plt.ylabel("Voltage / V")
plt.plot(frequency,rlV_out10k, "g", label = "V_out_real for 10k Ohms for the resistors")
plt.plot(frequency,rlV_out1k, "b", label = "V_out_real for 1k Ohms for the resistors")
plt.plot(frequency,rlV_out100, "pink", label = "V_out_real for 100 Ohms for the resistors")
plt.plot(frequency,rlV_out10, "r", label = "V_out_real for 10 Ohms for the resistors")
plt.plot(frequency,rlV_out2, "black", label = "V_out_real for 2 Ohms for the resistors")
plt.title("AC Resonance graph")
plt.legend()
"""
def x(real_part, imag_part):
    """Return the magnitude of a complex value from its real and imaginary parts.

    Replaces the original ``x = lambda ...`` (PEP 8 E731: don't bind a lambda
    to a name); the callable name and behavior are unchanged.
    """
    return (real_part ** 2 + imag_part ** 2) ** 0.5
# Magnitude |V_out| per frequency for each resistor value.
# Replaces five manual index/append loops with zip-based comprehensions
# (same results; the real and imaginary columns are paired element-wise).
V_out10k = [x(re, im) for re, im in zip(rlV_out10k, imV_out10k)]
V_out1k = [x(re, im) for re, im in zip(rlV_out1k, imV_out1k)]
V_out100 = [x(re, im) for re, im in zip(rlV_out100, imV_out100)]
V_out10 = [x(re, im) for re, im in zip(rlV_out10, imV_out10)]
V_out2 = [x(re, im) for re, im in zip(rlV_out2, imV_out2)]
# Plot |V_out| vs frequency (log x-axis) for each resistor value.
# NOTE(review): no plt.show()/savefig() here — presumably run interactively.
plt.figure(figsize=(20, 10))
plt.xscale("log")
plt.xlabel("frequency / Hz")
plt.ylabel("Voltage / V")
plt.plot(frequency,V_out10k, "g", label = "V_out for 10k Ohms for the resistors (Magnitude)")
plt.plot(frequency,V_out1k, "b", label = "V_out for 1k Ohms for the resistors (Magnitude)")
plt.plot(frequency,V_out100, "r", label = "V_out for 100 Ohms for the resistors (Magnitude)")
plt.plot(frequency,V_out10, "orange", label = "V_out for 10 Ohms for the resistors (Magnitude)")
plt.plot(frequency,V_out2, "black", label = "V_out for 2 Ohms for the resistors (Magnitude)")
plt.title("AC Resonance graph (Magnitudes)")
plt.legend()
def freqAtHalfAmp (V, f):
for i in range (len(V)):
if V[i] > 0.244 and V[i] < 0.256:
print(f[i])
def resonantFreq (V, f):
for i in range (len(V)):
if V[i] > 0.49 and V[i] < 0.51:
print(f[i])
"""
freqAtHalfAmp(V_out1k, frequency)
freqAtHalfAmp (V_out100, frequency)
freqAtHalfAmp (V_out10, frequency)
freqAtHalfAmp (V_out2, frequency)
"""
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.xscale"
] | [((291, 359), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACresonance10kNew.txt"""'], {'skip_header': '(3)', 'delimiter': '""","""'}), "('ACresonance10kNew.txt', skip_header=3, delimiter=',')\n", (304, 359), True, 'import numpy as np\n'), ((448, 515), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACresonance1kNew.txt"""'], {'skip_header': '(3)', 'delimiter': '""","""'}), "('ACresonance1kNew.txt', skip_header=3, delimiter=',')\n", (461, 515), True, 'import numpy as np\n'), ((574, 642), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACresonance100New.txt"""'], {'skip_header': '(3)', 'delimiter': '""","""'}), "('ACresonance100New.txt', skip_header=3, delimiter=',')\n", (587, 642), True, 'import numpy as np\n'), ((704, 771), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACresonance10New.txt"""'], {'skip_header': '(3)', 'delimiter': '""","""'}), "('ACresonance10New.txt', skip_header=3, delimiter=',')\n", (717, 771), True, 'import numpy as np\n'), ((828, 894), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACresonance2New.txt"""'], {'skip_header': '(3)', 'delimiter': '""","""'}), "('ACresonance2New.txt', skip_header=3, delimiter=',')\n", (841, 894), True, 'import numpy as np\n'), ((2161, 2189), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2171, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2213), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2206, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2215, 2243), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency / Hz"""'], {}), "('frequency / Hz')\n", (2225, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2245, 2270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage / V"""'], {}), "('Voltage / V')\n", (2255, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2369), 'matplotlib.pyplot.plot', 'plt.plot', (['frequency', 'V_out10k', '"""g"""'], {'label': '"""V_out for 10k Ohms for the resistors (Magnitude)"""'}), 
"(frequency, V_out10k, 'g', label=\n 'V_out for 10k Ohms for the resistors (Magnitude)')\n", (2280, 2369), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2463), 'matplotlib.pyplot.plot', 'plt.plot', (['frequency', 'V_out1k', '"""b"""'], {'label': '"""V_out for 1k Ohms for the resistors (Magnitude)"""'}), "(frequency, V_out1k, 'b', label=\n 'V_out for 1k Ohms for the resistors (Magnitude)')\n", (2376, 2463), True, 'import matplotlib.pyplot as plt\n'), ((2461, 2558), 'matplotlib.pyplot.plot', 'plt.plot', (['frequency', 'V_out100', '"""r"""'], {'label': '"""V_out for 100 Ohms for the resistors (Magnitude)"""'}), "(frequency, V_out100, 'r', label=\n 'V_out for 100 Ohms for the resistors (Magnitude)')\n", (2469, 2558), True, 'import matplotlib.pyplot as plt\n'), ((2556, 2656), 'matplotlib.pyplot.plot', 'plt.plot', (['frequency', 'V_out10', '"""orange"""'], {'label': '"""V_out for 10 Ohms for the resistors (Magnitude)"""'}), "(frequency, V_out10, 'orange', label=\n 'V_out for 10 Ohms for the resistors (Magnitude)')\n", (2564, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2751), 'matplotlib.pyplot.plot', 'plt.plot', (['frequency', 'V_out2', '"""black"""'], {'label': '"""V_out for 2 Ohms for the resistors (Magnitude)"""'}), "(frequency, V_out2, 'black', label=\n 'V_out for 2 Ohms for the resistors (Magnitude)')\n", (2662, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2749, 2793), 'matplotlib.pyplot.title', 'plt.title', (['"""AC Resonance graph (Magnitudes)"""'], {}), "('AC Resonance graph (Magnitudes)')\n", (2758, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2807), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2805, 2807), True, 'import matplotlib.pyplot as plt\n')] |
import itertools
import numpy as np
import matplotlib as mlp
import matplotlib.pyplot as plt
import pandas as pd
import scipy
from sklearn import cluster
from sklearn import datasets
from sklearn import metrics
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition
from time import time
##Pre-process everything and set up PCA model
faces = pd.read_csv('faces.csv', header = None)
n_samples, n_features = faces.shape
faces_centered = faces - faces.mean(axis=0)
faces_centered -= faces_centered.mean(axis=1).values.reshape(n_samples, -1)
n_features = H*W
mean_image = faces_data.mean(axis=0)
faces_data_centered = faces_data - faces_data.mean(axis=0)
faces_data_centered -= faces_data_centered.mean(axis=1).values.reshape(n_samples, -1)
def plot_gallery(title, images, n_col=3, n_row=2, cmap=plt.cm.gray):
plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(
comp.reshape(image_shape),
cmap=cmap,
interpolation="nearest",
vmin=-vmax,
vmax=vmax,
)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.0)
pca = decomposition.PCA()
pca.fit(faces_centered)
##Define graphing method and plot faces in selected range
plt.figure(figsize=(16, 16));
for ii in range(16):
plt.subplot(4, 4, ii + 1) # It starts with one
plt.imshow(pca.components_[ii].reshape(64, 64), cmap=plt.cm.gray)
plt.grid(False);
plt.xticks([]);
plt.yticks([]);
##Investigate "explained variance ratio over component"
with plt.style.context('fivethirtyeight'):
plt.figure(figsize=(10, 10));
plt.title('Explained Variance Ratio over Component');
plt.plot(pca.explained_variance_ratio_);
with plt.style.context('fivethirtyeight'):
plt.figure(figsize=(16, 12));
plt.title('Cumulative Explained Variance over EigenFace');
plt.plot(pca.explained_variance_ratio_.cumsum());
##Actual PCA applied on faces data and result graphed
n_row, n_col = 1, 15
n_components = n_row * n_col
imsize = (64, 64)
faces= pd.read_csv('faces.csv', header = None)
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).values.reshape(n_samples, -1)
estimator = decomposition.PCA(n_components=n_components, svd_solver="randomized", whiten=True)
data = faces_centered
estimator.fit(data)
for i, comp in enumerate(estimator.components_[:n_components]):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(
comp.reshape(imsize),
cmap=plt.cm.gray,
interpolation="nearest",
vmin=-vmax,
vmax=vmax,
)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.0)
plt.show()
##PCA variance ratio explained, investigate "explained variance ratio over component"
with plt.style.context('fivethirtyeight'):
plt.figure(figsize=(10, 10));
plt.title('Explained Variance Ratio over Component');
plt.plot(estimator.explained_variance_ratio_);
##Projection by taking the first n number of principal components as a recognition technique
x0 = faces[:1]
loadings = pd.DataFrame(estimator.components_)
proj=[]
for i in range(1, 15):
n_row, n_col = 1, i
n_components = n_row * n_col
estimator = decomposition.PCA(n_components=n_components, svd_solver="randomized", whiten=True)
data = faces_centered
estimator.fit(data)
loadings = pd.DataFrame(estimator.components_.T)
P = np.dot(loadings, loadings.T)
proj.append(np.matmul(x0, P))
for i in range(1, len(proj)):
plt.figure(figsize=(16, 16));
p = proj[i].values.reshape(imsize)
plt.figure
plt.subplot(1, len(proj), len(proj))
plt.imshow(p, interpolation='nearest')
plt.xticks()
plt.yticks()
plt.show() | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.context",
"matplotlib.pyplot.yticks",
"numpy.dot",
"numpy.matmul",
"pandas.DataFrame"... | [((419, 456), 'pandas.read_csv', 'pd.read_csv', (['"""faces.csv"""'], {'header': 'None'}), "('faces.csv', header=None)\n", (430, 456), True, 'import pandas as pd\n'), ((1389, 1408), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {}), '()\n', (1406, 1408), False, 'from sklearn import decomposition\n'), ((1493, 1521), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (1503, 1521), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2326), 'pandas.read_csv', 'pd.read_csv', (['"""faces.csv"""'], {'header': 'None'}), "('faces.csv', header=None)\n", (2300, 2326), True, 'import pandas as pd\n'), ((2536, 2622), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""', 'whiten': '(True)'}), "(n_components=n_components, svd_solver='randomized',\n whiten=True)\n", (2553, 2622), False, 'from sklearn import decomposition\n'), ((3034, 3088), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (['(0.01)', '(0.05)', '(0.99)', '(0.93)', '(0.04)', '(0.0)'], {}), '(0.01, 0.05, 0.99, 0.93, 0.04, 0.0)\n', (3053, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3097, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3529), 'pandas.DataFrame', 'pd.DataFrame', (['estimator.components_'], {}), '(estimator.components_)\n', (3506, 3529), True, 'import pandas as pd\n'), ((892, 939), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.0 * n_col, 2.26 * n_row)'}), '(figsize=(2.0 * n_col, 2.26 * n_row))\n', (902, 939), True, 'import matplotlib.pyplot as plt\n'), ((944, 972), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'size': '(16)'}), '(title, size=16)\n', (956, 972), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1376), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (['(0.01)', '(0.05)', '(0.99)', '(0.93)', '(0.04)', 
'(0.0)'], {}), '(0.01, 0.05, 0.99, 0.93, 0.04, 0.0)\n', (1341, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(ii + 1)'], {}), '(4, 4, ii + 1)\n', (1559, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1684), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1677, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1704), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1700, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1724), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1720, 1724), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1825), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1806, 1825), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1859), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1841, 1859), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1917), 'matplotlib.pyplot.title', 'plt.title', (['"""Explained Variance Ratio over Component"""'], {}), "('Explained Variance Ratio over Component')\n", (1874, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1962), 'matplotlib.pyplot.plot', 'plt.plot', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (1931, 1962), True, 'import matplotlib.pyplot as plt\n'), ((1969, 2005), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1986, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (2021, 2039), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2102), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Explained Variance over EigenFace"""'], {}), "('Cumulative Explained Variance over EigenFace')\n", 
(2054, 2102), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2765), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', 'n_col', '(i + 1)'], {}), '(n_row, n_col, i + 1)\n', (2744, 2765), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3010), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (3006, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3033), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (3029, 3033), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3228), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (3209, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3262), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3244, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3320), 'matplotlib.pyplot.title', 'plt.title', (['"""Explained Variance Ratio over Component"""'], {}), "('Explained Variance Ratio over Component')\n", (3277, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3371), 'matplotlib.pyplot.plot', 'plt.plot', (['estimator.explained_variance_ratio_'], {}), '(estimator.explained_variance_ratio_)\n', (3334, 3371), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3720), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""', 'whiten': '(True)'}), "(n_components=n_components, svd_solver='randomized',\n whiten=True)\n", (3651, 3720), False, 'from sklearn import decomposition\n'), ((3782, 3819), 'pandas.DataFrame', 'pd.DataFrame', (['estimator.components_.T'], {}), '(estimator.components_.T)\n', (3794, 3819), True, 'import pandas as pd\n'), ((3828, 3856), 'numpy.dot', 'np.dot', (['loadings', 'loadings.T'], {}), '(loadings, loadings.T)\n', (3834, 3856), True, 'import numpy as np\n'), ((3925, 3953), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 
16))\n', (3935, 3953), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4092), 'matplotlib.pyplot.imshow', 'plt.imshow', (['p'], {'interpolation': '"""nearest"""'}), "(p, interpolation='nearest')\n", (4064, 4092), True, 'import matplotlib.pyplot as plt\n'), ((4097, 4109), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (4107, 4109), True, 'import matplotlib.pyplot as plt\n'), ((4114, 4126), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (4124, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4139, 4141), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1051), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', 'n_col', '(i + 1)'], {}), '(n_row, n_col, i + 1)\n', (1030, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1294), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (1290, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1317), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1313, 1317), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3889), 'numpy.matmul', 'np.matmul', (['x0', 'P'], {}), '(x0, P)\n', (3882, 3889), True, 'import numpy as np\n')] |
#
# Copyright (c) 2019. <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
This class uses MiniMax and Random strategy randomly to generate moves. Training against this player may
help fixing playing same board state repeatedly.
"""
import numpy as np
from Board import Board
from PlayerBase import PlayerBase
from MinMaxPlayer import MinMaxPlayer
class PseudoRandomPlayer(PlayerBase):
def __init__(self):
self.move_history = []
self.min_max_player = MinMaxPlayer()
super().__init__()
def make_move(self, board: Board) -> int:
randomizer = np.random.uniform(0.1, 0.9, 1)
if randomizer >= 0.4:
# use MiniMax strategy most of the time but randomize sometimes just like humans make mistake sometimes
return self.min_max_player.find_best_move(board)[0]
else:
empty_cells = board.get_empty_cells_1d()
while True:
move = np.random.choice(empty_cells)
if board.is_move_valid(move):
self.move_history.append(move)
return move
def match_over(self, match_result):
pass
def next_match(self):
self.move_history = []
def save_data(self):
pass
def load_data(self):
pass
| [
"numpy.random.choice",
"MinMaxPlayer.MinMaxPlayer",
"numpy.random.uniform"
] | [((676, 690), 'MinMaxPlayer.MinMaxPlayer', 'MinMaxPlayer', ([], {}), '()\n', (688, 690), False, 'from MinMaxPlayer import MinMaxPlayer\n'), ((786, 816), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.9)', '(1)'], {}), '(0.1, 0.9, 1)\n', (803, 816), True, 'import numpy as np\n'), ((1141, 1170), 'numpy.random.choice', 'np.random.choice', (['empty_cells'], {}), '(empty_cells)\n', (1157, 1170), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
import uncertainties as unc
from uncertainties import unumpy
R, dr, Iq, dIq, Vg, dVg, vd, dvd = np.genfromtxt("dati.txt", unpack=True, skip_header=1)
Iq = unumpy.uarray(Iq, dIq)*0.001
Vg = unumpy.uarray(Vg, dVg)
vd = unumpy.uarray(vd, dvd)*0.001
Ra = unc.ufloat(6.73e3, 0.06e3)
Rb = unc.ufloat(0.807e3, 0.07e3)
C = unc.ufloat(10e-6, 1e-6)
nVt = 52e-3
Rth = ((Ra*Rb)/(Ra+Rb))
Vth = (Rb/(Ra+Rb))*Vg
id = (Vth-vd)/Rth
rd_att = nVt/Iq
rd1 = vd/id
rd2 = (vd*Ra*Rb) / (Rb*Vg - vd*(Ra+Rb))
print(id) | [
"uncertainties.ufloat",
"numpy.genfromtxt",
"uncertainties.unumpy.uarray"
] | [((170, 223), 'numpy.genfromtxt', 'np.genfromtxt', (['"""dati.txt"""'], {'unpack': '(True)', 'skip_header': '(1)'}), "('dati.txt', unpack=True, skip_header=1)\n", (183, 223), True, 'import numpy as np\n'), ((263, 285), 'uncertainties.unumpy.uarray', 'unumpy.uarray', (['Vg', 'dVg'], {}), '(Vg, dVg)\n', (276, 285), False, 'from uncertainties import unumpy\n'), ((325, 349), 'uncertainties.ufloat', 'unc.ufloat', (['(6730.0)', '(60.0)'], {}), '(6730.0, 60.0)\n', (335, 349), True, 'import uncertainties as unc\n'), ((357, 380), 'uncertainties.ufloat', 'unc.ufloat', (['(807.0)', '(70.0)'], {}), '(807.0, 70.0)\n', (367, 380), True, 'import uncertainties as unc\n'), ((389, 413), 'uncertainties.ufloat', 'unc.ufloat', (['(1e-05)', '(1e-06)'], {}), '(1e-05, 1e-06)\n', (399, 413), True, 'import uncertainties as unc\n'), ((229, 251), 'uncertainties.unumpy.uarray', 'unumpy.uarray', (['Iq', 'dIq'], {}), '(Iq, dIq)\n', (242, 251), False, 'from uncertainties import unumpy\n'), ((291, 313), 'uncertainties.unumpy.uarray', 'unumpy.uarray', (['vd', 'dvd'], {}), '(vd, dvd)\n', (304, 313), False, 'from uncertainties import unumpy\n')] |
import random
import json
import pickle
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbot_model.model')
def clean_up_sentence(sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
return sentence_words
def bag_of_words(sentence):
setence_words = clean_up_sentence(sentence)
bag = [0] * len(words)
for w in setence_words:
for i, word in enumerate(words):
if word == w:
bag[i] = 1
return np.array(bag)
def predict_class(sentence):
bow = bag_of_words(sentence)
res = model.predict(np.array([bow]))[0]
ERROR_TRESHOLD = 0.25
results = [[i, r] for i, r in enumerate(res) if r > ERROR_TRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': classes[r[0]], 'probability':str(r[1])})
return return_list
| [
"tensorflow.keras.models.load_model",
"numpy.array",
"nltk.stem.WordNetLemmatizer",
"nltk.word_tokenize"
] | [((174, 193), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (191, 193), False, 'from nltk.stem import WordNetLemmatizer\n'), ((349, 382), 'tensorflow.keras.models.load_model', 'load_model', (['"""chatbot_model.model"""'], {}), "('chatbot_model.model')\n", (359, 382), False, 'from tensorflow.keras.models import load_model\n'), ((438, 466), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (456, 466), False, 'import nltk\n'), ((808, 821), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (816, 821), True, 'import numpy as np\n'), ((909, 924), 'numpy.array', 'np.array', (['[bow]'], {}), '([bow])\n', (917, 924), True, 'import numpy as np\n')] |
from time import sleep
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
from glob import glob
#from PIL import image
frame_count = 0 # used in mainloop where we're extracting images., and then to drawPred( called by post process)
frame_count_out=0 # used in post process loop, to get the no of specified class value.
# Initialize the parameters
confThreshold = 0.5 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
inpWidth = 416 #Width of network's input image
inpHeight = 416 #Height of network's input image
# Load names of classes
classesFile = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/obj.names";
classes = None
with open(classesFile, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/yolov3-obj.cfg";
modelWeights = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/yolov3-obj_2400.weights";
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# Get the names of the output layers
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom, frame):
global frame_count
# Draw a bounding box.
#cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
label = '%.2f' % conf
# Get the label for the class name and its confidence
if classes:
assert(classId < len(classes))
label = '%s:%s' % (classes[classId], label)
#Display the label at the top of the bounding box
labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
#print(label) #testing
#print(labelSize) #testing
#print(baseLine) #testing
label_name,label_conf = label.split(':') #spliting into class & confidance. will compare it with person.
if label_name == 'Helmet':
#will try to print of label have people.. or can put a counter to find the no of people occurance.
#will try if it satisfy the condition otherwise, we won't print the boxes or leave it.
#cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
#cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1)
frame_count+=1
#print(frame_count)
if(frame_count> 0):
return frame_count
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
global frame_count_out
frame_count_out=0
classIds = []
confidences = []
boxes = []
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = [] #have to fins which class have hieghest confidence........=====>>><<<<=======
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
#print(classIds)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
count_person=0 # for counting the classes in this loop.
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
#this function in loop is calling drawPred so, try pushing one test counter in parameter , so it can calculate it.
frame_count_out = drawPred(classIds[i], confidences[i], left, top, left + width, top + height, frame)
#increase test counter till the loop end then print...
#checking class, if it is a person or not
my_class='Helmet' #======================================== mycode .....
unknown_class = classes[classId]
if my_class == unknown_class:
count_person += 1
#if(frame_count_out > 0):
#print(frame_count_out)
if count_person >= 1:
path = 'test_out/'
# frame_name=os.path.basename(fn) # trimm the path and give file name.
#cv.imwrite(str(path)+frame_name, frame) # writing to folder.
#print(type(frame))
#cv.imshow('img',frame)
#cv.waitKey(800)
return 1
else:
return 0
#cv.imwrite(frame_name, frame)
#======================================mycode.........
# Process inputs
winName = 'Deep learning object detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
def detect(frame):
#frame = cv.imread(fn)
frame_count =0
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
# Sets the input to the network
net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = net.forward(getOutputsNames(net))
# Remove the bounding boxes with low confidence
# Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
t, _ = net.getPerfProfile()
#print(t)
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
#print(label)
#cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
#print(label)
k=postprocess(frame, outs)
if k:
return 1
else:
return 0
| [
"cv2.dnn.blobFromImage",
"cv2.dnn.readNetFromDarknet",
"numpy.argmax",
"cv2.dnn.NMSBoxes",
"cv2.getTickFrequency",
"cv2.getTextSize",
"cv2.namedWindow"
] | [((1154, 1213), 'cv2.dnn.readNetFromDarknet', 'cv.dnn.readNetFromDarknet', (['modelConfiguration', 'modelWeights'], {}), '(modelConfiguration, modelWeights)\n', (1179, 1213), True, 'import cv2 as cv\n'), ((5999, 6040), 'cv2.namedWindow', 'cv.namedWindow', (['winName', 'cv.WINDOW_NORMAL'], {}), '(winName, cv.WINDOW_NORMAL)\n', (6013, 6040), True, 'import cv2 as cv\n'), ((2116, 2170), 'cv2.getTextSize', 'cv.getTextSize', (['label', 'cv.FONT_HERSHEY_SIMPLEX', '(0.5)', '(1)'], {}), '(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n', (2130, 2170), True, 'import cv2 as cv\n'), ((4537, 4601), 'cv2.dnn.NMSBoxes', 'cv.dnn.NMSBoxes', (['boxes', 'confidences', 'confThreshold', 'nmsThreshold'], {}), '(boxes, confidences, confThreshold, nmsThreshold)\n', (4552, 4601), True, 'import cv2 as cv\n'), ((6158, 6247), 'cv2.dnn.blobFromImage', 'cv.dnn.blobFromImage', (['frame', '(1 / 255)', '(inpWidth, inpHeight)', '[0, 0, 0]', '(1)'], {'crop': '(False)'}), '(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1,\n crop=False)\n', (6178, 6247), True, 'import cv2 as cv\n'), ((3799, 3816), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3808, 3816), True, 'import numpy as np\n'), ((6722, 6743), 'cv2.getTickFrequency', 'cv.getTickFrequency', ([], {}), '()\n', (6741, 6743), True, 'import cv2 as cv\n')] |
import pytest
import numpy as np
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from dtaidistance import dtw, dtw_c
import logging
logger = logging.getLogger("be.kuleuven.dtai.distance")
if dtw_c is None:
print('ERROR: dtw_c is not build')
sys.exit(1)
def test_distance1_a():
# dist_opts = {'max_dist': 0.201, 'max_step': 0.011, 'max_length_diff': 8, 'window': 3}
dist_opts = {'window': 3}
s1 = np.array([ 0., 0.01, 0., 0.01, 0., 0., 0., 0.01, 0.01, 0.02, 0., 0.])
s2 = np.array([ 0., 0.02, 0.02, 0., 0., 0.01, 0.01, 0., 0., 0., 0.])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
assert d1 == d2
assert d1 == pytest.approx(0.02)
def test_distance1_b():
dist_opts = {}
s1 = np.array([ 0., 0.01, 0., 0.01, 0., 0., 0., 0.01, 0.01, 0.02, 0., 0.])
s2 = np.array([ 0., 0.02, 0.02, 0., 0., 0.01, 0.01, 0., 0., 0., 0.])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
assert d1 == d2
assert d1 == pytest.approx(0.02)
def test_distance2_a():
dist_opts = {'max_dist': 1.1}
s1 = np.array([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])
s2 = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
assert d1 == d2
assert d1 == pytest.approx(1.0)
def test_distance2_aa():
dist_opts = {'max_dist': 0.1}
s1 = np.array([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])
s2 = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
print(d1, d2)
assert d1 == d2
assert d1 == pytest.approx(np.inf)
def test_distance2_b():
dist_opts = {'max_step': 1.1}
s1 = np.array([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])
s2 = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
assert d1 == d2
assert d1 == pytest.approx(1.0)
def test_distance2_bb():
dist_opts = {'max_step': 0.1}
s1 = np.array([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])
s2 = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
print(d1, d2)
assert d1 == d2
assert d1 == pytest.approx(np.inf)
def test_distance2_c():
dist_opts = {}
s1 = np.array([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])
s2 = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
d1 = dtw.distance(s1, s2, **dist_opts)
d2 = dtw_c.distance_nogil(s1, s2, **dist_opts)
assert d1 == d2
assert d1 == pytest.approx(1.0)
def test_distance3_a():
dist_opts = {"penalty": 0.005, "max_step": 0.011, "window": 3}
s = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.005, 0.01, 0.015, 0.02, 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
p = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.005, 0.01, 0.015, 0.02, 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
d1 = dtw.distance(s, p, **dist_opts)
d2 = dtw_c.distance_nogil(s, p, **dist_opts)
assert d1 == pytest.approx(d2)
def test_distance4():
    """Rows sliced out of a DataFrame's values are not C-contiguous; the C
    implementation must still accept them (memory-layout regression test)."""
    # Report a skip (instead of silently passing) when pandas is missing.
    pd = pytest.importorskip("pandas")
    s = [[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
         [0.005, 0.01, 0.015, 0.02, 0.01, 0., 0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]
    p = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
    df = pd.DataFrame(data=s)
    s = df.values
    for i in range(s.shape[0]):
        ss = s[i]  # ss will not be C contiguous memory layout
        d = dtw_c.distance_nogil(ss, p)
        # DTW distances are non-negative; replaces a commented-out print.
        assert d >= 0
# def test_distance5():
# healthy = np.array([-0.01014404, 0.01240234, -0.00549316, 0.01905518, 0.02086182,
# 0.02155762, 0.02252197, -0.0015625, 0.0194458, -0.00305176,
# 0.01724854, 0.01274414, 0.01470947, 0.01373291, -0.00751953,
# 0.01088867, -0.01018066, 0.01325684, 0.00531006, 0.01184082,
# 0.01030273, -0.00766602, 0.00996094, -0.01044922, 0.00991211,
# 0.00155029, 0.01335449, 0.0135498, -0.00367432, 0.00953369,
# -0.01192627, 0.01107178, -0.00112305, 0.01309814, 0.01253662,
# -0.00327148, 0.00714111, -0.01375732, 0.00942383, -0.00631104,
# 0.015271, 0.01461182, 0.00447998, 0.01408691, -0.00461426,
# 0.01923828, -0.00228271, 0.01993408, 0.0177124, 0.01256104])
#
# faulty = np.array([0.51872559, 0.51743164, 0.51727295, 0.51866455, 0.512146,
# 0.5309082, 0.52078857, 0.52185059, 0.52429199, 0.52486572,
# 0.53078613, 0.50743408, 0.52678223, 0.52731934, 0.52879639,
# 0.53051758, 0.51055908, 0.54437256, 0.5453125, 0.54205322,
# 0.54060059, 0.53500977, 0.54443359, 0.52835693, 0.53216553,
# 0.53133545, 0.53546143, 0.53426514, 0.50535889, 0.53413086,
# 0.53583984, 0.53778076, 0.53405762, 0.51973877, 0.54488525,
# 0.53464355, 0.5338501, 0.53098145, 0.528479, 0.53360596,
# 0.50834961, 0.52283936, 0.52408447, 0.53001709, 0.5282959,
# 0.50821533, 0.5369873, 0.53790283, 0.53980713, 0.53851318])
# d = dtw.distance_fast(healthy, faulty)
# print(d)
# dp, dsp = ssdtw.warping_paths(healthy, faulty, None)
# print(dp)
# d, ds = dtw.warping_paths(healthy, faulty)
# print(d)
# print(ds)
# dtw.plot(healthy, faulty, ds, "/Users/wannes/Desktop/test1.png")
# dtw.plot(healthy, faulty, dsp, "/Users/wannes/Desktop/test2.png")
# np.savetxt("/Users/wannes/Desktop/matrix1.txt", ds)
# np.savetxt("/Users/wannes/Desktop/matrix2.txt", dsp)
# print('healthy', np.mean(healthy), np.std(healthy))
# print('faulty', np.mean(faulty), np.std(faulty))
#
# Conclusion: Constant difference between two series will always have the diagonal as best solution and is thus
# equal to Euclidean distance. This is one of the reasons why normalisation is important for
# clustering.
def test_distance6():
    """distance_fast with a warping window runs on plain double arrays."""
    s1 = np.array([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=np.double)
    s2 = np.array([0.0, 1, 2, 0, 0, 0, 0, 0, 0])
    d = dtw.distance_fast(s1, s2, window=2)
    # A DTW distance is non-negative; replaces a leftover debug print.
    assert d >= 0
def test_bug1():
    """Failed on Windows if pointer types are different."""
    data = (
        [0, 0, 1, 2, 1, 0, 1, 0, 0],
        [0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 2, 1, 0, 0, 0],
    )
    # Build every series explicitly as double so pointer types match.
    series = [np.array(values, dtype=np.double) for values in data]
    ds = dtw.distance_matrix_fast(series)
    # print(ds)
def test_bug1_serial():
    """Failed on Windows if pointer types are different (serial code path)."""
    series = [np.array([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=np.double),
              np.array([0.0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0]),
              np.array([0.0, 0, 1, 2, 1, 0, 0, 0])]
    ds = dtw.distance_matrix_fast(series, parallel=False)
    # Replaces a leftover debug print: the call must at least yield a result.
    assert ds is not None
if __name__ == "__main__":
    # Allow running this test module as a plain script: route the package
    # logger's output to stdout so diagnostics are visible.
    logger.setLevel(logging.WARNING)
    sh = logging.StreamHandler(sys.stdout)
    logger.addHandler(sh)
    # Uncomment individual tests below to run them in isolation.
    # test_distance2_a()
    # test_distance2_b()
    # test_distance2_c()
    # test_distance3_a()
    # test_distance4()
    # test_distance6()
    test_bug1()
| [
"logging.getLogger",
"dtaidistance.dtw.distance_matrix_fast",
"dtaidistance.dtw.distance",
"logging.StreamHandler",
"pytest.approx",
"dtaidistance.dtw_c.distance_nogil",
"dtaidistance.dtw.distance_fast",
"numpy.array",
"os.path.realpath",
"sys.exit",
"pandas.DataFrame"
] | [((196, 242), 'logging.getLogger', 'logging.getLogger', (['"""be.kuleuven.dtai.distance"""'], {}), "('be.kuleuven.dtai.distance')\n", (213, 242), False, 'import logging\n'), ((306, 317), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (314, 317), False, 'import sys, os\n'), ((474, 549), 'numpy.array', 'np.array', (['[0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01, 0.01, 0.02, 0.0, 0.0]'], {}), '([0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01, 0.01, 0.02, 0.0, 0.0])\n', (482, 549), True, 'import numpy as np\n'), ((560, 629), 'numpy.array', 'np.array', (['[0.0, 0.02, 0.02, 0.0, 0.0, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.02, 0.02, 0.0, 0.0, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0])\n', (568, 629), True, 'import numpy as np\n'), ((641, 674), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (653, 674), False, 'from dtaidistance import dtw, dtw_c\n'), ((684, 725), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (704, 725), False, 'from dtaidistance import dtw, dtw_c\n'), ((837, 912), 'numpy.array', 'np.array', (['[0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01, 0.01, 0.02, 0.0, 0.0]'], {}), '([0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01, 0.01, 0.02, 0.0, 0.0])\n', (845, 912), True, 'import numpy as np\n'), ((923, 992), 'numpy.array', 'np.array', (['[0.0, 0.02, 0.02, 0.0, 0.0, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.02, 0.02, 0.0, 0.0, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0])\n', (931, 992), True, 'import numpy as np\n'), ((1004, 1037), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1016, 1037), False, 'from dtaidistance import dtw, dtw_c\n'), ((1047, 1088), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1067, 1088), False, 'from dtaidistance import dtw, dtw_c\n'), ((1215, 1260), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 2.0, 
1.0, 1.0, 0.0, 0.0])\n', (1223, 1260), True, 'import numpy as np\n'), ((1270, 1315), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])\n', (1278, 1315), True, 'import numpy as np\n'), ((1325, 1358), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1337, 1358), False, 'from dtaidistance import dtw, dtw_c\n'), ((1368, 1409), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1388, 1409), False, 'from dtaidistance import dtw, dtw_c\n'), ((1536, 1581), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])\n', (1544, 1581), True, 'import numpy as np\n'), ((1591, 1636), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])\n', (1599, 1636), True, 'import numpy as np\n'), ((1646, 1679), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1658, 1679), False, 'from dtaidistance import dtw, dtw_c\n'), ((1689, 1730), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1709, 1730), False, 'from dtaidistance import dtw, dtw_c\n'), ((1877, 1922), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])\n', (1885, 1922), True, 'import numpy as np\n'), ((1932, 1977), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])\n', (1940, 1977), True, 'import numpy as np\n'), ((1987, 2020), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (1999, 2020), False, 'from dtaidistance import dtw, dtw_c\n'), ((2030, 2071), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (2050, 2071), False, 'from dtaidistance 
import dtw, dtw_c\n'), ((2198, 2243), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])\n', (2206, 2243), True, 'import numpy as np\n'), ((2253, 2298), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])\n', (2261, 2298), True, 'import numpy as np\n'), ((2308, 2341), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (2320, 2341), False, 'from dtaidistance import dtw, dtw_c\n'), ((2351, 2392), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (2371, 2392), False, 'from dtaidistance import dtw, dtw_c\n'), ((2524, 2569), 'numpy.array', 'np.array', (['[0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0])\n', (2532, 2569), True, 'import numpy as np\n'), ((2579, 2624), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])\n', (2587, 2624), True, 'import numpy as np\n'), ((2634, 2667), 'dtaidistance.dtw.distance', 'dtw.distance', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (2646, 2667), False, 'from dtaidistance import dtw, dtw_c\n'), ((2677, 2718), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s1', 's2'], {}), '(s1, s2, **dist_opts)\n', (2697, 2718), False, 'from dtaidistance import dtw, dtw_c\n'), ((2876, 3147), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005, \n 0.01, 0.015, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.005, 0.01, 0.015, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0])\n', (2884, 3147), True, 'import numpy as np\n'), ((3099, 3370), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005, 0.01, \n 0.015, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005,\n 0.01, 0.015, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3107, 3370), True, 'import numpy as np\n'), ((3323, 3354), 'dtaidistance.dtw.distance', 'dtw.distance', (['s', 'p'], {}), '(s, p, **dist_opts)\n', (3335, 3354), False, 'from dtaidistance import dtw, dtw_c\n'), ((3364, 3403), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['s', 'p'], {}), '(s, p, **dist_opts)\n', (3384, 3403), False, 'from dtaidistance import dtw, dtw_c\n'), ((3836, 3911), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3844, 3911), True, 'import numpy as np\n'), ((3908, 3928), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 's'}), '(data=s)\n', (3920, 3928), True, 'import pandas as pd\n'), ((6670, 6724), 'numpy.array', 'np.array', (['[0, 0, 1, 2, 1, 0, 1, 0, 0]'], {'dtype': 'np.double'}), '([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=np.double)\n', (6678, 6724), True, 'import numpy as np\n'), ((6734, 6773), 'numpy.array', 'np.array', (['[0.0, 1, 2, 0, 0, 0, 0, 0, 0]'], {}), '([0.0, 1, 2, 0, 0, 0, 0, 0, 0])\n', (6742, 6773), True, 'import numpy as np\n'), ((6782, 6817), 'dtaidistance.dtw.distance_fast', 'dtw.distance_fast', (['s1', 's2'], {'window': '(2)'}), '(s1, s2, window=2)\n', (6799, 6817), False, 'from dtaidistance import dtw, dtw_c\n'), ((7102, 7134), 
'dtaidistance.dtw.distance_matrix_fast', 'dtw.distance_matrix_fast', (['series'], {}), '(series)\n', (7126, 7134), False, 'from dtaidistance import dtw, dtw_c\n'), ((7429, 7477), 'dtaidistance.dtw.distance_matrix_fast', 'dtw.distance_matrix_fast', (['series'], {'parallel': '(False)'}), '(series, parallel=False)\n', (7453, 7477), False, 'from dtaidistance import dtw, dtw_c\n'), ((7567, 7600), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (7588, 7600), False, 'import logging\n'), ((763, 782), 'pytest.approx', 'pytest.approx', (['(0.02)'], {}), '(0.02)\n', (776, 782), False, 'import pytest\n'), ((1126, 1145), 'pytest.approx', 'pytest.approx', (['(0.02)'], {}), '(0.02)\n', (1139, 1145), False, 'import pytest\n'), ((1447, 1465), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1460, 1465), False, 'import pytest\n'), ((1786, 1807), 'pytest.approx', 'pytest.approx', (['np.inf'], {}), '(np.inf)\n', (1799, 1807), False, 'import pytest\n'), ((2109, 2127), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (2122, 2127), False, 'import pytest\n'), ((2448, 2469), 'pytest.approx', 'pytest.approx', (['np.inf'], {}), '(np.inf)\n', (2461, 2469), False, 'import pytest\n'), ((2756, 2774), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (2769, 2774), False, 'import pytest\n'), ((3421, 3438), 'pytest.approx', 'pytest.approx', (['d2'], {}), '(d2)\n', (3434, 3438), False, 'import pytest\n'), ((4054, 4081), 'dtaidistance.dtw_c.distance_nogil', 'dtw_c.distance_nogil', (['ss', 'p'], {}), '(ss, p)\n', (4074, 4081), False, 'from dtaidistance import dtw, dtw_c\n'), ((6924, 6978), 'numpy.array', 'np.array', (['[0, 0, 1, 2, 1, 0, 1, 0, 0]'], {'dtype': 'np.double'}), '([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=np.double)\n', (6932, 6978), True, 'import numpy as np\n'), ((6994, 7039), 'numpy.array', 'np.array', (['[0.0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0.0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0])\n', (7002, 7039), True, 
'import numpy as np\n'), ((7055, 7091), 'numpy.array', 'np.array', (['[0.0, 0, 1, 2, 1, 0, 0, 0]'], {}), '([0.0, 0, 1, 2, 1, 0, 0, 0])\n', (7063, 7091), True, 'import numpy as np\n'), ((7251, 7305), 'numpy.array', 'np.array', (['[0, 0, 1, 2, 1, 0, 1, 0, 0]'], {'dtype': 'np.double'}), '([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=np.double)\n', (7259, 7305), True, 'import numpy as np\n'), ((7321, 7366), 'numpy.array', 'np.array', (['[0.0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0.0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0])\n', (7329, 7366), True, 'import numpy as np\n'), ((7382, 7418), 'numpy.array', 'np.array', (['[0.0, 0, 1, 2, 1, 0, 0, 0]'], {}), '([0.0, 0, 1, 2, 1, 0, 0, 0])\n', (7390, 7418), True, 'import numpy as np\n'), ((93, 119), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (109, 119), False, 'import sys, os\n')] |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
"""
Implements the Scene Parser framework
"""
from matplotlib import projections
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.detector.generalized_rcnn import GeneralizedRCNN
from .relation_head.relation_head import build_roi_relation_head
from maskrcnn_benchmark.modeling.backbone import build_backbone
from .relation_head.roi_relation_box_feature_extractors import make_roi_relation_box_feature_extractor
from .attribute_head.attribute_head import build_roi_attribute_head
from torchvision.utils import save_image
from cross_vit import Attention, CrossTransformerV2, CrossTransformer
import wandb
from torchinfo import summary
from torch.utils.tensorboard import SummaryWriter
class SceneParserOutputs(object):
    """
    Structure that holds SceneParser output object predictions and relation
    predictions, and provides a .to function to be able to move all necessary
    tensors between gpu and cpu. (Inspired from SCANEmbedding)
    """
    def __init__(self, predictions, prediction_pairs=None):
        # predictions: per-image object detections;
        # prediction_pairs: the matching relation predictions, if any.
        self.predictions = predictions
        self.prediction_pairs = prediction_pairs
    def to(self, *args, **kwargs):
        """Return a copy whose members are moved via the wrapped objects' .to().

        Bug fix: keyword arguments were previously forwarded with a single
        star (``*kwargs``), which unpacks only the dict *keys* as positional
        arguments; they are now forwarded correctly with ``**kwargs``.
        """
        cast_predictions = self.predictions.to(*args, **kwargs)
        if self.prediction_pairs is not None:
            cast_prediction_pairs = self.prediction_pairs.to(*args, **kwargs)
        else:
            cast_prediction_pairs = None
        return SceneParserOutputs(cast_predictions, cast_prediction_pairs)
# Relation-head algorithm names for which a scene-graph relation head is
# built (checked against cfg.MODEL.ROI_RELATION_HEAD.ALGORITHM below).
# NOTE(review): "PAESER" looks like a typo for "PARSER"; the name is kept
# as-is because renaming would break external references.
SCENE_PAESER_DICT = ["sg_baseline", "sg_imp",
                     "sg_msdn", "sg_grcnn", "sg_reldn", "sg_neuralmotif"]
class SceneParser(GeneralizedRCNN):
    """
    Main class for Generalized Relation R-CNN.
    It consists of three main parts:
    - backbone
    - rpn
    - object detection (roi_heads)
    - Scene graph parser model: IMP, MSDN, MOTIF, graph-rcnn, etc.
    """
    def __init__(self, cfg):
        super(SceneParser, self).__init__(cfg)
        self.cfg = cfg
        self.device = cfg.MODEL.DEVICE
        # Whether object detections are supplied pre-computed with the targets.
        self.detector_pre_calculated = self.cfg.MODEL.ROI_RELATION_HEAD.DETECTOR_PRE_CALCULATED
        # Whether ground-truth boxes are forced as proposals (sgcls mode).
        self.detector_force_boxes = self.cfg.MODEL.ROI_BOX_HEAD.FORCE_BOXES
        self.cfg_check()
        # NOTE(review): hard-coded off; the cross-attention modules below and
        # the attention branch in forward() only run when this is True.
        self.attend = False
        feature_dim = self.backbone.out_channels
        if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_CONV_BACKBONE:
            # Decoupled setting: the relation head gets its own backbone.
            self.rel_backbone = build_backbone(cfg)
            feature_dim = self.rel_backbone.out_channels
        # TODO: add force_relations logic
        self.force_relations = cfg.MODEL.ROI_RELATION_HEAD.FORCE_RELATIONS
        if cfg.MODEL.RELATION_ON and self.cfg.MODEL.ROI_RELATION_HEAD.ALGORITHM in SCENE_PAESER_DICT:
            self.relation_head = build_roi_relation_head(cfg, feature_dim)
        if cfg.MODEL.ATTRIBUTE_ON:
            self.attribute_head = build_roi_attribute_head(cfg, feature_dim)
        # self._freeze_components(self.cfg)
        # Freeze the whole object detector; only relation/attribute parts train.
        for p in self.backbone.parameters():
            p.requires_grad = False
        for p in self.rpn.parameters():
            p.requires_grad = False
        for p in self.roi_heads.parameters():
            p.requires_grad = False
        if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            if self.cfg.MODEL.ROI_RELATION_HEAD.SEPERATE_SO_FEATURE_EXTRACTOR:
                # Separate feature extractors for subject and object boxes.
                self.subj_feature_extractor = make_roi_relation_box_feature_extractor(
                    cfg, feature_dim)
                self.obj_feature_extractor = make_roi_relation_box_feature_extractor(
                    cfg, feature_dim)
            else:
                self.obj_feature_extractor = make_roi_relation_box_feature_extractor(
                    cfg, feature_dim)
        if self.attend:
            # Cross-attention between per-image box features and a feature map;
            # assumes a 20x20 spatial map and 1024-dim box features — TODO confirm.
            w, h = 20, 20
            sm_dim = 1024
            lg_dim = w * h
            cross_attn_depth = 4  # 2
            cross_attn_heads = 6  # 4
            cross_attn_dim_head = 64
            dropout = 0.1
            self.cross_attention = CrossTransformerV2(sm_dim=sm_dim, lg_dim=lg_dim, depth=cross_attn_depth,
                                                     heads=cross_attn_heads, dim_head=cross_attn_dim_head, dropout=dropout)
            self.norm = torch.nn.BatchNorm2d(256)
            self.norm2 = torch.nn.BatchNorm2d(1)
            # self.attend = Attention(
            #     dim=sm_dim, heads=cross_attn_heads, dim_head=64, dropout=dropout)
        # wandb.watch(self.relation_head)
        # self.writer = SummaryWriter('runs/model_check4')
    def cfg_check(self):
        """Sanity-check mutually exclusive config options for the chosen mode."""
        if self.cfg.MODEL.ROI_RELATION_HEAD.MODE == 'predcls':
            # predcls consumes ground-truth boxes/labels directly.
            assert self.cfg.MODEL.ROI_RELATION_HEAD.DETECTOR_PRE_CALCULATED == False and self.cfg.MODEL.ROI_RELATION_HEAD.FORCE_RELATIONS == False
        if self.cfg.MODEL.ROI_RELATION_HEAD.MODE == 'sgcls':
            # sgcls classifies boxes, so GT boxes must be forced as proposals.
            assert self.cfg.MODEL.ROI_BOX_HEAD.FORCE_BOXES == True and self.cfg.MODEL.ROI_RELATION_HEAD.DETECTOR_PRE_CALCULATED == False
    def to(self, device, **kwargs):
        """Move the model and its optional relation/attribute heads to *device*."""
        super(SceneParser, self).to(device, **kwargs)
        if self.cfg.MODEL.RELATION_ON:
            self.relation_head.to(device, **kwargs)
        if self.cfg.MODEL.ATTRIBUTE_ON:
            self.attribute_head.to(device, **kwargs)
        # if self.detector_pre_calculated:
        #     self.backbone.to('cpu')
        #     self.rpn.to('cpu')
        #     self.roi_heads.to('cpu')
    def _post_processing_constrained(self, result_obj, result_pred):
        """
        Arguments:
            object_predictions, predicate_predictions
        Returns:
            sort the object-predicate triplets (score = subj * obj * best
            predicate score), keep the top TRIPLETS_PER_IMG and drop those
            below POSTPROCESS_SCORE_THRESH
        """
        result_obj_new, result_pred_new = [], []
        assert len(result_obj) == len(
            result_pred), "object list must have equal number to predicate list"
        for result_obj_i, result_pred_i in zip(result_obj, result_pred):
            obj_scores = result_obj_i.get_field("scores")
            rel_inds = result_pred_i.get_field("idx_pairs")
            pred_scores = result_pred_i.get_field("scores")
            # Triplet score: subject score * object score * best non-BG
            # predicate score (column 0 is background, hence the 1: slice).
            scores = torch.stack((
                obj_scores[rel_inds[:, 0]],
                obj_scores[rel_inds[:, 1]],
                pred_scores[:, 1:].max(1)[0]
            ), 1).prod(1)
            scores_sorted, order = scores.sort(0, descending=True)
            result_pred_i = result_pred_i[order[:self.cfg.MODEL.ROI_RELATION_HEAD.TRIPLETS_PER_IMG]]
            result_obj_new.append(result_obj_i)
            result_pred_i.add_field('labels', result_pred_i.get_field(
                "scores")[:, 1:].argmax(dim=1))  # not include background
            result_pred_i.add_field(
                'scores_all', result_pred_i.get_field('scores'))
            result_pred_i.add_field(
                'scores', scores[order[:self.cfg.MODEL.ROI_RELATION_HEAD.TRIPLETS_PER_IMG]])
            # filter out bad prediction
            inds = result_pred_i.get_field(
                'scores') > self.cfg.MODEL.ROI_RELATION_HEAD.POSTPROCESS_SCORE_THRESH
            result_pred_i = result_pred_i[inds]
            result_pred_new.append(result_pred_i)
        return result_obj_new, result_pred_new
    def _post_processing_unconstrained(self, result_obj, result_pred):
        """
        Arguments:
            object_predictions, predicate_predictions
        Returns:
            sort the object-predicate triplets without the one-label-per-pair
            constraint and output the top TRIPLETS_PER_IMG
        """
        result_obj_new, result_pred_new = [], []
        assert len(result_obj) == len(
            result_pred), "object list must have equal number to predicate list"
        for result_obj_i, result_pred_i in zip(result_obj, result_pred):
            obj_scores = result_obj_i.get_field("scores").cpu().numpy()
            rel_inds = result_pred_i.get_field("idx_pairs").cpu().numpy()
            pred_scores = result_pred_i.get_field("scores").cpu().numpy()[:, 1:]
            # Per pair, predicate classes ranked by descending score.
            det_labels_prd = np.argsort(-pred_scores, axis=1)
            det_scores_prd = -np.sort(-pred_scores, axis=1)
            det_scores_so = obj_scores[rel_inds[:, 0]
                                       ] * obj_scores[rel_inds[:, 1]]
            # Only the top-2 predicate classes per pair enter the ranking.
            det_scores_spo = det_scores_so[:, None] * det_scores_prd[:, :2]
            det_scores_inds = argsort_desc(det_scores_spo)[
                :self.cfg.MODEL.ROI_RELATION_HEAD.TRIPLETS_PER_IMG]
            result_labels = det_labels_prd[det_scores_inds[:,
                                                           0], det_scores_inds[:, 1]]
            result_pred_i = result_pred_i[det_scores_inds[:, 0]]
            result_pred_i.add_field('labels', torch.from_numpy(result_labels))
            result_pred_i.add_field(
                'scores_all', result_pred_i.get_field('scores'))
            result_pred_i.add_field('scores', torch.from_numpy(
                det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]]))
            # filter out bad prediction
            inds = result_pred_i.get_field(
                'scores') > self.cfg.MODEL.ROI_RELATION_HEAD.POSTPROCESS_SCORE_THRESH
            result_pred_i = result_pred_i[inds]
            result_obj_new.append(result_obj_i)
            result_pred_new.append(result_pred_i)
        return result_obj_new, result_pred_new
    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
            We can assume that gt_boxlist contains two other fields:
                "relation_labels": list of [subj_id, obj_id, predicate_category]
                "pred_labels": n*n matrix with predicate_category (including BG) as values.
        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns list[BoxList] contains additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        if self.force_relations and targets is None:
            # note targets cannot be None but could have 0 box.
            raise ValueError(
                "In force_relations setting, targets should be passed")
        # set the object detector to evaluation mode and run the object detection model
        self.backbone.eval()
        self.rpn.eval()
        self.roi_heads.eval()
        images = to_image_list(images)
        if targets:
            if self.detector_pre_calculated:
                # In the pre-calculated setting each entry of `targets` is a
                # (target, prediction) pair — TODO confirm against the loader.
                predictions = [prediction.to(self.device) for (
                    target, prediction) in targets if prediction is not None]
                targets = [target.to(self.device) for (
                    target, prediction) in targets if target is not None]
            else:
                targets = [target.to(self.device)
                           for target in targets if target is not None]
        scene_parser_losses = {}
        if not self.detector_pre_calculated:
            features = self.backbone(images.tensors)
            proposals, proposal_losses = self.rpn(images, features, targets)
            if self.detector_force_boxes:
                # Replace RPN proposals with the ground-truth boxes.
                proposals = [BoxList(target.bbox, target.size, target.mode)
                             for target in targets]
                x, predictions, detector_losses = self.roi_heads(
                    features, proposals, targets)
            else:
                # NOTE(review): both branches issue the same roi_heads call;
                # only the proposals differ. `x` is unused below.
                x, predictions, detector_losses = self.roi_heads(
                    features, proposals, targets)
            scene_parser_losses.update(detector_losses)
        else:
            proposal_losses = {}
            # NOTE(review): `targets is not None or len(targets) != 0` is
            # always True when targets is not None; `and` was likely intended.
            if targets is not None or len(targets) != 0:
                predictions = self.roi_heads['box'].loss_evaluator.prepare_labels(
                    predictions, targets)
        if (self.force_relations or self.cfg.MODEL.ROI_RELATION_HEAD.MODE == 'predcls') and not self.training:
            # predcls evaluation: treat the ground truth as perfect detections.
            predictions = targets
            for pred in predictions:
                pred.add_field('scores', torch.tensor(
                    [1.0] * len(pred)).to(self.device))
                if self.cfg.TEST.OUTPUT_FEATURE:
                    # Build one-hot pseudo score/box tensors from the GT labels.
                    gt_labels = pred.get_field('labels')
                    gt_pseudo_scores_all = torch.zeros(
                        len(pred), self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES).to(gt_labels.device)
                    gt_pseudo_scores_all.scatter_(
                        1, gt_labels.unsqueeze(0).view(-1, 1), 1)
                    pred.add_field('scores_all', gt_pseudo_scores_all)
                    gt_boxes = pred.bbox
                    gt_pseudo_boxes_all = gt_boxes.unsqueeze(1).repeat(
                        1, self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES, 1)
                    pred.add_field('boxes_all', gt_pseudo_boxes_all)
            if self.cfg.TEST.OUTPUT_FEATURE:
                gt_features = self.roi_heads.box.feature_extractor(
                    features, predictions)
                if gt_features.ndimension() == 4:
                    gt_features = torch.nn.functional.adaptive_avg_pool2d(
                        gt_features, 1)
                gt_features = gt_features.view(gt_features.size(0), -1)
                gt_boxes_per_image = [len(box) for box in predictions]
                assert sum(gt_boxes_per_image) == len(
                    gt_features), "gt_boxes_per_image and len(gt_features) do not match!"
                gt_features = gt_features.split(gt_boxes_per_image, dim=0)
                for pred, gt_feature in zip(predictions, gt_features):
                    pred.add_field('box_features', gt_feature)
        if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_CONV_BACKBONE:
            features = self.rel_backbone(images.tensors)  # decoupled
        else:
            features = [feature.detach() for feature in features]  # coupled
        # relation classification network
        # optimization: during training, if we share the feature extractor between
        # the box and the relation heads, then we can reuse the features already computed
        if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            obj_features = self.obj_feature_extractor(
                features, predictions, use_relu=False)
            if obj_features.ndimension() == 4:
                obj_features = torch.nn.functional.adaptive_avg_pool2d(
                    obj_features, 1)
            obj_features = obj_features.view(obj_features.size(0), -1)
            boxes_per_image = [len(box) for box in predictions]
            obj_features = obj_features.split(boxes_per_image, dim=0)
            for prediction, obj_feature in zip(predictions, obj_features):
                prediction.add_field('box_features', obj_feature)
            if self.cfg.MODEL.ROI_RELATION_HEAD.SEPERATE_SO_FEATURE_EXTRACTOR:
                subj_features = self.subj_feature_extractor(
                    features, predictions, use_relu=False)
                if subj_features.ndimension() == 4:
                    subj_features = torch.nn.functional.adaptive_avg_pool2d(
                        subj_features, 1)
                subj_features = subj_features.view(
                    subj_features.size(0), -1)
                boxes_per_image = [len(box) for box in predictions]
                subj_features = subj_features.split(boxes_per_image, dim=0)
                for prediction, subj_feature, obj_feature in zip(predictions, subj_features, obj_features):
                    prediction.add_field('subj_box_features', subj_feature)
                    prediction.add_field('obj_box_features', obj_feature)
        if self.training:
            # Attach ground-truth box features (and labels) to the targets so
            # the relation head can be supervised on them.
            if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
                gt_features = self.obj_feature_extractor(
                    features, targets, use_relu=False)
            else:
                gt_features = self.roi_heads.box.feature_extractor(
                    features, targets)
            if gt_features.ndimension() == 4:
                gt_features = torch.nn.functional.adaptive_avg_pool2d(
                    gt_features, 1)
            gt_features = gt_features.view(gt_features.size(0), -1)
            gt_boxes_per_image = [len(box) for box in targets]
            assert sum(gt_boxes_per_image) == len(
                gt_features), "gt_boxes_per_image and len(gt_features) do not match!"
            gt_features = gt_features.split(gt_boxes_per_image, dim=0)
            for target, gt_feature in zip(targets, gt_features):
                target.add_field('box_features', gt_feature)
                target.add_field('gt_labels', target.get_field('labels'))
                # if self.cfg.TEST.OUTPUT_SCORES_ALL:
                #     gt_labels = target.get_field('labels')
                #     gt_pseudo_scores_all = torch.zeros(len(target), self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES).to(gt_labels.device)
                #     gt_pseudo_scores_all.scatter_(1, gt_labels.unsqueeze(0).view(-1, 1), 1)
                #     target.add_field('scores_all', gt_pseudo_scores_all)
                #     gt_boxes = target.bbox
                #     gt_pseudo_boxes_all = gt_boxes.unsqueeze(1).repeat(1, self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES, 1)
                #     target.add_field('boxes_all', gt_pseudo_boxes_all)
            if self.cfg.MODEL.ROI_RELATION_HEAD.SEPERATE_SO_FEATURE_EXTRACTOR:
                gt_subj_features = self.subj_feature_extractor(
                    features, targets, use_relu=False)
                if gt_subj_features.ndimension() == 4:
                    gt_subj_features = torch.nn.functional.adaptive_avg_pool2d(
                        gt_subj_features, 1)
                gt_subj_features = gt_subj_features.view(
                    gt_subj_features.size(0), -1)
                gt_boxes_per_image = [len(box) for box in targets]
                gt_subj_features = gt_subj_features.split(
                    gt_boxes_per_image, dim=0)
                for target, gt_subj_feature, gt_feature in zip(targets, gt_subj_features, gt_features):
                    target.add_field('subj_box_features', gt_subj_feature)
                    target.add_field('obj_box_features', gt_feature)
        # if not self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_CONV_BACKBONE:
        #     features = self.rel_backbone(images.tensors)
        # else:
        #     features = [feature.detach() for feature in features]
        # The predictions_pred contains idx_pairs (M*2) and scores (M*Pred_Cat); see Jianwei's code
        # TODO: add force_relations logic
        # pdb.set_trace()
        if self.attend:
            # Experimental cross-attention branch (disabled via self.attend).
            # Pads/tiles every image's box features up to exactly 100 rows so
            # they can be stacked into one batch tensor.
            boxes = []
            for i, prediction in enumerate(predictions):
                # break
                box_features = prediction.get_field('box_features')
                size = box_features.shape
                # print(size)
                if size[0] != 100:
                    d = (100 // size[0])
                    r = 100 - size[0] * d
                    if size[0] < (100 - size[0]):
                        # Tile whole copies, then append the remainder.
                        box_features = torch.cat(
                            [box_features for _ in range(d)])
                        if r > 0:
                            box_features = torch.cat(
                                [box_features, box_features[:r]])
                    else:
                        box_features = torch.cat(
                            [box_features, box_features[:100 - size[0]]])
                boxes.append(box_features)
            box_features = torch.stack(boxes, dim=0)
            # self.writer.add_graph(
            #     self.attend, (box_features[:, :1, :], box_features[:, 1:, :]))
            # import sys
            # sys.exit(0)
            b, c, w, h = features[4].shape
            attn_box_features, attn_features = self.cross_attention(
                box_features.clone().detach(), features[4].view(b, c, w * h).clone().detach())
            # Residual update of the attended feature map and box features.
            features[4][:] = self.norm(
                features[4][:] + attn_features.view(b, c, w, h)[:])
            x_obj_features = self.norm2(
                (box_features + attn_box_features).unsqueeze(1)).squeeze(1)
            for i, p in enumerate(predictions):
                # Trim the padded rows back to each image's true box count.
                s = p.get_field('box_features').size(0)
                p.add_field('box_features', x_obj_features[i][:s])
            # torch.cuda.empty_cache()
        # prediction_pairs --> contains the bounding boxes
        x_pairs, prediction_pairs, relation_losses = self.relation_head(
            features, predictions, targets)
        # pdb.set_trace()
        scene_parser_losses.update(relation_losses)
        # attribute head
        if self.cfg.MODEL.ATTRIBUTE_ON:
            x_attr, predictions, attribute_losses = self.attribute_head(
                features, predictions, targets)
        if self.training:
            losses = {}
            losses.update(scene_parser_losses)
            losses.update(proposal_losses)
            if self.cfg.MODEL.ATTRIBUTE_ON:
                losses.update(attribute_losses)
            return losses
        # NOTE: if object scores are updated in rel_heads, we need to ensure detections are updated accordingly
        if self.cfg.MODEL.ROI_RELATION_HEAD.POSTPROCESS_METHOD == 'constrained':
            predictions, prediction_pairs = self._post_processing_constrained(
                predictions, prediction_pairs)
        else:
            predictions, prediction_pairs = self._post_processing_unconstrained(
                predictions, prediction_pairs)
        return [SceneParserOutputs(prediction, prediction_pair)
                for prediction, prediction_pair in zip(predictions, prediction_pairs)]
def argsort_desc(scores):
    """
    Return the multi-dimensional indices of *scores*, sorted descending.

    :param scores: Numpy array of arbitrary size
    :return: array of shape [numel(scores), ndim(scores)] where row k is the
        index tuple of the k-th largest score.
    """
    # Negate so that ascending argsort yields a descending ordering.
    order_flat = np.argsort(-scores.ravel())
    # Convert flat positions back to per-axis index arrays.
    index_arrays = np.unravel_index(order_flat, scores.shape)
    return np.column_stack(index_arrays)
| [
"torch.nn.BatchNorm2d",
"maskrcnn_benchmark.modeling.backbone.build_backbone",
"torch.nn.functional.adaptive_avg_pool2d",
"numpy.sort",
"torch.stack",
"torch.from_numpy",
"numpy.argsort",
"maskrcnn_benchmark.structures.bounding_box.BoxList",
"maskrcnn_benchmark.structures.image_list.to_image_list",
... | [((10769, 10790), 'maskrcnn_benchmark.structures.image_list.to_image_list', 'to_image_list', (['images'], {}), '(images)\n', (10782, 10790), False, 'from maskrcnn_benchmark.structures.image_list import to_image_list\n'), ((2577, 2596), 'maskrcnn_benchmark.modeling.backbone.build_backbone', 'build_backbone', (['cfg'], {}), '(cfg)\n', (2591, 2596), False, 'from maskrcnn_benchmark.modeling.backbone import build_backbone\n'), ((4173, 4320), 'cross_vit.CrossTransformerV2', 'CrossTransformerV2', ([], {'sm_dim': 'sm_dim', 'lg_dim': 'lg_dim', 'depth': 'cross_attn_depth', 'heads': 'cross_attn_heads', 'dim_head': 'cross_attn_dim_head', 'dropout': 'dropout'}), '(sm_dim=sm_dim, lg_dim=lg_dim, depth=cross_attn_depth,\n heads=cross_attn_heads, dim_head=cross_attn_dim_head, dropout=dropout)\n', (4191, 4320), False, 'from cross_vit import Attention, CrossTransformerV2, CrossTransformer\n'), ((4396, 4421), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (4416, 4421), False, 'import torch\n'), ((4447, 4470), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(1)'], {}), '(1)\n', (4467, 4470), False, 'import torch\n'), ((8080, 8112), 'numpy.argsort', 'np.argsort', (['(-pred_scores)'], {'axis': '(1)'}), '(-pred_scores, axis=1)\n', (8090, 8112), True, 'import numpy as np\n'), ((19942, 19967), 'torch.stack', 'torch.stack', (['boxes'], {'dim': '(0)'}), '(boxes, dim=0)\n', (19953, 19967), False, 'import torch\n'), ((8143, 8172), 'numpy.sort', 'np.sort', (['(-pred_scores)'], {'axis': '(1)'}), '(-pred_scores, axis=1)\n', (8150, 8172), True, 'import numpy as np\n'), ((8764, 8795), 'torch.from_numpy', 'torch.from_numpy', (['result_labels'], {}), '(result_labels)\n', (8780, 8795), False, 'import torch\n'), ((8945, 9023), 'torch.from_numpy', 'torch.from_numpy', (['det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]]'], {}), '(det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]])\n', (8961, 9023), False, 'import torch\n'), ((14714, 14770), 
'torch.nn.functional.adaptive_avg_pool2d', 'torch.nn.functional.adaptive_avg_pool2d', (['obj_features', '(1)'], {}), '(obj_features, 1)\n', (14753, 14770), False, 'import torch\n'), ((16444, 16499), 'torch.nn.functional.adaptive_avg_pool2d', 'torch.nn.functional.adaptive_avg_pool2d', (['gt_features', '(1)'], {}), '(gt_features, 1)\n', (16483, 16499), False, 'import torch\n'), ((11552, 11598), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['target.bbox', 'target.size', 'target.mode'], {}), '(target.bbox, target.size, target.mode)\n', (11559, 11598), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((13420, 13475), 'torch.nn.functional.adaptive_avg_pool2d', 'torch.nn.functional.adaptive_avg_pool2d', (['gt_features', '(1)'], {}), '(gt_features, 1)\n', (13459, 13475), False, 'import torch\n'), ((15430, 15487), 'torch.nn.functional.adaptive_avg_pool2d', 'torch.nn.functional.adaptive_avg_pool2d', (['subj_features', '(1)'], {}), '(subj_features, 1)\n', (15469, 15487), False, 'import torch\n'), ((18011, 18071), 'torch.nn.functional.adaptive_avg_pool2d', 'torch.nn.functional.adaptive_avg_pool2d', (['gt_subj_features', '(1)'], {}), '(gt_subj_features, 1)\n', (18050, 18071), False, 'import torch\n'), ((19786, 19841), 'torch.cat', 'torch.cat', (['[box_features, box_features[:100 - size[0]]]'], {}), '([box_features, box_features[:100 - size[0]]])\n', (19795, 19841), False, 'import torch\n'), ((19644, 19687), 'torch.cat', 'torch.cat', (['[box_features, box_features[:r]]'], {}), '([box_features, box_features[:r]])\n', (19653, 19687), False, 'import torch\n')] |
import numpy as np
def dbMoriWen(z, us, umf, d_bed, l_or, dist_type):
    """
    Equivalent bubble diameter at height ``z`` via the Mori/Wen correlation,
    as given in Kunii & Levenspiel, *Fluidization Engineering*
    (eqs. 5.15, 5.19, and 6.5).

    Parameters
    ----------
    z : float
        Height of the bubble along the vertical axis of the bed [m]
    us : float
        Superficial velocity of the gas [m/s]
    umf : float
        Minimum fluidization velocity [m/s]
    d_bed : float
        Bed diameter [m]
    l_or : float
        Orifice spacing [m] (only used for perforated plate distributors)
    dist_type : str
        Distributor plate type: 'perf_sq' (perforated, square orifice
        layout), 'perf_tri' (perforated, triangular layout), or 'porous'
        (equivalent to a triangular layout of tiny orifices).

    Returns
    -------
    float
        Equivalent bubble diameter at the specified z position [m]

    Raises
    ------
    NotImplementedError
        If ``dist_type`` is not one of the supported plate types.
    """
    GRAVITY_CGS = 981.0  # cm/s^2 -- the correlation is published in CGS units

    # The inputs arrive in MKS; work internally in CGS.
    z_cm = 100.0 * z
    us_cm = 100.0 * us
    umf_cm = 100.0 * float(umf)
    d_bed_cm = 100.0 * d_bed
    l_or_cm = 100.0 * l_or

    excess = us_cm - umf_cm  # gas velocity in excess of minimum fluidization

    # Limiting bubble size if all bubble volume in the bed coalesced into a
    # single sphere, cm
    db_max = 0.65 * (np.pi / 4 * d_bed_cm**2 * excess)**0.4
    # Initial bubble size for high flow rates / porous plates, cm
    db_min_high = 2.78 / GRAVITY_CGS * excess**2

    if dist_type == 'porous':
        # Porous plates always use the high-flow-rate expression.
        db_min = db_min_high
    elif dist_type in ('perf_sq', 'perf_tri'):
        # Per-orifice area factor: 1 for a square pitch, sqrt(3)/2 for a
        # triangular pitch.
        area_factor = 1.0 if dist_type == 'perf_sq' else np.sqrt(3) / 2
        db_min_low = 1.3 / GRAVITY_CGS**0.2 * (excess * l_or_cm**2 * area_factor)**0.4
        # The low-flow expression only applies while the initial bubbles are
        # smaller than the orifice spacing; otherwise fall back to high-flow.
        db_min = db_min_low if db_min_low <= l_or_cm else db_min_high
    else:
        raise NotImplementedError(
            f"Unknown distributor type {dist_type}"
            " in Mori/Wen bubble diameter calculation.")

    # Mori/Wen exponential growth from db_min toward db_max, cm
    db = db_max - (db_max - db_min) * np.exp(-0.3 * z_cm / d_bed_cm)
    # Constrain to 80% of the diameter of the column
    db = min(0.8 * d_bed * 100.0, db)
    # Return the bubble diameter, m
    return db / 100.0
| [
"numpy.exp",
"numpy.sqrt"
] | [((3056, 3088), 'numpy.exp', 'np.exp', (['(-0.3 * z_cgs / d_bed_cgs)'], {}), '(-0.3 * z_cgs / d_bed_cgs)\n', (3062, 3088), True, 'import numpy as np\n'), ((2480, 2490), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2487, 2490), True, 'import numpy as np\n')] |
import subprocess
import os
import numpy as np
import pymesh
'''
Partitions a mesh into a number of patches defined by @partitions using METIS
'''
def mesh_partitioning(filename, mesh, partitions):
    """
    Partition *mesh* into ``partitions`` patches using METIS (``mpmetis``).

    Parameters
    ----------
    filename : str
        Path used to derive the intermediate ``.mesh`` / ``.epart`` files
        that METIS reads and writes.
    mesh : pymesh mesh
        Mesh exposing ``faces``, ``vertices`` and ``num_vertices``.
    partitions : int
        Number of patches to produce.

    Returns
    -------
    (patches, mapping) : (list, list)
        ``patches[i]`` is the i-th sub-mesh; ``mapping[i]`` maps each global
        vertex index to its local index inside patch i (-1 if the vertex is
        not part of that patch).
    """
    if partitions < 2:
        # Nothing to split: the whole mesh is a single patch, with the
        # identity vertex mapping.
        return [mesh], [range(mesh.num_vertices)]
    # Get only the name of the file (without its extension)
    base, _ = os.path.splitext(filename)
    mesh_path = base + '.mesh'
    # Write the faces in METIS "mesh" format (1-based vertex indices).
    # NOTE: fixed resource leaks -- the original left these file handles open.
    with open(mesh_path, 'w') as out:
        out.write(str(len(mesh.faces)) + " " + str(1) + "\n")
        for f in mesh.faces:
            out.write(str(f[0]+1) + " " + str(f[1]+1) + " " + str(f[2]+1) + "\n")
    # Apply METIS on the file; it writes the element-partition file
    # "<mesh_path>.epart.<partitions>" next to its input.
    with open(os.devnull, "w") as sink:
        subprocess.check_call(['mpmetis', mesh_path, str(partitions)], stdout=sink)
    # Read the file produced by METIS: one partition id per face.
    with open(mesh_path + '.epart.' + str(partitions)) as part_file:
        partitions_list = part_file.readlines()
    patches = []
    mapping = []
    for i in range(partitions):
        faces = []
        vertices = []
        # -1 marks "vertex not yet copied into this patch".
        indexes_mapping = np.zeros(mesh.num_vertices) - 1

        def local_index(global_idx):
            # Copy the vertex into this patch on first use and remember its
            # local index so that shared vertices are not duplicated.
            idx = indexes_mapping[global_idx]
            if idx == -1:
                vertices.append(mesh.vertices[global_idx])
                idx = len(vertices) - 1
                indexes_mapping[global_idx] = idx
            return idx

        for j, partition in enumerate(partitions_list):
            # Collect the faces METIS assigned to partition i, re-indexed
            # into this patch's local vertex numbering.
            if int(partition) == i:
                faces.append([local_index(mesh.faces[j][0]),
                              local_index(mesh.faces[j][1]),
                              local_index(mesh.faces[j][2])])
        # Save the patch and the mapping
        mapping.append(indexes_mapping)
        patches.append(pymesh.form_mesh(np.array(vertices), np.array(faces)))
    return patches, mapping
| [
"numpy.array",
"numpy.zeros",
"os.path.splitext"
] | [((357, 383), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (373, 383), False, 'import os\n'), ((1162, 1189), 'numpy.zeros', 'np.zeros', (['mesh.num_vertices'], {}), '(mesh.num_vertices)\n', (1170, 1189), True, 'import numpy as np\n'), ((2612, 2630), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (2620, 2630), True, 'import numpy as np\n'), ((2632, 2647), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (2640, 2647), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample Keras actor network that generates distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.networks import categorical_projection_network
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.networks import normal_projection_network
from tf_agents.specs import tensor_spec
from tf_agents.utils import nest_utils
def _categorical_projection_net(action_spec, logits_init_output_factor=0.1):
  """Builds the categorical projection head used for discrete action specs."""
  net = categorical_projection_network.CategoricalProjectionNetwork(
      action_spec,
      logits_init_output_factor=logits_init_output_factor)
  return net
def _normal_projection_net(action_spec,
                           init_action_stddev=0.35,
                           init_means_output_factor=0.1):
  """Builds the Normal projection head used for continuous action specs."""
  # Inverse softplus of the requested stddev, so the projection's softplus
  # output starts out at init_action_stddev.
  std_bias = np.log(np.exp(init_action_stddev) - 1)
  return normal_projection_network.NormalProjectionNetwork(
      action_spec,
      init_means_output_factor=init_means_output_factor,
      std_bias_initializer_value=std_bias,
      scale_distribution=False)
@gin.configurable
class ActorDistributionNetwork(network.DistributionNetwork):
  """Creates an actor producing either Normal or Categorical distribution.

  Note: By default, this network uses `NormalProjectionNetwork` for continuous
  projection which by default uses `tanh_squash_to_spec` to normalize its
  output. Due to the nature of the `tanh` function, values near the spec bounds
  cannot be returned.
  """

  def __init__(self,
               input_tensor_spec,
               output_tensor_spec,
               preprocessing_layers=None,
               preprocessing_combiner=None,
               conv_layer_params=None,
               fc_layer_params=(200, 100),
               dropout_layer_params=None,
               activation_fn=tf.keras.activations.relu,
               kernel_initializer=None,
               batch_squash=True,
               dtype=tf.float32,
               discrete_projection_net=_categorical_projection_net,
               continuous_projection_net=_normal_projection_net,
               name='ActorDistributionNetwork'):
    """Creates an instance of `ActorDistributionNetwork`.

    Args:
      input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
        input.
      output_tensor_spec: A nest of `tensor_spec.BoundedTensorSpec` representing
        the output.
      preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
        representing preprocessing for the different observations.
        All of these layers must not be already built. For more details see
        the documentation of `networks.EncodingNetwork`.
      preprocessing_combiner: (Optional.) A keras layer that takes a flat list
        of tensors and combines them. Good options include
        `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
        This layer must not be already built. For more details see
        the documentation of `networks.EncodingNetwork`.
      conv_layer_params: Optional list of convolution layers parameters, where
        each item is a length-three tuple indicating (filters, kernel_size,
        stride).
      fc_layer_params: Optional list of fully_connected parameters, where each
        item is the number of units in the layer.
      dropout_layer_params: Optional list of dropout layer parameters, each item
        is the fraction of input units to drop or a dictionary of parameters
        according to the keras.Dropout documentation. The additional parameter
        `permanent', if set to True, allows to apply dropout at inference for
        approximated Bayesian inference. The dropout layers are interleaved with
        the fully connected layers; there is a dropout layer after each fully
        connected layer, except if the entry in the list is None. This list must
        have the same length of fc_layer_params, or be None.
      activation_fn: Activation function, e.g. tf.nn.relu, slim.leaky_relu, ...
      kernel_initializer: Initializer to use for the kernels of the conv and
        dense layers. If none is provided a default glorot_uniform
      batch_squash: If True the outer_ranks of the observation are squashed into
        the batch dimension. This allow encoding networks to be used with
        observations with shape [BxTx...].
      dtype: The dtype to use by the convolution and fully connected layers.
      discrete_projection_net: Callable that generates a discrete projection
        network to be called with some hidden state and the outer_rank of the
        state.
      continuous_projection_net: Callable that generates a continuous projection
        network to be called with some hidden state and the outer_rank of the
        state.
      name: A string representing name of the network.

    Raises:
      ValueError: If `input_tensor_spec` contains more than one observation.
    """
    # Fall back to Glorot-uniform when no initializer was supplied.
    if not kernel_initializer:
      kernel_initializer = tf.compat.v1.keras.initializers.glorot_uniform()

    # Shared torso: encodes (possibly preprocessed) observations into a
    # single hidden state tensor.
    encoder = encoding_network.EncodingNetwork(
        input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        conv_layer_params=conv_layer_params,
        fc_layer_params=fc_layer_params,
        dropout_layer_params=dropout_layer_params,
        activation_fn=activation_fn,
        kernel_initializer=kernel_initializer,
        batch_squash=batch_squash,
        dtype=dtype)

    def map_proj(spec):
      # One projection head per action spec leaf: categorical for discrete
      # specs, Normal for continuous ones.
      if tensor_spec.is_discrete(spec):
        return discrete_projection_net(spec)
      else:
        return continuous_projection_net(spec)

    projection_networks = tf.nest.map_structure(map_proj, output_tensor_spec)
    # The network's output spec mirrors the nest of projection heads.
    output_spec = tf.nest.map_structure(lambda proj_net: proj_net.output_spec,
                                        projection_networks)

    super(ActorDistributionNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=(),
        output_spec=output_spec,
        name=name)

    self._encoder = encoder
    self._projection_networks = projection_networks
    self._output_tensor_spec = output_tensor_spec

  @property
  def output_tensor_spec(self):
    # The nest of `BoundedTensorSpec`s this actor produces distributions for.
    return self._output_tensor_spec

  def call(self, observations, step_type, network_state, training=False):
    # Encode observations into a hidden state with the shared torso.
    state, network_state = self._encoder(
        observations,
        step_type=step_type,
        network_state=network_state,
        training=training)
    # Number of batch-like leading dims (e.g. [B] or [B, T]) in the input.
    outer_rank = nest_utils.get_outer_rank(observations, self.input_tensor_spec)
    # Each projection head returns (distribution, network_state); keep only
    # the distribution.
    output_actions = tf.nest.map_structure(
        lambda proj_net: proj_net(state, outer_rank)[0],
        self._projection_networks)
    return output_actions, network_state
| [
"tf_agents.networks.categorical_projection_network.CategoricalProjectionNetwork",
"tensorflow.compat.v1.keras.initializers.glorot_uniform",
"tf_agents.networks.encoding_network.EncodingNetwork",
"numpy.exp",
"tf_agents.specs.tensor_spec.is_discrete",
"tf_agents.networks.normal_projection_network.NormalPro... | [((1260, 1389), 'tf_agents.networks.categorical_projection_network.CategoricalProjectionNetwork', 'categorical_projection_network.CategoricalProjectionNetwork', (['action_spec'], {'logits_init_output_factor': 'logits_init_output_factor'}), '(action_spec,\n logits_init_output_factor=logits_init_output_factor)\n', (1319, 1389), False, 'from tf_agents.networks import categorical_projection_network\n'), ((1625, 1831), 'tf_agents.networks.normal_projection_network.NormalProjectionNetwork', 'normal_projection_network.NormalProjectionNetwork', (['action_spec'], {'init_means_output_factor': 'init_means_output_factor', 'std_bias_initializer_value': 'std_bias_initializer_value', 'scale_distribution': '(False)'}), '(action_spec,\n init_means_output_factor=init_means_output_factor,\n std_bias_initializer_value=std_bias_initializer_value,\n scale_distribution=False)\n', (1674, 1831), False, 'from tf_agents.networks import normal_projection_network\n'), ((5817, 6201), 'tf_agents.networks.encoding_network.EncodingNetwork', 'encoding_network.EncodingNetwork', (['input_tensor_spec'], {'preprocessing_layers': 'preprocessing_layers', 'preprocessing_combiner': 'preprocessing_combiner', 'conv_layer_params': 'conv_layer_params', 'fc_layer_params': 'fc_layer_params', 'dropout_layer_params': 'dropout_layer_params', 'activation_fn': 'activation_fn', 'kernel_initializer': 'kernel_initializer', 'batch_squash': 'batch_squash', 'dtype': 'dtype'}), '(input_tensor_spec, preprocessing_layers=\n preprocessing_layers, preprocessing_combiner=preprocessing_combiner,\n conv_layer_params=conv_layer_params, fc_layer_params=fc_layer_params,\n dropout_layer_params=dropout_layer_params, activation_fn=activation_fn,\n kernel_initializer=kernel_initializer, batch_squash=batch_squash, dtype\n =dtype)\n', (5849, 6201), False, 'from tf_agents.networks import encoding_network\n'), ((6457, 6508), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', 
(['map_proj', 'output_tensor_spec'], {}), '(map_proj, output_tensor_spec)\n', (6478, 6508), True, 'import tensorflow as tf\n'), ((6527, 6612), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda proj_net: proj_net.output_spec)', 'projection_networks'], {}), '(lambda proj_net: proj_net.output_spec,\n projection_networks)\n', (6548, 6612), True, 'import tensorflow as tf\n'), ((7283, 7346), 'tf_agents.utils.nest_utils.get_outer_rank', 'nest_utils.get_outer_rank', (['observations', 'self.input_tensor_spec'], {}), '(observations, self.input_tensor_spec)\n', (7308, 7346), False, 'from tf_agents.utils import nest_utils\n'), ((1583, 1609), 'numpy.exp', 'np.exp', (['init_action_stddev'], {}), '(init_action_stddev)\n', (1589, 1609), True, 'import numpy as np\n'), ((5753, 5801), 'tensorflow.compat.v1.keras.initializers.glorot_uniform', 'tf.compat.v1.keras.initializers.glorot_uniform', ([], {}), '()\n', (5799, 5801), True, 'import tensorflow as tf\n'), ((6295, 6324), 'tf_agents.specs.tensor_spec.is_discrete', 'tensor_spec.is_discrete', (['spec'], {}), '(spec)\n', (6318, 6324), False, 'from tf_agents.specs import tensor_spec\n')] |
import numpy as np
import gym
import matplotlib.pyplot as plt
SHOW_ENV_DISPLAY_FREQUENCY = 100  # render/print progress every N episodes
DEFAULT_ITERATION_COUNT = 100000  # default number of training episodes
Observation = [30, 30, 50, 50]  # Q-table bucket counts, one per CartPole state dim
np_array_win_size = np.array([0.25, 0.25, 0.01, 0.1])  # bucket width per state dim (state / width -> bucket index)
# Creates a table of Q_values (state-action) initialized with zeros
# Initialize Q(s, a), for all s ∈ S, a ∈ A(s), arbitrarily, and Q(terminal-state, ·) = 0.
def createQ_table():
    """Build the Q-table, one entry per (state-bucket..., action), with
    values drawn uniformly from [0, 1)."""
    table_shape = Observation + [env.action_space.n]
    return np.random.uniform(low=0, high=1, size=table_shape)
def get_discrete_state(state):
    """Map a continuous CartPole observation to a discrete Q-table index.

    Divides each state component by its bucket width and shifts it by a
    fixed offset (presumably to keep indices non-negative -- TODO confirm
    against the Observation bucket counts).

    Returns a tuple of ints usable as a Q-table index.
    """
    discrete_state = state / np_array_win_size + np.array([15, 10, 1, 10])
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    return tuple(discrete_state.astype(int))
# Choosing action using policy
# Sutton's code pseudocode: Choose A from S using policy derived from Q (e.g., ε-greedy)
# %10 exploration to avoid stucking at a local optima
def epsilon_greedy_policy(discrete_state, q_table, epsilon = 0.1):
    """Epsilon-greedy action selection: with probability *epsilon* explore
    with a uniformly random action, otherwise exploit the greedy action for
    the current discrete state."""
    roll = np.random.random()  # uniform draw from [0.0, 1.0)
    if roll < epsilon:
        # Explore: any action from the environment's action space.
        return np.random.randint(0, env.action_space.n)
    # Exploit: action with the highest Q-value in this state.
    return np.argmax(q_table[discrete_state])
def q_learning(num_episodes = DEFAULT_ITERATION_COUNT, gamma_discount = 0.9, alpha = 0.5, epsilon = 0.1):
    """Tabular Q-learning on the global `env`.

    Returns (q_table, reward_cache, step_cache): the learned table plus the
    per-episode cumulative reward and step count.
    """
    reward_cache = []
    step_cache = []
    q_table = createQ_table()
    discrete_state = get_discrete_state(env.reset())
    for episode in range(0, num_episodes):
        discrete_state = get_discrete_state(env.reset())
        show = episode % SHOW_ENV_DISPLAY_FREQUENCY == 0  # render this episode?
        total_reward = 0
        total_steps = 0
        done = False
        while not done:
            action = epsilon_greedy_policy(discrete_state, q_table)
            new_state, reward, done, _ = env.step(action)
            total_reward += reward
            total_steps += 1
            new_discrete_state = get_discrete_state(new_state)
            if show:
                env.render()
            if not done:
                # Off-policy TD target: best action value at the next state.
                best_next = np.max(q_table[new_discrete_state])
                old_value = q_table[discrete_state + (action,)]
                q_table[discrete_state + (action,)] = (
                    (1 - alpha) * old_value
                    + alpha * (reward + gamma_discount * best_next))
            discrete_state = new_discrete_state
        reward_cache.append(total_reward)
        step_cache.append(total_steps)
        if show:
            print("q_learning episodes count: ", episode)
    return q_table, reward_cache, step_cache
def sarsa(num_episodes = DEFAULT_ITERATION_COUNT, gamma_discount = 0.9, alpha = 0.5, epsilon = 0.1):
    """Tabular SARSA (on-policy TD control) on the global `env`.

    Fix: the original update bootstrapped from q_table[s'][a] using the
    *previous* action `a` instead of selecting the next action a' at s'
    and using Q(s', a'), as SARSA requires (Sutton & Barto, eq. 6.7).
    The chosen a' is now also executed on the following step, which keeps
    the algorithm on-policy.

    Returns (q_table, reward_cache, step_cache): the learned table plus the
    per-episode cumulative reward and step count.
    """
    reward_cache = list()
    step_cache = list()
    q_table = createQ_table()
    for episode in range(0, num_episodes):
        discrete_state = get_discrete_state(env.reset())
        # Choose the first action A from S using the current policy.
        action = epsilon_greedy_policy(discrete_state, q_table)
        done = False
        reward_cum = 0
        step_cum = 0
        while(done == False):
            new_state, reward, done, _ = env.step(action)
            reward_cum += reward
            step_cum += 1
            new_discrete_state = get_discrete_state(new_state)
            if episode % SHOW_ENV_DISPLAY_FREQUENCY == 0:  # render
                env.render()
            if not done:
                # Choose A' from S' with the same epsilon-greedy policy ...
                next_action = epsilon_greedy_policy(new_discrete_state, q_table)
                # ... and bootstrap from Q(S', A') (on-policy target).
                next_state_value = q_table[new_discrete_state + (next_action,)]
                current_q = q_table[discrete_state + (action,)]
                q_table[discrete_state + (action,)] = (1 - alpha) * current_q + alpha * (reward + gamma_discount * next_state_value)
                # Carry A' forward so it is the action actually executed next.
                action = next_action
            discrete_state = new_discrete_state
        reward_cache.append(reward_cum)
        step_cache.append(step_cum)
        if episode % SHOW_ENV_DISPLAY_FREQUENCY == 0:
            print("sarsa episodes count: ", episode)
    return q_table, reward_cache, step_cache
def _normalized_batch_sums(values, batch_size=10):
    """Sum *values* in consecutive batches of *batch_size* and z-normalize
    each batch sum with the mean/std of the individual values.

    An incomplete trailing batch is dropped, matching the original inline
    implementations this helper replaces.
    """
    values_mean = np.array(values).mean()
    values_std = np.array(values).std()
    batch_sums = []
    count = 0  # position inside the current batch
    acc = 0    # running sum of the current batch
    for v in values:
        count = count + 1
        acc += v
        if count == batch_size:
            batch_sums.append((acc - values_mean) / values_std)
            acc = 0
            count = 0
    return batch_sums


def plot_cumreward_normalized(reward_cache_qlearning, reward_cache_SARSA):
    """
    Visualizes the reward convergence
    Args:
        reward_cache -- type(list) contains cumulative_reward
    """
    cum_rewards_q = _normalized_batch_sums(reward_cache_qlearning)
    cum_rewards_SARSA = _normalized_batch_sums(reward_cache_SARSA)
    # prepare the graph
    plt.plot(cum_rewards_q, label = "q_learning")
    plt.plot(cum_rewards_SARSA, label = "SARSA")
    plt.ylabel('Cumulative Rewards')
    plt.xlabel('Batches of Episodes (sample size 10) ')
    plt.title("Q-Learning/SARSA Convergence of Cumulative Reward")
    plt.legend(loc='lower right', ncol=2, mode="expand", borderaxespad=0.)
    # Fix: savefig must run before show(); show() clears the current figure,
    # so the original order wrote a blank image.
    plt.savefig('cumulative_reward.png')
    plt.show()


def plot_number_steps(step_cache_qlearning, step_cache_SARSA):
    """
    Visualize number of steps taken
    """
    cum_step_q = _normalized_batch_sums(step_cache_qlearning)
    cum_step_SARSA = _normalized_batch_sums(step_cache_SARSA)
    # prepare the graph
    plt.plot(cum_step_q, label = "q_learning")
    plt.plot(cum_step_SARSA, label = "SARSA")
    plt.ylabel('Number of iterations')
    plt.xlabel('Batches of Episodes (sample size 10) ')
    plt.title("Q-Learning/SARSA Iteration number untill game ends")
    plt.legend(loc='lower right', ncol=2, mode="expand", borderaxespad=0.)
    # Fix: savefig before show() (show() clears the figure).
    plt.savefig('number_steps.png')
    plt.show()
def plot_qlearning_smooth(reward_cache):
    """
    Visualizes the reward convergence using weighted average of previous 10 cumulative rewards
    NOTE: Normalization gives better visualization
    Args:
        reward_cache -- type(list) contains cumulative_rewards for episodes
    """
    # Seed value: mean of (up to) the first 11 cached rewards, divided by 10.
    mean_rev = (np.array(reward_cache[0:11]).sum())/10
    # initialize with cache mean
    cum_rewards = [mean_rev] * 10
    idx = 0
    for cache in reward_cache:
        # NOTE(review): the list grows every iteration -- the first 10 slots
        # are overwritten cyclically via idx while a running mean of the whole
        # (growing) list is appended each step; verify this is intended.
        cum_rewards[idx] = cache
        idx += 1
        smooth_reward = (np.array(cum_rewards).mean())
        cum_rewards.append(smooth_reward)
        if(idx == 10):
            idx = 0
    plt.plot(cum_rewards)
    plt.ylabel('Cumulative Rewards')
    plt.xlabel('Batches of Episodes (sample size 10) ')
    plt.title("Q-Learning Convergence of Cumulative Reward")
    plt.legend(loc='lower left', ncol=2, mode="expand", borderaxespad=0.)
    # NOTE(review): unlike the other plot helpers, no savefig() here.
    plt.show()
if __name__ == "__main__":
    # Train both agents on the same environment instance, then compare them.
    env = gym.make("CartPole-v1")
    #SARSA
    q_table_SARSA, reward_cache_SARSA, step_cache_SARSA = sarsa()
    # QLEARNING
    q_table_qlearning, reward_cache_qlearning, step_cache_qlearning = q_learning()
    # Compare episode lengths between the two algorithms.
    plot_number_steps(step_cache_qlearning, step_cache_SARSA)
    # Visualize the result
    plot_cumreward_normalized(reward_cache_qlearning,reward_cache_SARSA)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.legend",... | [((181, 214), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.01, 0.1]'], {}), '([0.25, 0.25, 0.01, 0.1])\n', (189, 214), True, 'import numpy as np\n'), ((409, 482), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(Observation + [env.action_space.n])'}), '(low=0, high=1, size=Observation + [env.action_space.n])\n', (426, 482), True, 'import numpy as np\n'), ((1081, 1099), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1097, 1099), True, 'import numpy as np\n'), ((5602, 5645), 'matplotlib.pyplot.plot', 'plt.plot', (['cum_rewards_q'], {'label': '"""q_learning"""'}), "(cum_rewards_q, label='q_learning')\n", (5610, 5645), True, 'import matplotlib.pyplot as plt\n'), ((5652, 5694), 'matplotlib.pyplot.plot', 'plt.plot', (['cum_rewards_SARSA'], {'label': '"""SARSA"""'}), "(cum_rewards_SARSA, label='SARSA')\n", (5660, 5694), True, 'import matplotlib.pyplot as plt\n'), ((5701, 5733), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Rewards"""'], {}), "('Cumulative Rewards')\n", (5711, 5733), True, 'import matplotlib.pyplot as plt\n'), ((5738, 5789), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batches of Episodes (sample size 10) """'], {}), "('Batches of Episodes (sample size 10) ')\n", (5748, 5789), True, 'import matplotlib.pyplot as plt\n'), ((5794, 5856), 'matplotlib.pyplot.title', 'plt.title', (['"""Q-Learning/SARSA Convergence of Cumulative Reward"""'], {}), "('Q-Learning/SARSA Convergence of Cumulative Reward')\n", (5803, 5856), True, 'import matplotlib.pyplot as plt\n'), ((5861, 5932), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(loc='lower right', ncol=2, mode='expand', borderaxespad=0.0)\n", (5871, 5932), True, 'import matplotlib.pyplot as plt\n'), ((5936, 5946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5944, 5946), True, 'import matplotlib.pyplot as 
plt\n'), ((5951, 5987), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cumulative_reward.png"""'], {}), "('cumulative_reward.png')\n", (5962, 5987), True, 'import matplotlib.pyplot as plt\n'), ((7231, 7271), 'matplotlib.pyplot.plot', 'plt.plot', (['cum_step_q'], {'label': '"""q_learning"""'}), "(cum_step_q, label='q_learning')\n", (7239, 7271), True, 'import matplotlib.pyplot as plt\n'), ((7278, 7317), 'matplotlib.pyplot.plot', 'plt.plot', (['cum_step_SARSA'], {'label': '"""SARSA"""'}), "(cum_step_SARSA, label='SARSA')\n", (7286, 7317), True, 'import matplotlib.pyplot as plt\n'), ((7324, 7358), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (7334, 7358), True, 'import matplotlib.pyplot as plt\n'), ((7363, 7414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batches of Episodes (sample size 10) """'], {}), "('Batches of Episodes (sample size 10) ')\n", (7373, 7414), True, 'import matplotlib.pyplot as plt\n'), ((7419, 7482), 'matplotlib.pyplot.title', 'plt.title', (['"""Q-Learning/SARSA Iteration number untill game ends"""'], {}), "('Q-Learning/SARSA Iteration number untill game ends')\n", (7428, 7482), True, 'import matplotlib.pyplot as plt\n'), ((7487, 7558), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(loc='lower right', ncol=2, mode='expand', borderaxespad=0.0)\n", (7497, 7558), True, 'import matplotlib.pyplot as plt\n'), ((7562, 7572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7570, 7572), True, 'import matplotlib.pyplot as plt\n'), ((7577, 7608), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""number_steps.png"""'], {}), "('number_steps.png')\n", (7588, 7608), True, 'import matplotlib.pyplot as plt\n'), ((8282, 8303), 'matplotlib.pyplot.plot', 'plt.plot', (['cum_rewards'], {}), '(cum_rewards)\n', (8290, 8303), True, 'import matplotlib.pyplot as plt\n'), ((8308, 8340), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Rewards"""'], {}), "('Cumulative Rewards')\n", (8318, 8340), True, 'import matplotlib.pyplot as plt\n'), ((8345, 8396), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batches of Episodes (sample size 10) """'], {}), "('Batches of Episodes (sample size 10) ')\n", (8355, 8396), True, 'import matplotlib.pyplot as plt\n'), ((8401, 8458), 'matplotlib.pyplot.title', 'plt.title', (['"""Q-Learning Convergence of Cumulative Reward"""'], {}), "('Q-Learning Convergence of Cumulative Reward')\n", (8410, 8458), True, 'import matplotlib.pyplot as plt\n'), ((8463, 8533), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(loc='lower left', ncol=2, mode='expand', borderaxespad=0.0)\n", (8473, 8533), True, 'import matplotlib.pyplot as plt\n'), ((8537, 8547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8545, 8547), True, 'import matplotlib.pyplot as plt\n'), ((8587, 8610), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (8595, 8610), False, 'import gym\n'), ((678, 703), 'numpy.array', 'np.array', (['[15, 10, 1, 10]'], {}), '([15, 10, 1, 10])\n', (686, 703), True, 'import numpy as np\n'), ((1152, 1192), 'numpy.random.randint', 'np.random.randint', (['(0)', 'env.action_space.n'], {}), '(0, env.action_space.n)\n', (1169, 1192), True, 'import numpy as np\n'), ((1220, 1254), 'numpy.argmax', 'np.argmax', (['q_table[discrete_state]'], {}), '(q_table[discrete_state])\n', (1229, 1254), True, 'import numpy as np\n'), ((4465, 4497), 'numpy.array', 'np.array', (['reward_cache_qlearning'], {}), '(reward_cache_qlearning)\n', (4473, 4497), True, 'import numpy as np\n'), ((4523, 4555), 'numpy.array', 'np.array', (['reward_cache_qlearning'], {}), '(reward_cache_qlearning)\n', (4531, 4555), True, 'import numpy as np\n'), ((5048, 5076), 'numpy.array', 'np.array', (['reward_cache_SARSA'], {}), 
'(reward_cache_SARSA)\n', (5056, 5076), True, 'import numpy as np\n'), ((5102, 5130), 'numpy.array', 'np.array', (['reward_cache_SARSA'], {}), '(reward_cache_SARSA)\n', (5110, 5130), True, 'import numpy as np\n'), ((6153, 6183), 'numpy.array', 'np.array', (['step_cache_qlearning'], {}), '(step_cache_qlearning)\n', (6161, 6183), True, 'import numpy as np\n'), ((6207, 6237), 'numpy.array', 'np.array', (['step_cache_qlearning'], {}), '(step_cache_qlearning)\n', (6215, 6237), True, 'import numpy as np\n'), ((6704, 6730), 'numpy.array', 'np.array', (['step_cache_SARSA'], {}), '(step_cache_SARSA)\n', (6712, 6730), True, 'import numpy as np\n'), ((6754, 6780), 'numpy.array', 'np.array', (['step_cache_SARSA'], {}), '(step_cache_SARSA)\n', (6762, 6780), True, 'import numpy as np\n'), ((2192, 2227), 'numpy.max', 'np.max', (['q_table[new_discrete_state]'], {}), '(q_table[new_discrete_state])\n', (2198, 2227), True, 'import numpy as np\n'), ((7930, 7958), 'numpy.array', 'np.array', (['reward_cache[0:11]'], {}), '(reward_cache[0:11])\n', (7938, 7958), True, 'import numpy as np\n'), ((8154, 8175), 'numpy.array', 'np.array', (['cum_rewards'], {}), '(cum_rewards)\n', (8162, 8175), True, 'import numpy as np\n')] |
# PARAMETERS #
import os
import pathlib
import subprocess
import json
import numpy as np
import argparse
def execute_command(cmd):
    """Launch *cmd* (an argv list) as a child process and block until it exits."""
    child = subprocess.Popen(cmd)
    child.wait()
# CLI driver: launches one attribute-inference attack (libs.AIA.sophisticate)
# per client recorded in the target model's training information.
parser = argparse.ArgumentParser(description="Target model training")
parser.add_argument("--path", "-p", type=str, default="./experiments/purchases100")
parser.add_argument("--dataset", "-d", type=str, required=True)
parser.add_argument("--seed", "-s", type=str, required=True)
parser.add_argument("--output_size", "-os", type=int, default=100)
parser.add_argument("--optimize", "-opt", default=0, required=False, type=int, help="Execute bayesian optimization")
parser.add_argument("--learning_rate", "-target_lr", type=str, default="0.0008")
parser.add_argument("--batch_size", "-bs", type=str, default="64")
parser.add_argument("--index", "-ai", type=str, default="0,1,2,3,4,5,6")
args = parser.parse_args()
num_classes = args.output_size # for purchases 100
workers = 4 # parallel workers
learning_rate = args.learning_rate
optimize = args.optimize
batch_size = args.batch_size
# File paths. Should all be relative to the Project root
path = args.path
# NOTE(review): `input` shadows the builtin of the same name; harmless here but worth renaming.
input = f"{path}/target"
seed = args.seed
data_file = args.dataset
# training_information.json records the participating clients and the
# per-round validation accuracy of the target model
with open(f"{input}/training_information.json", 'r') as f:
    data = json.load(f)
data_owners = data["clients"]
# last recorded round, rounded down to a multiple of 5; at most the 5 most
# recent such rounds are attacked (checkpoints apparently exist every 5 rounds)
last_round = int(np.array(data["validation_accuracy"])[-1, 0])
last_round = last_round - (last_round % 5)
epochs = [x for x in range(last_round, 0, -5)]
if len(epochs) > 5:
    epochs = epochs[-5:]
# run one attack subprocess per client, handing it that client's checkpointed local models
for data_owner in data_owners:
    output = f"{path}/aia"
    experiment_name = f"attack_{data_owner}"
    pathlib.Path(output).mkdir(parents=True, exist_ok=True)
    models = reversed(list(map(lambda epoch: f"{input}/{data_owner}_{epoch}_local_model.h5", epochs)))
    indices = f"{input}/{data_owner}_indices.npy"
    execute_command(
        ["python3", "-m", "libs.AIA.sophisticate", "--save_epochs", str(len(epochs)), "--num_classes",
         str(num_classes),
         "--experiment", experiment_name, "--indices", indices, "--output", output, "--batch_size", str(batch_size),
         "--models", " ".join(models), "--data", data_file, "--workers", str(workers), "--seed", str(seed),
         "--learning_rate", str(learning_rate), "--optimize", str(optimize), "--index", args.index])
# NOTE(review): exits with status 1 even on success — confirm this is intentional for callers/CI.
exit(1)
| [
"argparse.ArgumentParser",
"pathlib.Path",
"subprocess.Popen",
"numpy.array",
"json.load"
] | [((177, 237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Target model training"""'}), "(description='Target model training')\n", (200, 237), False, 'import argparse\n'), ((1266, 1278), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1275, 1278), False, 'import json\n'), ((137, 158), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd)\n', (153, 158), False, 'import subprocess\n'), ((1334, 1371), 'numpy.array', 'np.array', (["data['validation_accuracy']"], {}), "(data['validation_accuracy'])\n", (1342, 1371), True, 'import numpy as np\n'), ((1654, 1674), 'pathlib.Path', 'pathlib.Path', (['output'], {}), '(output)\n', (1666, 1674), False, 'import pathlib\n')] |
# from turtle import update
from .camera import Camera
from statistics import median
import math
import cv2
import time
import numpy as np
import random
import mediapipe as mp
import numpy.typing as npt
import interpreter.constants as constants
from threading import Thread, Lock
from joblib import load
from . import streamer
from .new_model.preprocess.feature_extractor import extract_features
from display.display import Display
# Interpreter class to parse images into signs, and build signs
class Interpreter:
    """Turns camera frames (via `streamer`) into fingerspelled text.

    Uses MediaPipe Hands for landmark detection, a per-frame classifier for
    static letters and a sequence model for motion letters ("j"/"z").
    Letters are accepted once the same prediction fills the tail of a
    rolling buffer whose required length adapts to the frame rate.
    """
    def __init__(self, display_instance: Display):
        self.display_instance = display_instance
        checkpoint_path = "./interpreter/new_model/output/modelpy39.joblib"
        self.model = load(checkpoint_path)
        sequence_path = "./interpreter/new_model/output/sequence-model1.joblib"
        self.seq_model = load(sequence_path)
        # mediapipe model
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        self.mp_hands = mp.solutions.hands
        # NOTE(review): mp_drawing is assigned twice to the same value; the second
        # assignment is redundant.
        self.mp_drawing = mp.solutions.drawing_utils
        self.hands = self.mp_hands.Hands(
            model_complexity=constants.MODEL_COMPLEXITY,
            min_detection_confidence=constants.MIN_DETECTION_CONFIDENCE,
            min_tracking_confidence=constants.MIN_TRACKING_CONFIDENCE)
        # latest detected hand landmarks, shared with the display thread
        self.hand_landmarks = None
        self.hand_landmark_lock = Lock()
        # interpreter sentence inference variables
        self.curr_letter = ''
        self.curr_seq_letter = ''
        self.curr_input = ''
        self.hand_assigned = False
        self.med_delta_time = constants.INITIAL_TIME
        self.time_buffer = []
        self.match_size = self.compute_match_size()
        self.prev_time = time.time()
        # rolling prediction buffers; '*' marks "no prediction yet"
        self.buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.prob_buffer = [0 for _ in range(constants.MAX_BUFFER_SIZE)]
        # sequence feature buffer
        self.feature_buffer = []
        self.sequence_buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.sequence_prob_buffer = [0 for _ in range(constants.MAX_BUFFER_SIZE)]
        self.word_is_signed = False
        self.word_signed = ''
    # Parses the current frame from ASL to a letter
    def parse_frame(self):
        """Process one streamer frame: detect the hand, classify it, and
        commit a letter once the prediction buffers agree for long enough.
        Clears state and sets self.input_finished when the hand leaves the frame."""
        frame = streamer.frame
        char_signed = False
        word_signed = False
        if frame is not None:
            # convert frame to RGB for processing
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = self.hands.process(frame)
            # convert frame back to BGR for display
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            # if hand detected
            if results.multi_hand_landmarks:
                # on first detection, tell the display which hand (left/right) is in use
                if not self.hand_assigned:
                    for hand in results.multi_handedness:
                        handType=hand.classification[0].label
                        self.display_instance.display_state(
                            'hand', {"hand": handType.lower()})
                        self.hand_assigned = True
                        break
                # update match size value based on frame rate
                self.update_match_size()
                # get current hand landmarks
                hand_landmarks = results.multi_hand_landmarks[0]
                with self.hand_landmark_lock:
                    self.hand_landmarks = hand_landmarks
                # editing frame (grayscale-looking display frame)
                frame = self.frame_transform(frame)
                # making prediction
                features, _ = extract_features(
                    [hand_landmarks], ['a'], input_type='inference')
                preds = self.model.predict_proba(features)
                cp = self.model.classes_[np.argmax(preds)]
                cp_prob = np.max(preds)
                # update prob buffer
                self.prob_buffer.pop(0)
                self.prob_buffer.append(cp_prob)
                # update buffer
                self.buffer.pop(0)
                self.buffer.append(cp)
                # making sequence prediction (only once enough frames of features exist)
                if len(self.feature_buffer) == constants.SEQUENCE_INPUT_SIZE:
                    seq_features = np.array(self.feature_buffer)
                    seq_features = np.reshape(seq_features, seq_features.size).reshape(1, -1)
                    seq_preds = self.seq_model.predict_proba(seq_features)
                    seq_cp = self.seq_model.classes_[np.argmax(seq_preds)]
                    seq_cp_prob = np.max(seq_preds)
                    # update sequence buffer
                    self.sequence_buffer.pop(0)
                    self.sequence_buffer.append(seq_cp)
                    # update sequence prob buffer
                    self.sequence_prob_buffer.pop(0)
                    self.sequence_prob_buffer.append(seq_cp_prob)
                # update feature buffer
                if len(self.feature_buffer) == constants.SEQUENCE_INPUT_SIZE:
                    self.feature_buffer.pop(0)
                self.feature_buffer.append(features)
                # send current input to the display
                self.display_instance.display_state(
                    'green', {"letter": cp, "input": self.curr_input})
                # convert '_' output to space
                if cp == '_':
                    cp = ' '
                # if we've seen cp self.match_size times in a row, add it
                if all(x == self.buffer[-1] for x in self.buffer[-self.match_size:]):
                    char_signed = True
                # motion letters ("j"/"z") need only half the agreement window
                if all(x == self.sequence_buffer[-1] for x in self.sequence_buffer[-int(self.match_size/2):]) \
                    and self.sequence_buffer[-1] in ["j", "z"]:
                    word_signed = True
                # when both models fire, commit whichever has more accumulated probability mass
                if char_signed and word_signed:
                    word_weight = sum(self.sequence_prob_buffer[-int(self.match_size/2):])
                    char_weight = sum(self.prob_buffer[-int(self.match_size/2):])
                    if char_weight > word_weight:
                        if self.curr_letter != cp:
                            self.add_letter(cp)
                    if word_weight > char_weight:
                        if self.curr_seq_letter != seq_cp:
                            self.add_seq_letter(seq_cp)
                elif word_signed:
                    if self.curr_seq_letter != seq_cp:
                        self.add_seq_letter(seq_cp)
                elif char_signed:
                    if self.curr_letter != cp:
                        self.add_letter(cp)
            # hand left the frame: finish the current input
            if results.multi_hand_landmarks == None:
                self.hand_assigned = False
                self.curr_letter = ""
                self.curr_seq_letter = ""
                self.display_instance.display_query(self.curr_input)
                self.input_finished = 1
                with self.hand_landmark_lock:
                    self.hand_landmarks = None
    # Add letter to input query and display it
    def add_letter(self, cp: str):
        """Commit a static-letter prediction; 'x' acts as backspace."""
        self.curr_letter = cp
        self.curr_seq_letter = ''
        if self.curr_letter == 'x':
            self.curr_input = self.curr_input[:-1]
            self.curr_letter = ''
        else:
            self.curr_input += self.curr_letter
        # reset buffers so the same pose must be re-held before it commits again
        self.buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.sequence_buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.display_instance.display_state(
            "save", {"input": self.curr_input})
    def add_seq_letter(self, seq_cp: str):
        """Commit a sequence-model prediction (motion letter) to the input."""
        self.curr_seq_letter = seq_cp
        self.curr_letter = ''
        self.curr_input += self.curr_seq_letter
        self.buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.sequence_buffer = ['*' for _ in range(constants.MAX_BUFFER_SIZE)]
        self.display_instance.display_state(
            "save", {"input": self.curr_input})
    # Dynamically adjust buffer size based on frame rate
    def update_match_size(self):
        """Track per-frame time deltas and recompute the agreement window."""
        curr_time = time.time()
        delta_time = curr_time-self.prev_time
        self.time_buffer.append(delta_time)
        self.prev_time = curr_time
        if len(self.time_buffer) > constants.TIME_LOOKBACK:
            self.time_buffer.pop(0)
        self.med_delta_time = median(self.time_buffer)
        self.compute_match_size()
    def compute_match_size(self):
        """Set match_size = frames needed to span TIME_PER_SIGN, clamped to buffer limits."""
        match_size = int(math.floor(constants.TIME_PER_SIGN/self.med_delta_time))
        self.match_size = max(min((match_size, constants.MAX_BUFFER_SIZE)), constants.MIN_BUFFER_SIZE)
    # functions for displaying frames on a separate thread
    def display_frame_predict_thread(self, stop):
        self.display_frame_thread(stop, predict=True)
    def display_frame_wait_thread(self, stop):
        self.display_frame_thread(stop, predict=False)
    def display_frame_thread(self, stop, predict=False):
        """Loop: push (optionally annotated) frames to streamer.outputFrame until stop() is truthy."""
        while True:
            frame = streamer.frame
            if frame is not None:
                if predict:
                    frame = self.frame_transform(frame)
                    frame = self.mp_hand_transform(frame)
                with streamer.lock:
                    streamer.outputFrame = frame.copy()
            if stop():
                break
    def mp_hand_transform(self, frame):
        """Draw the most recent hand landmarks onto frame (no-op when none)."""
        with self.hand_landmark_lock:
            if self.hand_landmarks != None:
                landmarks_style = self.mp_drawing_styles.get_default_hand_landmarks_style()
                for style in landmarks_style.values():
                    style.color = (128, 64, 128)
                    style.circle_radius = 0
                connections_style = self.mp_drawing_styles.get_default_hand_connections_style()
                for style in connections_style.values():
                    style.color = (128, 64, 128)
                # draw landmarks on top of frame
                self.mp_drawing.draw_landmarks(
                    frame,
                    self.hand_landmarks,
                    self.mp_hands.HAND_CONNECTIONS,
                    self.mp_drawing_styles.get_default_hand_landmarks_style(),
                    self.mp_drawing_styles.get_default_hand_connections_style())
        return frame
    def frame_transform(self, frame):
        """Return a grayscale-looking 3-channel copy of frame for display."""
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        return frame
    # Wait for a user to initiate an input, returns when the user is about to give an input, runs on FSM
    def is_hand_in_frame(self, frame: npt.NDArray):
        """Return True when MediaPipe finds at least one hand in frame (assumed BGR)."""
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.hands.process(frame)
        hand_in_frame = results.multi_hand_landmarks != None
        return hand_in_frame
    def wait_for_input(self):
        """Block until a hand appears in the stream, mirroring frames to the display meanwhile."""
        print("Waiting for user input")
        frame = streamer.frame
        while frame is None:
            frame = streamer.frame
            time.sleep(.1)
        stop_threads = False
        t1 = Thread(target=self.display_frame_wait_thread, args=(lambda : stop_threads, ))
        t1.start()
        # NOTE(review): busy-waits without sleeping between reads — confirm CPU usage is acceptable.
        while not self.is_hand_in_frame(frame):
            frame = streamer.frame
        stop_threads = True
        t1.join()
    # Captures the full sign input from the user, utilizes more complicated FSM logic
    def capture_full_input(self):
        """Run the recognition loop until the hand leaves the frame; return the accumulated text."""
        print("Capturing input")
        self.curr_letter = ''
        self.curr_input = ''
        self.input_finished = 0
        stop_threads = False
        t1 = Thread(target=self.display_frame_predict_thread, args=(lambda : stop_threads, ))
        t1.start()
        while not self.input_finished:
            self.parse_frame()
        stop_threads = True
        t1.join()
        return self.curr_input
    def teardown(self):
        """Release the underlying camera."""
        streamer.camera.teardown()
| [
"numpy.reshape",
"math.floor",
"threading.Lock",
"numpy.argmax",
"time.sleep",
"numpy.max",
"statistics.median",
"numpy.array",
"cv2.cvtColor",
"joblib.load",
"threading.Thread",
"time.time"
] | [((725, 746), 'joblib.load', 'load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (729, 746), False, 'from joblib import load\n'), ((853, 872), 'joblib.load', 'load', (['sequence_path'], {}), '(sequence_path)\n', (857, 872), False, 'from joblib import load\n'), ((1432, 1438), 'threading.Lock', 'Lock', ([], {}), '()\n', (1436, 1438), False, 'from threading import Thread, Lock\n'), ((1781, 1792), 'time.time', 'time.time', ([], {}), '()\n', (1790, 1792), False, 'import time\n'), ((8183, 8194), 'time.time', 'time.time', ([], {}), '()\n', (8192, 8194), False, 'import time\n'), ((10481, 10520), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (10493, 10520), False, 'import cv2\n'), ((10537, 10576), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_GRAY2RGB'], {}), '(frame, cv2.COLOR_GRAY2RGB)\n', (10549, 10576), False, 'import cv2\n'), ((10772, 10810), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (10784, 10810), False, 'import cv2\n'), ((11189, 11265), 'threading.Thread', 'Thread', ([], {'target': 'self.display_frame_wait_thread', 'args': '(lambda : stop_threads,)'}), '(target=self.display_frame_wait_thread, args=(lambda : stop_threads,))\n', (11195, 11265), False, 'from threading import Thread, Lock\n'), ((11720, 11799), 'threading.Thread', 'Thread', ([], {'target': 'self.display_frame_predict_thread', 'args': '(lambda : stop_threads,)'}), '(target=self.display_frame_predict_thread, args=(lambda : stop_threads,))\n', (11726, 11799), False, 'from threading import Thread, Lock\n'), ((2519, 2557), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2531, 2557), False, 'import cv2\n'), ((2680, 2718), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (2692, 2718), False, 'import cv2\n'), ((8451, 8475), 'statistics.median', 'median', 
(['self.time_buffer'], {}), '(self.time_buffer)\n', (8457, 8475), False, 'from statistics import median\n'), ((8579, 8636), 'math.floor', 'math.floor', (['(constants.TIME_PER_SIGN / self.med_delta_time)'], {}), '(constants.TIME_PER_SIGN / self.med_delta_time)\n', (8589, 8636), False, 'import math\n'), ((11123, 11138), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (11133, 11138), False, 'import time\n'), ((3900, 3913), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (3906, 3913), True, 'import numpy as np\n'), ((3856, 3872), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (3865, 3872), True, 'import numpy as np\n'), ((4307, 4336), 'numpy.array', 'np.array', (['self.feature_buffer'], {}), '(self.feature_buffer)\n', (4315, 4336), True, 'import numpy as np\n'), ((4615, 4632), 'numpy.max', 'np.max', (['seq_preds'], {}), '(seq_preds)\n', (4621, 4632), True, 'import numpy as np\n'), ((4559, 4579), 'numpy.argmax', 'np.argmax', (['seq_preds'], {}), '(seq_preds)\n', (4568, 4579), True, 'import numpy as np\n'), ((4372, 4415), 'numpy.reshape', 'np.reshape', (['seq_features', 'seq_features.size'], {}), '(seq_features, seq_features.size)\n', (4382, 4415), True, 'import numpy as np\n')] |
# Copyright (C) 2020 * Ltd. All rights reserved.
# author : <NAME> <<EMAIL>>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
# Command-line options for CAM propagation with AffinityNet random walk.
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--data_dir', default='../VOCtrainval_11-May-2012/', type=str)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='resnet50', type=str)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--model_name', default='', type=str)
parser.add_argument('--cam_dir', default='', type=str)
parser.add_argument('--domain', default='train', type=str)
# beta / exp_times parameterize the random-walk propagation (see propagate_to_edge)
parser.add_argument('--beta', default=10, type=int)
parser.add_argument('--exp_times', default=8, type=int)
# parser.add_argument('--threshold', default=0.25, type=float)
if __name__ == '__main__':
    ###################################################################################
    # Arguments
    ###################################################################################
    args = parser.parse_args()
    from iputils import get_host_ip
    ip = get_host_ip()
    # print(ip)
    # hard-coded per-machine dataset locations, selected by host IP
    if ip == '172.31.234.159':
        args.data_dir = '/data1/xjheng/dataset/VOC2012/'
    elif ip == '172.31.111.180':
        args.data_dir = '/home/lzr/data/VOC/VOC2012/'
    else:
        raise NotImplementedError
    # experiment name encodes the domain and random-walk hyperparameters
    experiment_name = args.model_name
    if 'train' in args.domain:
        experiment_name += '@train'
    else:
        experiment_name += '@val'
    experiment_name += '@beta=%d'%args.beta
    experiment_name += '@exp_times=%d'%args.exp_times
    # experiment_name += '@threshold=%.2f'%args.threshold
    experiment_name += '@rw'
    cam_dir = f'./experiments/predictions/{args.cam_dir}/'
    pred_dir = create_directory(f'./experiments/predictions/{experiment_name}/')
    model_path = './experiments/models/' + f'{args.model_name}.pth'
    cam_path = create_directory(f'vis_cam/rw/{experiment_name}')
    set_seed(args.seed)
    log_func = lambda string='': print(string)
    ###################################################################################
    # Transform, Dataset, DataLoader
    ###################################################################################
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    normalize_fn = Normalize(imagenet_mean, imagenet_std)
    # for mIoU
    meta_dic = read_json('./data/VOC_2012.json')
    dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
    ###################################################################################
    # Network
    ###################################################################################
    path_index = PathIndex(radius=10, default_size=(512 // 4, 512 // 4))
    model = AffinityNet(args.architecture, path_index)
    model = model.cuda()
    model.eval()
    log_func('[i] Architecture is {}'.format(args.architecture))
    log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
    log_func()
    try:
        use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
    except KeyError:
        use_gpu = '0'
    the_number_of_gpu = len(use_gpu.split(','))
    if the_number_of_gpu > 1:
        log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
        model = nn.DataParallel(model)
    load_model(model, model_path, parallel=the_number_of_gpu > 1)
    #################################################################################################
    # Evaluation
    #################################################################################################
    eval_timer = Timer()
    vis_cam = True
    with torch.no_grad():
        length = len(dataset)
        for step, (ori_image, image_id, label, gt_mask) in enumerate(dataset):
            ori_w, ori_h = ori_image.size
            npy_path = pred_dir + image_id + '.npy'
            # skip images whose random-walk CAM was already computed
            if os.path.isfile(npy_path):
                continue
            # preprocessing
            image = np.asarray(ori_image)
            image = normalize_fn(image)
            image = image.transpose((2, 0, 1))
            image = torch.from_numpy(image)
            # batch of [image, horizontally-flipped image] for edge prediction
            flipped_image = image.flip(-1)
            images = torch.stack([image, flipped_image])
            images = images.cuda()
            # inference
            edge = model.get_edge(images)
            # postprocessing: propagate the precomputed CAM along predicted edges
            cam_dict = np.load(cam_dir + image_id + '.npy', allow_pickle=True).item()
            cams = cam_dict['cam']
            cam_downsized_values = cams.cuda()
            rw = propagate_to_edge(cam_downsized_values, edge, beta=args.beta, exp_times=args.exp_times, radius=5)
            # upsample x4 back to image resolution, crop to original size, normalize to [0, 1]
            rw_up = F.interpolate(rw, scale_factor=4, mode='bilinear', align_corners=False)[..., 0, :ori_h, :ori_w]
            rw_up = rw_up / torch.max(rw_up)
            if vis_cam:
                # save a colormapped CAM overlay for visual inspection
                cam = torch.sum(rw_up, dim=0)
                cam = cam.unsqueeze(0).unsqueeze(0)
                cam = make_cam(cam).squeeze()
                cam = get_numpy_from_tensor(cam)
                image = np.array(ori_image)
                h, w, c = image.shape
                cam = (cam * 255).astype(np.uint8)
                cam = cv2.resize(cam, (w, h), interpolation=cv2.INTER_LINEAR)
                cam = colormap(cam)
                image = cv2.addWeighted(image, 0.5, cam, 0.5, 0)
                cv2.imwrite(f'{cam_path}/{image_id}.png', image.astype(np.uint8))
            np.save(npy_path, {"keys": cam_dict['keys'], "rw": rw_up.cpu().numpy()})
            sys.stdout.write('\r# Make CAM with Random Walk [{}/{}] = {:.2f}%, ({}, rw_up={}, rw={})'.format(step + 1, length, (step + 1) / length * 100, (ori_h, ori_w), rw_up.size(), rw.size()))
            sys.stdout.flush()
    print()
    print("python3 evaluate.py --experiment_name {} --domain {}".format(experiment_name, args.domain))
"iputils.get_host_ip",
"argparse.ArgumentParser",
"torch.stack",
"numpy.asarray",
"torch.nn.DataParallel",
"torch.from_numpy",
"torch.max",
"os.path.isfile",
"numpy.array",
"torch.sum",
"torch.nn.functional.interpolate",
"torch.no_grad",
"sys.stdout.flush",
"numpy.load"
] | [((849, 874), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (872, 874), False, 'import argparse\n'), ((2303, 2316), 'iputils.get_host_ip', 'get_host_ip', ([], {}), '()\n', (2314, 2316), False, 'from iputils import get_host_ip\n'), ((4529, 4551), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (4544, 4551), True, 'import torch.nn as nn\n'), ((4898, 4913), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4911, 4913), False, 'import torch\n'), ((5134, 5158), 'os.path.isfile', 'os.path.isfile', (['npy_path'], {}), '(npy_path)\n', (5148, 5158), False, 'import os\n'), ((5234, 5255), 'numpy.asarray', 'np.asarray', (['ori_image'], {}), '(ori_image)\n', (5244, 5255), True, 'import numpy as np\n'), ((5364, 5387), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (5380, 5387), False, 'import torch\n'), ((5453, 5488), 'torch.stack', 'torch.stack', (['[image, flipped_image]'], {}), '([image, flipped_image])\n', (5464, 5488), False, 'import torch\n'), ((7006, 7024), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7022, 7024), False, 'import sys\n'), ((5951, 6022), 'torch.nn.functional.interpolate', 'F.interpolate', (['rw'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(rw, scale_factor=4, mode='bilinear', align_corners=False)\n", (5964, 6022), True, 'import torch.nn.functional as F\n'), ((6075, 6091), 'torch.max', 'torch.max', (['rw_up'], {}), '(rw_up)\n', (6084, 6091), False, 'import torch\n'), ((6140, 6163), 'torch.sum', 'torch.sum', (['rw_up'], {'dim': '(0)'}), '(rw_up, dim=0)\n', (6149, 6163), False, 'import torch\n'), ((6337, 6356), 'numpy.array', 'np.array', (['ori_image'], {}), '(ori_image)\n', (6345, 6356), True, 'import numpy as np\n'), ((5644, 5699), 'numpy.load', 'np.load', (["(cam_dir + image_id + '.npy')"], {'allow_pickle': '(True)'}), "(cam_dir + image_id + '.npy', allow_pickle=True)\n", (5651, 5699), True, 'import numpy as np\n')] |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import (
youngs_modulus, poisson_ratio)
def displacement(x, length, height, bending_moment, bulk_modulus,
                 shear_modulus):
    """Analytic displacement field (u_x, u_y) of the bent-beam solution."""
    E = youngs_modulus(bulk_modulus, shear_modulus)
    nu = poisson_ratio(bulk_modulus, shear_modulus)
    k = 12. * bending_moment / (E * height**3)
    u_x = -k * x[0] * x[1]
    u_y = k / 2. * (x[0]**2 + nu * x[1]**2 - length**2 / 4.)
    return np.array([u_x, u_y])
def strain(x, length, height, bending_moment, bulk_modulus, shear_modulus):
    """Diagonal 2x2 strain tensor of the bent-beam solution."""
    E = youngs_modulus(bulk_modulus, shear_modulus)
    nu = poisson_ratio(bulk_modulus, shear_modulus)
    k = 12. * bending_moment / (E * height**3)
    eps = np.zeros((2, 2))
    # only the diagonal components are nonzero
    eps[0, 0] = -k * x[1]
    eps[1, 1] = k * nu * x[1]
    return eps
def minus_stress(x, length, height, bending_moment, bulk_modulus,
                 shear_modulus):
    """Negative stress tensor: only the xx component is nonzero for pure bending."""
    sigma = np.zeros((2, 2))
    sigma[0, 0] = 12. * bending_moment / height**3 * x[1]
    return -sigma
def potential_energy_density(x, length, height, bending_moment, bulk_modulus,
                             shear_modulus):
    """Elastic energy density 0.5 * eps_ij * (-sigma)_ij at point x."""
    params = (x, length, height, bending_moment, bulk_modulus, shear_modulus)
    eps = strain(*params)
    neg_sigma = minus_stress(*params)
    # full double contraction over both tensor indices
    return 0.5 * np.einsum('ij,ij', eps, neg_sigma)
def source(x):
    """Homogeneous (zero) source term of the elasticity problem.

    Generalized to accept any array-like `x` (not only ndarrays) by using
    `np.shape` instead of the `.shape` attribute; the returned array of
    zeros matches the shape of `x`.
    """
    return np.zeros(np.shape(x))
| [
"Elasticity.ConstitutiveRelations.IsotropicHomogeneous.youngs_modulus",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"Elasticity.ConstitutiveRelations.IsotropicHomogeneous.poisson_ratio"
] | [((319, 362), 'Elasticity.ConstitutiveRelations.IsotropicHomogeneous.youngs_modulus', 'youngs_modulus', (['bulk_modulus', 'shear_modulus'], {}), '(bulk_modulus, shear_modulus)\n', (333, 362), False, 'from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import youngs_modulus, poisson_ratio\n'), ((389, 431), 'Elasticity.ConstitutiveRelations.IsotropicHomogeneous.poisson_ratio', 'poisson_ratio', (['bulk_modulus', 'shear_modulus'], {}), '(bulk_modulus, shear_modulus)\n', (402, 431), False, 'from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import youngs_modulus, poisson_ratio\n'), ((517, 643), 'numpy.array', 'np.array', (['[-prefactor * x[0] * x[1], prefactor / 2.0 * (x[0] ** 2 + \n local_poisson_ratio * x[1] ** 2 - length ** 2 / 4.0)]'], {}), '([-prefactor * x[0] * x[1], prefactor / 2.0 * (x[0] ** 2 + \n local_poisson_ratio * x[1] ** 2 - length ** 2 / 4.0)])\n', (525, 643), True, 'import numpy as np\n'), ((758, 801), 'Elasticity.ConstitutiveRelations.IsotropicHomogeneous.youngs_modulus', 'youngs_modulus', (['bulk_modulus', 'shear_modulus'], {}), '(bulk_modulus, shear_modulus)\n', (772, 801), False, 'from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import youngs_modulus, poisson_ratio\n'), ((828, 870), 'Elasticity.ConstitutiveRelations.IsotropicHomogeneous.poisson_ratio', 'poisson_ratio', (['bulk_modulus', 'shear_modulus'], {}), '(bulk_modulus, shear_modulus)\n', (841, 870), False, 'from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import youngs_modulus, poisson_ratio\n'), ((958, 974), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (966, 974), True, 'import numpy as np\n'), ((1745, 1762), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (1753, 1762), True, 'import numpy as np\n'), ((1201, 1268), 'numpy.array', 'np.array', (['[[12.0 * bending_moment / height ** 3 * x[1], 0], [0, 0]]'], {}), '([[12.0 * bending_moment / height ** 3 * x[1], 0], [0, 0]])\n', (1209, 1268), True, 'import numpy as np\n'), ((1664, 
1716), 'numpy.einsum', 'np.einsum', (['"""ij,ij"""', 'local_strain', 'local_minus_stress'], {}), "('ij,ij', local_strain, local_minus_stress)\n", (1673, 1716), True, 'import numpy as np\n')] |
"""
Contains functions for generating prior pseudo-samples, initializing for RR-NPL and maximizing weighted log likelihood in GMM examples
Uses modified sklearn sk_base and sk_gaussian_mixture classes to carry out weighted EM
"""
import numpy as np
import npl.sk_gaussian_mixture as skgm
from scipy.stats import norm
from scipy.stats import invgamma
def sampleprior_MNIST(D_data,T_trunc,K_clusters, B_postsamples, postsamples = None):
    """Draw prior pseudo-samples for the MNIST GMM example.

    Approximates the marginal of y under the GMM with a tight normal
    (scale 0.1) centred at zero, one (T_trunc, D_data) draw per posterior sample.
    """
    draw_shape = (B_postsamples, T_trunc, D_data)
    return norm.rvs(loc=0, scale=0.1, size=draw_shape)
def sampleprior_toy(D_data,T_trunc,K_clusters,B_postsamples, postsamples = None):
    """Draw prior pseudo-samples for the toy GMM example.

    Approximates the marginal of y with a standard normal centred at zero,
    one (T_trunc, D_data) draw per posterior sample.
    """
    draw_shape = (B_postsamples, T_trunc, D_data)
    return norm.rvs(loc=0, scale=1, size=draw_shape)
#Update this
def sampleprior_toyMDP(D_data,T_trunc,K_clusters, B_postsamples,postsamples): #generate prior data points
    """Draw prior pseudo-samples using MDP posterior draws of the GMM parameters.

    `postsamples` is indexed positionally with `.iloc` (presumably a pandas
    DataFrame of NUTS draws — columns 3:3+K are mixture weights, then K*D means,
    then K*D scales; TODO confirm the column layout against the sampler output).
    For each of the B_postsamples draws, one mixture component is sampled from
    the drawn weights and (T_trunc, D_data) normals are generated from it.
    """
    par_nuts = postsamples
    # slice out weights, means and scales by column position
    pi_nuts =par_nuts.iloc[:,3:K_clusters+3]
    mu_nuts =par_nuts.iloc[:,3+K_clusters: 3+(K_clusters*(D_data+1))]
    sigma_nuts = par_nuts.iloc[:,3+K_clusters*(D_data+1) :3+ K_clusters*(2*D_data+1)]
    B_nuts = np.shape(pi_nuts)[0]
    # choose which NUTS draw parameterizes each prior sample (with replacement)
    ind_postsample = np.random.choice(B_nuts,size = B_postsamples)
    y_prior = np.zeros((B_postsamples,T_trunc,D_data))
    for i in range(B_postsamples):
        # pick a mixture component according to the drawn weights
        ind_cluster = np.random.choice(K_clusters, p = pi_nuts.iloc[ind_postsample[i]])
        y_prior[i] = norm.rvs(loc = mu_nuts.iloc[ind_postsample[i],ind_cluster], scale = sigma_nuts.iloc[ind_postsample[i],ind_cluster] \
            , size = (T_trunc,D_data))
    return y_prior
def init_toy(R_restarts, K_clusters,B_postsamples, D_data):
    """Random EM initialisations for the toy example (one per restart x posterior sample)."""
    n_init = R_restarts * B_postsamples
    # mixing weights: flat Dirichlet over the K clusters
    weights = np.random.dirichlet(np.ones(K_clusters), n_init)
    # component means: uniform on [-2, 6) in every dimension
    means = 8 * np.random.rand(n_init, K_clusters, D_data) - 2
    # component variances: inverse-gamma(1) draws
    variances = invgamma.rvs(1, size=(n_init, K_clusters, D_data))
    return weights, means, variances
def init_MNIST(R_restarts, K_cluster,B_postsamples, D_data):
    """Load precomputed variational-Bayes EM initialisations bundled with the npl package.

    The same VB initialisation is tiled once per posterior sample; R_restarts,
    K_cluster and D_data are accepted for interface parity with init_toy but
    are not used here — the shapes come from the saved arrays.
    """
    import pkg_resources
    # tile the single saved initialisation across all B_postsamples bootstrap runs
    pi_init = np.tile([np.load(pkg_resources.resource_filename('npl','init_parameters/pi_init_VB.npy'))],(B_postsamples,1))
    mu_init = np.tile([np.load(pkg_resources.resource_filename('npl','init_parameters/mu_init_VB.npy'))],(B_postsamples,1,1))
    sigma_init = np.tile([np.load(pkg_resources.resource_filename('npl','init_parameters/sigma_init_VB.npy'))],(B_postsamples,1,1))
    return pi_init,mu_init,sigma_init
def init_params(y,N_data,K_clusters,D_data,tol,max_iter): #initialize parameters for FI-NPL by picking MLE
    """Run weighted EM from 10 random starts and return the best-likelihood fit.

    Returns (weights, means, scales) of the restart with the highest
    log likelihood on the uniformly-weighted data.
    """
    num_restarts = 10
    # storage for the fitted parameters of every restart
    fit_weights = np.zeros((num_restarts, K_clusters))
    fit_means = np.zeros((num_restarts, K_clusters, D_data))
    fit_scales = np.zeros((num_restarts, K_clusters, D_data))
    loglik = np.zeros(num_restarts)
    # random starting points: flat Dirichlet weights, uniform means on [-2, 6),
    # inverse-gamma(1) variances
    start_weights = np.random.dirichlet(np.ones(K_clusters), num_restarts)
    start_means = 8 * np.random.rand(num_restarts, K_clusters, D_data) - 2
    start_vars = invgamma.rvs(1, size=(num_restarts, K_clusters, D_data))
    uniform_w = np.ones(N_data)
    for r in range(num_restarts):
        model = skgm.GaussianMixture(K_clusters, covariance_type='diag',
                                     means_init=start_means[r],
                                     weights_init=start_weights[r],
                                     precisions_init=1 / start_vars[r],
                                     tol=tol, max_iter=max_iter)
        model.fit(y, uniform_w)
        fit_weights[r] = model.weights_
        fit_means[r] = model.means_
        fit_scales[r] = np.sqrt(model.covariances_)
        loglik[r] = model.score(y, uniform_w) * N_data
    best = np.argmax(loglik)
    return fit_weights[best], fit_means[best], fit_scales[best]
def maximise_mle(y,weights,pi_init,mu_init, sigma_init,K_clusters,tol,max_iter,N_data): #maximization for FI-NPL
    """Weighted EM fit from the supplied initialisation.

    Returns (weights, means, scales) of the fitted diagonal GMM; the Dirichlet
    weights are rescaled to pseudo-counts via N_data.
    """
    model = skgm.GaussianMixture(K_clusters, covariance_type='diag',
                                 means_init=mu_init, weights_init=pi_init,
                                 precisions_init=1 / sigma_init,
                                 tol=tol, max_iter=max_iter)
    model.fit(y, N_data * weights)
    return model.weights_, model.means_, np.sqrt(model.covariances_)
def maximise(y, y_prior, weights, pi_init, mu_init, sigma_init, alph_conc, T_trunc,
             K_clusters, tol, max_iter, R_restarts, N_data, D_data, postsamples=None):
    """Weighted-EM maximisation with multiple restarts for RR-NPL.

    When the concentration `alph_conc` is non-zero the prior pseudo-samples
    `y_prior` are appended to the observed data before fitting.  Each of the
    R_restarts EM runs starts from its own row of the supplied initialisers;
    the run with the highest weighted log-likelihood wins.

    T_trunc, D_data and postsamples are kept for interface compatibility.

    Returns:
        (weights, means, per-dimension standard deviations) of the best fit.
    """
    y_tot = np.concatenate((y, y_prior)) if alph_conc != 0 else y
    n_tot = np.shape(y_tot)[0]
    fits = []
    log_liks = []
    for r in range(R_restarts):
        gmm = skgm.GaussianMixture(K_clusters, covariance_type='diag',
                                   means_init=mu_init[r], weights_init=pi_init[r],
                                   precisions_init=1 / sigma_init[r],
                                   tol=tol, max_iter=max_iter)
        gmm.fit(y_tot, n_tot * weights)
        fits.append((gmm.weights_, gmm.means_, np.sqrt(gmm.covariances_)))
        log_liks.append(gmm.score(y_tot, weights) * n_tot)
    best = np.argmax(log_liks)
    return fits[best]
| [
"numpy.sqrt",
"npl.sk_gaussian_mixture.GaussianMixture",
"numpy.ones",
"numpy.random.rand",
"numpy.random.choice",
"numpy.argmax",
"pkg_resources.resource_filename",
"scipy.stats.norm.rvs",
"numpy.zeros",
"scipy.stats.invgamma.rvs",
"numpy.concatenate",
"numpy.shape"
] | [((479, 544), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': '(0.1)', 'size': '(B_postsamples, T_trunc, D_data)'}), '(loc=0, scale=0.1, size=(B_postsamples, T_trunc, D_data))\n', (487, 544), False, 'from scipy.stats import norm\n'), ((760, 823), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': '(1)', 'size': '(B_postsamples, T_trunc, D_data)'}), '(loc=0, scale=1, size=(B_postsamples, T_trunc, D_data))\n', (768, 823), False, 'from scipy.stats import norm\n'), ((1319, 1363), 'numpy.random.choice', 'np.random.choice', (['B_nuts'], {'size': 'B_postsamples'}), '(B_nuts, size=B_postsamples)\n', (1335, 1363), True, 'import numpy as np\n'), ((1379, 1421), 'numpy.zeros', 'np.zeros', (['(B_postsamples, T_trunc, D_data)'], {}), '((B_postsamples, T_trunc, D_data))\n', (1387, 1421), True, 'import numpy as np\n'), ((1979, 2049), 'scipy.stats.invgamma.rvs', 'invgamma.rvs', (['(1)'], {'size': '(R_restarts * B_postsamples, K_clusters, D_data)'}), '(1, size=(R_restarts * B_postsamples, K_clusters, D_data))\n', (1991, 2049), False, 'from scipy.stats import invgamma\n'), ((2749, 2783), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters)'], {}), '((R_restarts, K_clusters))\n', (2757, 2783), True, 'import numpy as np\n'), ((2828, 2870), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters, D_data)'], {}), '((R_restarts, K_clusters, D_data))\n', (2836, 2870), True, 'import numpy as np\n'), ((2895, 2937), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters, D_data)'], {}), '((R_restarts, K_clusters, D_data))\n', (2903, 2937), True, 'import numpy as np\n'), ((2963, 2983), 'numpy.zeros', 'np.zeros', (['R_restarts'], {}), '(R_restarts)\n', (2971, 2983), True, 'import numpy as np\n'), ((3169, 3223), 'scipy.stats.invgamma.rvs', 'invgamma.rvs', (['(1)'], {'size': '(R_restarts, K_clusters, D_data)'}), '(1, size=(R_restarts, K_clusters, D_data))\n', (3181, 3223), False, 'from scipy.stats import invgamma\n'), ((3667, 3683), 'numpy.argmax', 'np.argmax', 
(['ll_bb'], {}), '(ll_bb)\n', (3676, 3683), True, 'import numpy as np\n'), ((3858, 4025), 'npl.sk_gaussian_mixture.GaussianMixture', 'skgm.GaussianMixture', (['K_clusters'], {'covariance_type': '"""diag"""', 'means_init': 'mu_init', 'weights_init': 'pi_init', 'precisions_init': '(1 / sigma_init)', 'tol': 'tol', 'max_iter': 'max_iter'}), "(K_clusters, covariance_type='diag', means_init=mu_init,\n weights_init=pi_init, precisions_init=1 / sigma_init, tol=tol, max_iter\n =max_iter)\n", (3878, 4025), True, 'import npl.sk_gaussian_mixture as skgm\n'), ((4121, 4148), 'numpy.sqrt', 'np.sqrt', (['model.covariances_'], {}), '(model.covariances_)\n', (4128, 4148), True, 'import numpy as np\n'), ((4473, 4507), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters)'], {}), '((R_restarts, K_clusters))\n', (4481, 4507), True, 'import numpy as np\n'), ((4551, 4593), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters, D_data)'], {}), '((R_restarts, K_clusters, D_data))\n', (4559, 4593), True, 'import numpy as np\n'), ((4622, 4664), 'numpy.zeros', 'np.zeros', (['(R_restarts, K_clusters, D_data)'], {}), '((R_restarts, K_clusters, D_data))\n', (4630, 4664), True, 'import numpy as np\n'), ((4691, 4711), 'numpy.zeros', 'np.zeros', (['R_restarts'], {}), '(R_restarts)\n', (4699, 4711), True, 'import numpy as np\n'), ((5181, 5197), 'numpy.argmax', 'np.argmax', (['ll_bb'], {}), '(ll_bb)\n', (5190, 5197), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.shape', 'np.shape', (['pi_nuts'], {}), '(pi_nuts)\n', (1284, 1293), True, 'import numpy as np\n'), ((1479, 1542), 'numpy.random.choice', 'np.random.choice', (['K_clusters'], {'p': 'pi_nuts.iloc[ind_postsample[i]]'}), '(K_clusters, p=pi_nuts.iloc[ind_postsample[i]])\n', (1495, 1542), True, 'import numpy as np\n'), ((1566, 1708), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': 'mu_nuts.iloc[ind_postsample[i], ind_cluster]', 'scale': 'sigma_nuts.iloc[ind_postsample[i], ind_cluster]', 'size': '(T_trunc, D_data)'}), 
'(loc=mu_nuts.iloc[ind_postsample[i], ind_cluster], scale=sigma_nuts\n .iloc[ind_postsample[i], ind_cluster], size=(T_trunc, D_data))\n', (1574, 1708), False, 'from scipy.stats import norm\n'), ((1837, 1856), 'numpy.ones', 'np.ones', (['K_clusters'], {}), '(K_clusters)\n', (1844, 1856), True, 'import numpy as np\n'), ((3055, 3074), 'numpy.ones', 'np.ones', (['K_clusters'], {}), '(K_clusters)\n', (3062, 3074), True, 'import numpy as np\n'), ((3274, 3451), 'npl.sk_gaussian_mixture.GaussianMixture', 'skgm.GaussianMixture', (['K_clusters'], {'covariance_type': '"""diag"""', 'means_init': 'mu_init[i]', 'weights_init': 'pi_init[i]', 'precisions_init': '(1 / sigma_init[i])', 'tol': 'tol', 'max_iter': 'max_iter'}), "(K_clusters, covariance_type='diag', means_init=mu_init\n [i], weights_init=pi_init[i], precisions_init=1 / sigma_init[i], tol=\n tol, max_iter=max_iter)\n", (3294, 3451), True, 'import npl.sk_gaussian_mixture as skgm\n'), ((3572, 3599), 'numpy.sqrt', 'np.sqrt', (['model.covariances_'], {}), '(model.covariances_)\n', (3579, 3599), True, 'import numpy as np\n'), ((4405, 4433), 'numpy.concatenate', 'np.concatenate', (['(y, y_prior)'], {}), '((y, y_prior))\n', (4419, 4433), True, 'import numpy as np\n'), ((4724, 4739), 'numpy.shape', 'np.shape', (['y_tot'], {}), '(y_tot)\n', (4732, 4739), True, 'import numpy as np\n'), ((4791, 4968), 'npl.sk_gaussian_mixture.GaussianMixture', 'skgm.GaussianMixture', (['K_clusters'], {'covariance_type': '"""diag"""', 'means_init': 'mu_init[i]', 'weights_init': 'pi_init[i]', 'precisions_init': '(1 / sigma_init[i])', 'tol': 'tol', 'max_iter': 'max_iter'}), "(K_clusters, covariance_type='diag', means_init=mu_init\n [i], weights_init=pi_init[i], precisions_init=1 / sigma_init[i], tol=\n tol, max_iter=max_iter)\n", (4811, 4968), True, 'import npl.sk_gaussian_mixture as skgm\n'), ((5091, 5118), 'numpy.sqrt', 'np.sqrt', (['model.covariances_'], {}), '(model.covariances_)\n', (5098, 5118), True, 'import numpy as np\n'), ((1899, 1961), 
'numpy.random.rand', 'np.random.rand', (['(R_restarts * B_postsamples)', 'K_clusters', 'D_data'], {}), '(R_restarts * B_postsamples, K_clusters, D_data)\n', (1913, 1961), True, 'import numpy as np\n'), ((3103, 3149), 'numpy.random.rand', 'np.random.rand', (['R_restarts', 'K_clusters', 'D_data'], {}), '(R_restarts, K_clusters, D_data)\n', (3117, 3149), True, 'import numpy as np\n'), ((3468, 3483), 'numpy.ones', 'np.ones', (['N_data'], {}), '(N_data)\n', (3475, 3483), True, 'import numpy as np\n'), ((2219, 2291), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""npl"""', '"""init_parameters/pi_init_VB.npy"""'], {}), "('npl', 'init_parameters/pi_init_VB.npy')\n", (2250, 2291), False, 'import pkg_resources\n'), ((2343, 2415), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""npl"""', '"""init_parameters/mu_init_VB.npy"""'], {}), "('npl', 'init_parameters/mu_init_VB.npy')\n", (2374, 2415), False, 'import pkg_resources\n'), ((2472, 2547), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""npl"""', '"""init_parameters/sigma_init_VB.npy"""'], {}), "('npl', 'init_parameters/sigma_init_VB.npy')\n", (2503, 2547), False, 'import pkg_resources\n'), ((3633, 3648), 'numpy.ones', 'np.ones', (['N_data'], {}), '(N_data)\n', (3640, 3648), True, 'import numpy as np\n')] |
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from summariser.rouge.rouge import Rouge
import summariser.utils.data_helpers as util
import numpy as np
import operator as op
import functools
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from resources import *
#from resources import BASE_DIR,ROUGE_DIR
class State:
    """Mutable state of one extractive-summarisation episode.

    Tracks the sentences selected so far (the draft summary), which actions
    (1-based sentence ids, plus 0) are still available, and whether the
    episode has terminated.  Also provides the feature-vector encoding of
    the draft (getStateVector) and the terminal rewards used for training.
    """
    def __init__(self, sum_token_length, base_length, sent_num, block_num, language):
        """Initialise an empty draft for a document of `sent_num` sentences.

        :param sum_token_length: token budget of the summary (None = effectively unlimited)
        :param base_length: base size of one state-vector block
        :param sent_num: number of sentences available for selection
        :param block_num: number of vector blocks; drafts with more sentences share the last block
        :param language: language name understood by NLTK (SnowballStemmer / stopwords)
        """
        # hyper parameters
        self.reward_lambda = 0.9  # trade-off between relatedness and redundancy in the terminal reward
        # learning arguments
        if sum_token_length != None:
            self.sum_token_length = sum_token_length
        else:
            self.sum_token_length = 99999
        self.state_length_computer = StateLengthComputer(block_num,base_length,sent_num)
        self.vec_length = self.state_length_computer.getTotalLength()
        self.summary_vector_length = self.state_length_computer.getStatesLength(block_num)
        self.language = language
        # stemmers and stop words list
        self.stemmer = SnowballStemmer(self.language)
        self.stoplist = set(stopwords.words(self.language))
        # class variables
        #self.draft_summary = ''
        self.draft_summary_list = []  # untokenised sentences picked so far, in selection order
        self.historical_actions = []  # sentence indices picked so far, in selection order
        # Candidate action ids 0..sent_num; 0 appears to act as the terminate
        # action (it is always kept by removeOverlengthSents and is the only
        # action left once the budget is exceeded).
        self.available_sents = [i for i in range(0,sent_num+1)]
        self.terminal_state = 0 # 0 stands for non-terminal, and 1 stands for terminal
        self.draft_summary_length = 0  # token count of the current draft
        #some flags/options
        self.newReward = False
    def getSelfVector(self, top_ngrams, sentences):
        """Vector encoding of this state's own draft summary."""
        return self.getStateVector(self.draft_summary_list,self.historical_actions,
                                   top_ngrams, sentences)
    def getStateVector(self, draft_list, draft_index_list, top_ngrams, sentences):
        '''
        Represent the current draft summary using a vector
        :param draft_list: a list of sentences, included in the current draft summary
        :param draft_index_list: the indices of the included sentences
        :param top_ngrams: top n-grams for all the original documents
        :param sentences: all sentences information, used to find positions information
        :param tfidf: decides to use the Japan version (state_type==True) or the REAPER version
        :return: an numpy array, the vector representation of the state
        '''
        # for empty or over-length draft, return a full-zero vector
        draft_length = 0
        for sent in draft_list:
            draft_length += len(sent.split(' '))
        if len(draft_list) == 0 or draft_list == None: #or draft_length>self.sum_token_length:
            return np.zeros(self.vec_length)
        vector = [0] * self.vec_length
        coverage_num = 0
        redundant_count = 0
        sent_num = len(draft_index_list)
        # The block for drafts with `sent_num` sentences starts at this offset;
        # its size is getStatesLength(sent_num) = (#ngram indicator bits) + 5 stats.
        index = -1 + self.state_length_computer.getIndexUntilSentNum(sent_num)
        draft_ngrams = util.extract_ngrams_count(draft_list,self.stemmer,self.language,self.stoplist)
        num = self.state_length_computer.getStatesLength(sent_num)-5
        # First part: one indicator bit per top n-gram covered by the draft.
        for i in range(num):
            index += 1
            if top_ngrams[i] in draft_ngrams:
                vector[index] = 1
                coverage_num += 1
                if draft_ngrams[top_ngrams[i]] >= 2:
                    redundant_count += 1 # draft_ngrams[top_ngrams[i]]-1
        #this is needed, because the above loop does not perform the last add
        index += 1
        #second part: coverage ratio
        vector[index] = coverage_num*1.0/len(top_ngrams)
        index += 1
        #third part: redundant ratio;
        vector[index] = redundant_count*1.0/len(top_ngrams)
        index += 1
        #fourth part: length ratio
        vector[index] = draft_length*1.0/self.sum_token_length
        index += 1
        # fifth part: aggregated (reciprocal) position information
        vector[index] = self.getPositionInfo(sentences,draft_index_list)
        index += 1
        #sixth part: length violation bit
        if draft_length <= self.sum_token_length:
            vector[index] = 1
        # Sanity check: we must have ended exactly at the boundary of this block.
        if sent_num >= self.state_length_computer.block_num:
            assert index == -1 + self.state_length_computer.getTotalLength()
        else:
            assert index == -1 + self.state_length_computer.getIndexUntilSentNum(sent_num+1)
        return np.array(vector)
    def getPositionInfo(self, sentences, draft_index_list):
        """Sum of reciprocal document positions of the selected sentences.

        Assumes sentences[idx].position is a positive (presumably 1-based)
        rank — TODO confirm against the Sentence class.
        """
        position_index = 0
        for idx in draft_index_list:
            pos = sentences[idx].position
            position_index += 1.0/pos
        return position_index
    def noCommonTokens(self, token_list1, token_list2, word_num_limit=float("inf")):
        """True iff both lists are within the length limit and share no token."""
        # we do not check long sentences
        if len(token_list1) <= word_num_limit and len(token_list2) <= word_num_limit:
            if set(token_list1).isdisjoint(token_list2):
                return True
            else:
                return False
        else:
            return False
    #the Japan version
    def getSimilarity(self, tokens1, sentences2, fullDoc=False):
        """TF-IDF cosine similarity between two token strings.

        Returns 0 when the texts share no token or the vectorizer fails,
        and 1 when the two strings are identical.
        """
        # tokens1 is a string of tokens
        # sentences2 is a list of sentences, and each sentences is a list of tokens
        token_list1 = tokens1.split(' ')
        token_str1 = tokens1
        if fullDoc:
            token_str2 = ' '.join(sentences2)
            token_list2 = token_str2.split(' ')
        else:
            token_list2 = sentences2.split(' ')
            token_str2 = sentences2
        tfidf_vectorizer = TfidfVectorizer(min_df=0)
        #print('token_list 1: '+' '.join(token_list1))
        #print('token_list 2: '+' '.join(token_list2))
        if self.noCommonTokens(token_list1,token_list2):
            return 0
        if token_str2 == token_str1:
            return 1
        try:
            tfidf_matrix_train = tfidf_vectorizer.fit_transform([token_str1, token_str2])
        except ValueError:
            return 0
        return cosine_similarity(tfidf_matrix_train[0], tfidf_matrix_train[1])[0][0]
    def getNewStateVec(self, new_sent_id, top_ngrams, sentences):
        """Vector of the hypothetical state after additionally selecting `new_sent_id`.

        Does not mutate this state.
        """
        temp_draft_summary_list = self.draft_summary_list+[sentences[new_sent_id].untokenized_form]
        draft_index_list = self.historical_actions+[new_sent_id]
        return self.getStateVector(temp_draft_summary_list,draft_index_list,top_ngrams,sentences)
    def removeOverlengthSents(self, sents, production):
        """Drop actions whose sentence no longer fits in the remaining token budget.

        Action 0 is always kept.  In production mode the budget is relaxed
        to 120% of sum_token_length.  Note that action ids are 1-based
        (sents[sent_id-1]).
        """
        new_avai_acts = [0]
        for sent_id in self.available_sents:
            if sent_id == 0:
                continue
            if not production and len(sents[sent_id-1].untokenized_form.split(' ')) > self.sum_token_length-self.draft_summary_length:
                continue
            elif production and len(sents[sent_id-1].untokenized_form.split(' ')) > int(1.2*self.sum_token_length)-self.draft_summary_length:
                continue
            else:
                new_avai_acts.append(sent_id)
        self.available_sents = new_avai_acts[:]
        del new_avai_acts
    def updateState(self, new_sent_id, sents, read=False, production=False):
        """Append sentence `new_sent_id` (0-based) to the draft and refresh the action set.

        Returns -1 and terminates the episode when the draft has exceeded the
        budget (non-production mode only), otherwise 0.
        """
        self.draft_summary_list.append(sents[new_sent_id].untokenized_form)
        self.historical_actions.append(new_sent_id)
        self.draft_summary_length += len(sents[new_sent_id].untokenized_form.split(' '))
        if not read:
            # Actions are 1-based sentence ids, hence the +1 offset.
            self.available_sents.remove(new_sent_id+1)
        self.removeOverlengthSents(sents,production)
        if not production and self.draft_summary_length > self.sum_token_length:
            self.available_sents = [0]
            self.terminal_state = 1
            print('overlength! should not happen')
            return -1
        return 0
    def getOptimalTerminalRougeScores(self, model):
        """ROUGE scores of the current draft against a single reference `model`.

        NOTE: for an empty draft this returns the scalar 0. rather than a
        6-element list — callers must handle both shapes.
        """
        if len(self.draft_summary_list) == 0:
            return 0.
        rouge = Rouge(ROUGE_DIR, BASE_DIR, True, True)
        R1, R2, R3, R4, RL, RSU = rouge(' '.join(self.draft_summary_list), [model], self.sum_token_length)
        rouge.clean()
        return [R1, R2, R3, R4, RL, RSU]
    def getTerminalReward(self, sentences, sentences_stemmed_aggreate, sent2tokens, sim_scores):
        """Heuristic terminal reward: lambda * relatedness - (1 - lambda) * redundancy.

        Relatedness is each selected sentence's similarity to the whole
        document (plus a reciprocal-position bonus); redundancy is the sum of
        pairwise similarities among selected sentences.  `sim_scores` is a
        memo dict keyed by (idx, idx2) pairs, with -1 standing for the full
        document; it is mutated in place so scores are shared across calls.
        """
        # assert self.draft_summary_length <= self.sum_token_length
        # print('summary: \n'+' ||| '.join(self.draft_summary_list))
        relatedness_score = 0
        redundant_score = 0
        for i in range(len(self.historical_actions)):
            sent_idx = self.historical_actions[i]
            # compute relatedness scores
            # the original version used in the japan version
            # -1 stands for full docs
            if (sent_idx, -1) in sim_scores:
                relatedness_score += sim_scores[(sent_idx, -1)]
            elif (-1, sent_idx) in sim_scores:
                relatedness_score += sim_scores[(-1, sent_idx)]
            else:
                sim_score = self.getSimilarity(' '.join(sent2tokens(self.draft_summary_list[i])),
                                               sentences_stemmed_aggreate, True) + 1.0 / sentences[sent_idx].position
                relatedness_score += sim_score
                sim_scores[(sent_idx, -1)] = sim_score
            # compute redundancy scores
            for j in range(i):
                idx2 = self.historical_actions[j]
                if (sent_idx, idx2) in sim_scores:
                    redundant_score += sim_scores[(sent_idx, idx2)]
                elif (idx2, sent_idx) in sim_scores:
                    redundant_score += sim_scores[(idx2, sent_idx)]
                else:
                    red_score = self.getSimilarity(' '.join(sent2tokens(self.draft_summary_list[j])),
                                                   ' '.join(sent2tokens(self.draft_summary_list[i])))
                    redundant_score += red_score
                    sim_scores[(sent_idx, idx2)] = red_score
        return relatedness_score*self.reward_lambda-(1-self.reward_lambda)*redundant_score
class StateLengthComputer():
    """Allocates state-vector space per draft-summary sentence count.

    Drafts with k sentences (capped at block_num) get a block whose size
    grows with log10(C(sent_num, k)), scaled so that a 1-sentence draft
    gets roughly `base_length` entries; every block carries 5 extra slots
    for summary statistics.
    """

    def __init__(self, block_num, base_length, sent_num):
        self.block_num = block_num
        denom = np.log10(self.ncr(sent_num, 1))
        self.lengths = [
            int(base_length * np.log10(self.ncr(sent_num, k + 1)) * 1.0 / denom) + 5
            for k in range(block_num)
        ]

    def getStatesLength(self, sent_num):
        """Block size for a draft of `sent_num` sentences (capped at the last block)."""
        if sent_num < self.block_num:
            return self.lengths[sent_num - 1]
        return self.lengths[-1]

    def getIndexUntilSentNum(self, n):
        """Offset of the first vector entry belonging to drafts of n sentences."""
        return sum(self.getStatesLength(k) for k in range(1, min(n, self.block_num)))

    def getTotalLength(self):
        """Total length of the full state vector (all blocks)."""
        return sum(self.lengths)

    def ncr(self, n, r):
        """Binomial coefficient C(n, r) using exact integer arithmetic."""
        r = min(r, n - r)
        if r == 0:
            return 1
        top = functools.reduce(op.mul, range(n, n - r, -1))
        bottom = functools.reduce(op.mul, range(1, r + 1))
        return top // bottom
if __name__ == '__main__':
    # Smoke-test the length computer with a typical configuration.
    n_blocks, base_len, n_sents = 6, 100, 400
    print('block num: {}; sentence num: {}; '
          'the summary of length 1 will have {}-dimension states.'.format(n_blocks, n_sents, base_len))
    computer = StateLengthComputer(n_blocks, base_len, n_sents)
    print('each state length:')
    for k in range(1, 9):
        print(computer.getStatesLength(k))
    print('starting index:')
    for k in range(1, 9):
        print(computer.getIndexUntilSentNum(k))
    print('total length:{}'.format(computer.getTotalLength()))
| [
"nltk.corpus.stopwords.words",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.stem.snowball.SnowballStemmer",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.zeros",
"summariser.utils.data_helpers.extract_ngrams_count",
"summariser.rouge.rouge.Rouge"
] | [((1092, 1122), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['self.language'], {}), '(self.language)\n', (1107, 1122), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((2965, 3051), 'summariser.utils.data_helpers.extract_ngrams_count', 'util.extract_ngrams_count', (['draft_list', 'self.stemmer', 'self.language', 'self.stoplist'], {}), '(draft_list, self.stemmer, self.language, self.\n stoplist)\n', (2990, 3051), True, 'import summariser.utils.data_helpers as util\n'), ((4332, 4348), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (4340, 4348), True, 'import numpy as np\n'), ((5493, 5518), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(0)'}), '(min_df=0)\n', (5508, 5518), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((7822, 7860), 'summariser.rouge.rouge.Rouge', 'Rouge', (['ROUGE_DIR', 'BASE_DIR', '(True)', '(True)'], {}), '(ROUGE_DIR, BASE_DIR, True, True)\n', (7827, 7860), False, 'from summariser.rouge.rouge import Rouge\n'), ((1151, 1181), 'nltk.corpus.stopwords.words', 'stopwords.words', (['self.language'], {}), '(self.language)\n', (1166, 1181), False, 'from nltk.corpus import stopwords\n'), ((2701, 2726), 'numpy.zeros', 'np.zeros', (['self.vec_length'], {}), '(self.vec_length)\n', (2709, 2726), True, 'import numpy as np\n'), ((5931, 5994), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tfidf_matrix_train[0]', 'tfidf_matrix_train[1]'], {}), '(tfidf_matrix_train[0], tfidf_matrix_train[1])\n', (5948, 5994), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.kernel_ridge import KernelRidge as SklKernelRidge
import sys
sys.path.append('../')
from bareml.machinelearning.supervised import KernelRidge
from bareml.machinelearning.utils.model_selection import train_test_split
def test_linear_kernel():
    """bareml's KernelRidge with the default (linear) kernel matches sklearn."""
    data = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.3, random_state=0)
    reference = SklKernelRidge(alpha=1.0)
    candidate = KernelRidge(alpha=1.0)
    reference.fit(X_train, y_train)
    candidate.fit(X_train, y_train)
    # Predictions rounded to 4 decimals should agree between the two models.
    expected = np.round(reference.predict(X_test), 4).tolist()
    actual = np.round(candidate.predict(X_test), 4).tolist()
    assert np.allclose(actual, expected)
def test_rbf_kernel():
    """bareml's KernelRidge with the RBF kernel matches sklearn."""
    data = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.3, random_state=0)
    reference = SklKernelRidge(alpha=1.0, kernel='rbf')
    candidate = KernelRidge(alpha=1.0, kernel='rbf')
    reference.fit(X_train, y_train)
    candidate.fit(X_train, y_train)
    # Predictions rounded to 4 decimals should agree between the two models.
    expected = np.round(reference.predict(X_test), 4).tolist()
    actual = np.round(candidate.predict(X_test), 4).tolist()
    assert np.allclose(actual, expected)
def test_sigmoid_kernel():
    """bareml's KernelRidge with the sigmoid kernel matches sklearn."""
    data = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.3, random_state=0)
    reference = SklKernelRidge(alpha=1.0, kernel='sigmoid')
    candidate = KernelRidge(alpha=1.0, kernel='sigmoid')
    reference.fit(X_train, y_train)
    candidate.fit(X_train, y_train)
    # Predictions rounded to 4 decimals should agree between the two models.
    expected = np.round(reference.predict(X_test), 4).tolist()
    actual = np.round(candidate.predict(X_test), 4).tolist()
    assert np.allclose(actual, expected)
def test_polynomial_kernel():
    """bareml's KernelRidge with a degree-2 polynomial kernel matches sklearn
    on the Boston housing data, to 1 decimal place."""
    data = load_boston()
    X = data.data
    y = data.target
    reg_skl = SklKernelRidge(alpha=2.0,kernel='polynomial',degree=2)
    reg_bareml = KernelRidge(alpha=2.0,kernel='polynomial',degree=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    reg_skl.fit(X_train,y_train)
    reg_bareml.fit(X_train,y_train)
    # with polynomial kernel, sklearn's result is not quite stable
    # so check with only 1 decimal
    preds_skl = np.round(reg_skl.predict(X_test),1).tolist()
    preds_bareml = np.round(reg_bareml.predict(X_test),1).tolist()
    # should be the same result
    assert np.allclose(preds_bareml, preds_skl) | [
"bareml.machinelearning.supervised.KernelRidge",
"numpy.allclose",
"sklearn.datasets.load_boston",
"bareml.machinelearning.utils.model_selection.train_test_split",
"sklearn.kernel_ridge.KernelRidge",
"sys.path.append"
] | [((155, 177), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (170, 177), False, 'import sys\n'), ((350, 363), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (361, 363), False, 'from sklearn.datasets import load_boston\n'), ((417, 442), 'sklearn.kernel_ridge.KernelRidge', 'SklKernelRidge', ([], {'alpha': '(1.0)'}), '(alpha=1.0)\n', (431, 442), True, 'from sklearn.kernel_ridge import KernelRidge as SklKernelRidge\n'), ((460, 482), 'bareml.machinelearning.supervised.KernelRidge', 'KernelRidge', ([], {'alpha': '(1.0)'}), '(alpha=1.0)\n', (471, 482), False, 'from bareml.machinelearning.supervised import KernelRidge\n'), ((523, 576), 'bareml.machinelearning.utils.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (539, 576), False, 'from bareml.machinelearning.utils.model_selection import train_test_split\n'), ((847, 883), 'numpy.allclose', 'np.allclose', (['preds_bareml', 'preds_skl'], {}), '(preds_bareml, preds_skl)\n', (858, 883), True, 'import numpy as np\n'), ((920, 933), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (931, 933), False, 'from sklearn.datasets import load_boston\n'), ((987, 1026), 'sklearn.kernel_ridge.KernelRidge', 'SklKernelRidge', ([], {'alpha': '(1.0)', 'kernel': '"""rbf"""'}), "(alpha=1.0, kernel='rbf')\n", (1001, 1026), True, 'from sklearn.kernel_ridge import KernelRidge as SklKernelRidge\n'), ((1043, 1079), 'bareml.machinelearning.supervised.KernelRidge', 'KernelRidge', ([], {'alpha': '(1.0)', 'kernel': '"""rbf"""'}), "(alpha=1.0, kernel='rbf')\n", (1054, 1079), False, 'from bareml.machinelearning.supervised import KernelRidge\n'), ((1119, 1172), 'bareml.machinelearning.utils.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (1135, 1172), False, 'from 
bareml.machinelearning.utils.model_selection import train_test_split\n'), ((1443, 1479), 'numpy.allclose', 'np.allclose', (['preds_bareml', 'preds_skl'], {}), '(preds_bareml, preds_skl)\n', (1454, 1479), True, 'import numpy as np\n'), ((1520, 1533), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (1531, 1533), False, 'from sklearn.datasets import load_boston\n'), ((1587, 1630), 'sklearn.kernel_ridge.KernelRidge', 'SklKernelRidge', ([], {'alpha': '(1.0)', 'kernel': '"""sigmoid"""'}), "(alpha=1.0, kernel='sigmoid')\n", (1601, 1630), True, 'from sklearn.kernel_ridge import KernelRidge as SklKernelRidge\n'), ((1647, 1687), 'bareml.machinelearning.supervised.KernelRidge', 'KernelRidge', ([], {'alpha': '(1.0)', 'kernel': '"""sigmoid"""'}), "(alpha=1.0, kernel='sigmoid')\n", (1658, 1687), False, 'from bareml.machinelearning.supervised import KernelRidge\n'), ((1727, 1780), 'bareml.machinelearning.utils.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (1743, 1780), False, 'from bareml.machinelearning.utils.model_selection import train_test_split\n'), ((2051, 2087), 'numpy.allclose', 'np.allclose', (['preds_bareml', 'preds_skl'], {}), '(preds_bareml, preds_skl)\n', (2062, 2087), True, 'import numpy as np\n'), ((2131, 2144), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (2142, 2144), False, 'from sklearn.datasets import load_boston\n'), ((2198, 2254), 'sklearn.kernel_ridge.KernelRidge', 'SklKernelRidge', ([], {'alpha': '(2.0)', 'kernel': '"""polynomial"""', 'degree': '(2)'}), "(alpha=2.0, kernel='polynomial', degree=2)\n", (2212, 2254), True, 'from sklearn.kernel_ridge import KernelRidge as SklKernelRidge\n'), ((2270, 2323), 'bareml.machinelearning.supervised.KernelRidge', 'KernelRidge', ([], {'alpha': '(2.0)', 'kernel': '"""polynomial"""', 'degree': '(2)'}), "(alpha=2.0, kernel='polynomial', degree=2)\n", (2281, 2323), False, 
'from bareml.machinelearning.supervised import KernelRidge\n'), ((2362, 2415), 'bareml.machinelearning.utils.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (2378, 2415), False, 'from bareml.machinelearning.utils.model_selection import train_test_split\n'), ((2762, 2798), 'numpy.allclose', 'np.allclose', (['preds_bareml', 'preds_skl'], {}), '(preds_bareml, preds_skl)\n', (2773, 2798), True, 'import numpy as np\n')] |
import os
from PIL import Image
import cv2
import numpy as np
import torch
from model.stcgan import *
from model.stcutils import *
class AutoColor:
    """Two-stage sketch colourisation pipeline (draft GAN + refinement GAN).

    Images are loaded and preprocessed once, then the draft and refined
    colourisations are computed lazily and cached until the next loadImages
    call.  DraftGAN/RefineGAN, GrayToSketch, DisplayTorch and plt come from
    the star-imported model.* modules — presumably matplotlib.pyplot for
    plt; verify against model.stcutils.
    """
    def __init__(self, enableCUDA = True):
        """Build both GAN wrappers; CUDA is used only if available AND requested."""
        self.cuda_enabled = torch.cuda.is_available() and enableCUDA
        self.gan_draft = DraftGAN(enableCUDA = self.cuda_enabled)
        self.gan_refine = RefineGAN(enableCUDA = self.cuda_enabled)
        # Cached intermediate tensors; None means "not computed yet".
        self.tensor_draft = None
        self.tensor_sketch = None
        self.tensor_hint = None
        self.tensor_refined = None
        self.count = 0  # number of plots produced, used for output filenames
    def loadModels(self, path_draft, path_refine):
        """Load pretrained weights for both stages from the given paths."""
        self.gan_draft.loadModels(path_draft)
        self.gan_refine.loadModels(path_refine)
    def loadImages(self, img_sketch, img_hint, shrinkImages = False, edgeDetect = False):
        """Load and preprocess the sketch and colour-hint images.

        Each argument may be a file path or an already-loaded array; alpha
        channels are discarded.  The sketch is converted to grayscale
        (optionally shrunk to 256x256 and edge-detected) and the hint is
        heavily blurred.  All cached tensors are invalidated.
        """
        if type(img_sketch) == str:
            with Image.open(img_sketch) as img:
                self.numpy_sketch = np.array(img)[:,:,:3]
        else:
            self.numpy_sketch = img_sketch[:,:,:3]
        if type(img_hint) == str:
            with Image.open(img_hint) as img:
                self.numpy_hint = np.array(img)[:,:,:3] #Make sure it's 3 channels
        else:
            self.numpy_hint = img_hint[:,:,:3]
        self.numpy_sketch = cv2.cvtColor(self.numpy_sketch, cv2.COLOR_RGB2GRAY)
        if shrinkImages:
            self.numpy_sketch = cv2.resize(self.numpy_sketch, dsize=(256, 256), interpolation=cv2.INTER_CUBIC)
            self.numpy_hint = cv2.resize(self.numpy_hint, dsize=(256, 256), interpolation=cv2.INTER_CUBIC)
        if edgeDetect:
            self.numpy_sketch = GrayToSketch(self.numpy_sketch)
        # Blur the hint so only coarse colour information is kept.
        self.numpy_hint = cv2.GaussianBlur(self.numpy_hint, (115, 115), 0)
        # Invalidate all cached tensors for the new input pair.
        self.tensor_draft = None
        self.tensor_sketch = None
        self.tensor_hint = None
        self.tensor_refined = None
    def getDraft(self):
        """Compute (or return the cached) draft colourisation tensor.

        Converts the numpy images to normalised [0, 1] batch tensors on the
        first call, then runs the draft generator on sketch+hint.
        """
        if self.tensor_draft is None:
            # (H, W) -> (1, 1, H, W) and (H, W, 3) -> (1, 3, H, W), scaled to [0, 1].
            self.tensor_sketch = torch.from_numpy(self.numpy_sketch).unsqueeze(0).unsqueeze(0).float() / 255
            self.tensor_hint = torch.from_numpy(self.numpy_hint).permute(2, 0, 1).unsqueeze(0).float() / 255
            if self.cuda_enabled:
                self.tensor_sketch = self.tensor_sketch.cuda()
                self.tensor_hint = self.tensor_hint.cuda()
                self.gan_draft.G = self.gan_draft.G.cuda()
            self.gan_draft.G.eval()
            with torch.no_grad():
                self.tensor_draft = self.gan_draft.G(torch.cat([self.tensor_sketch, self.tensor_hint], dim=1))
        return self.tensor_draft
    def getRefined(self):
        """Compute (or return the cached) refined colourisation tensor.

        Requires the draft; if it is missing, getDraft() is run first and
        this method recurses once.
        """
        if self.tensor_refined is None:
            if self.tensor_draft is not None:
                if self.cuda_enabled:
                    self.gan_refine.G = self.gan_refine.G.cuda()
                self.gan_refine.G.eval()
                with torch.no_grad():
                    self.tensor_refined = self.gan_refine.G(torch.cat([self.tensor_sketch, self.tensor_hint, self.tensor_draft], dim=1))
            else:
                self.getDraft()
                self.getRefined()
        return self.tensor_refined
    def plotImages(self, saveToFile = False):
        """Show sketch, hint, draft and refined result side by side.

        Optionally saves the figure as 'Plot<count>.png'; the counter is
        incremented on every call so successive plots get distinct names.
        """
        fig = plt.figure(figsize=(50,50))
        ax = plt.subplot(1,4,1)
        ax.set_title('Sketch', fontsize=50)
        DisplayTorch(self.tensor_sketch)
        ax = plt.subplot(1,4,2)
        ax.set_title('Color Hint', fontsize=50)
        DisplayTorch(self.tensor_hint)
        ax = plt.subplot(1,4,3)
        ax.set_title('Draft', fontsize=50)
        DisplayTorch(self.tensor_draft)
        ax = plt.subplot(1,4,4)
        ax.set_title('Refined', fontsize=50)
        DisplayTorch(self.tensor_refined)
        if saveToFile:
            fig.savefig('Plot{}.png'.format(self.count))
            plt.close()
        self.count += 1 | [
"PIL.Image.open",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"cv2.cvtColor",
"torch.no_grad",
"cv2.resize",
"cv2.GaussianBlur",
"torch.cat"
] | [((1265, 1316), 'cv2.cvtColor', 'cv2.cvtColor', (['self.numpy_sketch', 'cv2.COLOR_RGB2GRAY'], {}), '(self.numpy_sketch, cv2.COLOR_RGB2GRAY)\n', (1277, 1316), False, 'import cv2\n'), ((1681, 1729), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.numpy_hint', '(115, 115)', '(0)'], {}), '(self.numpy_hint, (115, 115), 0)\n', (1697, 1729), False, 'import cv2\n'), ((220, 245), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (243, 245), False, 'import torch\n'), ((1379, 1457), 'cv2.resize', 'cv2.resize', (['self.numpy_sketch'], {'dsize': '(256, 256)', 'interpolation': 'cv2.INTER_CUBIC'}), '(self.numpy_sketch, dsize=(256, 256), interpolation=cv2.INTER_CUBIC)\n', (1389, 1457), False, 'import cv2\n'), ((1491, 1567), 'cv2.resize', 'cv2.resize', (['self.numpy_hint'], {'dsize': '(256, 256)', 'interpolation': 'cv2.INTER_CUBIC'}), '(self.numpy_hint, dsize=(256, 256), interpolation=cv2.INTER_CUBIC)\n', (1501, 1567), False, 'import cv2\n'), ((859, 881), 'PIL.Image.open', 'Image.open', (['img_sketch'], {}), '(img_sketch)\n', (869, 881), False, 'from PIL import Image\n'), ((1064, 1084), 'PIL.Image.open', 'Image.open', (['img_hint'], {}), '(img_hint)\n', (1074, 1084), False, 'from PIL import Image\n'), ((2498, 2513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2511, 2513), False, 'import torch\n'), ((926, 939), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (934, 939), True, 'import numpy as np\n'), ((1127, 1140), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1135, 1140), True, 'import numpy as np\n'), ((2569, 2625), 'torch.cat', 'torch.cat', (['[self.tensor_sketch, self.tensor_hint]'], {'dim': '(1)'}), '([self.tensor_sketch, self.tensor_hint], dim=1)\n', (2578, 2625), False, 'import torch\n'), ((2976, 2991), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2989, 2991), False, 'import torch\n'), ((3054, 3129), 'torch.cat', 'torch.cat', (['[self.tensor_sketch, self.tensor_hint, self.tensor_draft]'], {'dim': '(1)'}), 
'([self.tensor_sketch, self.tensor_hint, self.tensor_draft], dim=1)\n', (3063, 3129), False, 'import torch\n'), ((2013, 2048), 'torch.from_numpy', 'torch.from_numpy', (['self.numpy_sketch'], {}), '(self.numpy_sketch)\n', (2029, 2048), False, 'import torch\n'), ((2123, 2156), 'torch.from_numpy', 'torch.from_numpy', (['self.numpy_hint'], {}), '(self.numpy_hint)\n', (2139, 2156), False, 'import torch\n')] |
"""
This module provides the defence method implementing THERMOMETER ENCODING.
THERMOMETER ENCODING: ONE HOT WAY TO RESIST ADVERSARIAL EXAMPLES
"""
from builtins import range
import logging
logger=logging.getLogger(__name__)
import numpy as np
from keras.utils import to_categorical
__all__ = [
'ThermometerEncodingDefence'
]
def _perchannel(x,num_space):
pos = np.zeros(shape=x.shape)
for i in range(1, num_space):
pos[x > float(i) / num_space] += 1
onehot_rep = to_categorical(pos.reshape(-1), num_space)
for i in reversed(list(range(1, num_space))):
onehot_rep[:, i] += np.sum(onehot_rep[:, :i], axis=1)
result = onehot_rep.reshape(list(x.shape) + [num_space])
return result
#num_space=10为 一般为10
#clip_values为最终处理后取值范围 可能包含负数 常见的为[0,1] [-1,1]
# 支持的格式为[28,28,1]
def ThermometerEncodingDefence(x, y=None, num_space=10, clip_values=(0.0, 1.0)):
result = []
#for c in range(x.shape[-1]):
# result.append(_perchannel(x[:, :, :, c],num_space))
for c in range(x.shape[1]):
result.append(_perchannel(x[:, c, :, :],num_space))
result = np.concatenate(result, axis=3)
result = np.clip(result, clip_values[0], clip_values[1])
return result
| [
"logging.getLogger",
"numpy.clip",
"numpy.sum",
"numpy.zeros",
"builtins.range",
"numpy.concatenate"
] | [((202, 229), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (219, 229), False, 'import logging\n'), ((382, 405), 'numpy.zeros', 'np.zeros', ([], {'shape': 'x.shape'}), '(shape=x.shape)\n', (390, 405), True, 'import numpy as np\n'), ((419, 438), 'builtins.range', 'range', (['(1)', 'num_space'], {}), '(1, num_space)\n', (424, 438), False, 'from builtins import range\n'), ((1050, 1067), 'builtins.range', 'range', (['x.shape[1]'], {}), '(x.shape[1])\n', (1055, 1067), False, 'from builtins import range\n'), ((1143, 1173), 'numpy.concatenate', 'np.concatenate', (['result'], {'axis': '(3)'}), '(result, axis=3)\n', (1157, 1173), True, 'import numpy as np\n'), ((1187, 1234), 'numpy.clip', 'np.clip', (['result', 'clip_values[0]', 'clip_values[1]'], {}), '(result, clip_values[0], clip_values[1])\n', (1194, 1234), True, 'import numpy as np\n'), ((623, 656), 'numpy.sum', 'np.sum', (['onehot_rep[:, :i]'], {'axis': '(1)'}), '(onehot_rep[:, :i], axis=1)\n', (629, 656), True, 'import numpy as np\n'), ((572, 591), 'builtins.range', 'range', (['(1)', 'num_space'], {}), '(1, num_space)\n', (577, 591), False, 'from builtins import range\n')] |
#To detect Moire ́ patternzs, images are first decomposed using Wavelet decomposition (refer to file '') and trained using multi-input Convolutional neural network. The strength of the proposed CNN model is, it uses the LL intensity image (from the Wavelet decomposition) as a weight parameter for the Moire ́ pattern, thereby approximating the spatial spread of the Moire ́ pattern in the image. Usage of CNN model performs better than frequency thresholding approach as the model is trained considering diverse scenarios and it is able to distinguish between the high frequency of background texture and the Moire ́ pattern.
from matplotlib import pyplot as plt
import numpy as np
import sys
import argparse
from os import listdir
from os.path import isfile, join
from PIL import Image
from sklearn import preprocessing
from skimage import io
from sklearn.model_selection import train_test_split
import os
from mCNN import createModel
from keras.utils import np_utils # utilities for one-hot encoding of ground truth values
from keras.callbacks import ModelCheckpoint
#constants
WIDTH = 500#384
HEIGHT = 375#512
def scaleData(inp, minimum, maximum):
minMaxScaler = preprocessing.MinMaxScaler(copy=True, feature_range=(minimum,maximum))
inp = inp.reshape(-1, 1)
inp = minMaxScaler.fit_transform(inp)
return inp
# - read positive and negative training data
# - create X and Y from training data
def main(args):
positiveImagePath = (args.positiveImages)
negativeImagePath = (args.negativeImages)
numEpochs = (args.epochs)
positiveTrainImagePath = args.trainingDataPositive
negativeTrainImagePath = args.trainingDataNegative
X_LL, X_LH, X_HL, X_HH, X_index, Y, imageCount = readWaveletData(positiveImagePath, negativeImagePath, positiveTrainImagePath, negativeTrainImagePath)
X_LL_train,X_LH_train,X_HL_train,X_HH_train,Y_train,X_LL_test,X_LH_test,X_HL_test,X_HH_test,Y_test = trainTestSplit(X_LL, X_LH, X_HL, X_HH, X_index, Y, imageCount)
model = trainCNNModel(X_LL_train,X_LH_train,X_HL_train,X_HH_train,Y_train,
X_LL_test,X_LH_test,X_HL_test,X_HH_test,Y_test, numEpochs)
evaluate(model, X_LL_test,X_LH_test,X_HL_test,X_HH_test,Y_test)
def readAndScaleImage(f, customStr, trainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, sampleVal):
fileName = (os.path.splitext(f)[0])
fLL = (f.replace(fileName, fileName + customStr + '_LL')).replace('.jpg','.tiff')
fLH = (f.replace(fileName, fileName + customStr + '_LH')).replace('.jpg','.tiff')
fHL = (f.replace(fileName, fileName + customStr + '_HL')).replace('.jpg','.tiff')
fHH = (f.replace(fileName, fileName + customStr + '_HH')).replace('.jpg','.tiff')
try:
imgLL = Image.open(join(trainImagePath, fLL))
imgLH = Image.open(join(trainImagePath, fLH))
imgHL = Image.open(join(trainImagePath, fHL))
imgHH = Image.open(join(trainImagePath, fHH))
except Exception as e:
print('Error: Couldnt read the file {}. Make sure only images are present in the folder'.format(fileName))
print('Exception:', e)
return None
imgLL = np.array(imgLL)
imgLH = np.array(imgLH)
imgHL = np.array(imgHL)
imgHH = np.array(imgHH)
imgLL = scaleData(imgLL, 0, 1)
imgLH = scaleData(imgLH, -1, 1)
imgHL = scaleData(imgHL, -1, 1)
imgHH = scaleData(imgHH, -1, 1)
imgVector = imgLL.reshape(1, WIDTH*HEIGHT)
X_LL[sampleIndex, :] = imgVector
imgVector = imgLH.reshape(1, WIDTH*HEIGHT)
X_LH[sampleIndex, :] = imgVector
imgVector = imgHL.reshape(1, WIDTH*HEIGHT)
X_HL[sampleIndex, :] = imgVector
imgVector = imgHH.reshape(1, WIDTH*HEIGHT)
X_HH[sampleIndex, :] = imgVector
Y[sampleIndex, 0] = sampleVal;
X_index[sampleIndex, 0] = sampleIndex;
return True
def readImageSet(imageFiles, trainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, bClass):
for f in imageFiles:
ret = readAndScaleImage(f, '', trainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, bClass)
if ret == True:
sampleIndex = sampleIndex + 1
#read 180deg rotated data
ret = readAndScaleImage(f, '_180', trainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex,bClass)
if ret == True:
sampleIndex = sampleIndex + 1
#read 180deg FLIP data
ret = readAndScaleImage(f, '_180_FLIP', trainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, bClass)
if ret == True:
sampleIndex = sampleIndex + 1
return sampleIndex
def readWaveletData(positiveImagePath, negativeImagePath, positiveTrainImagePath, negativeTrainImagePath):
# get augmented, balanced training data image files by class
positiveImageFiles = [f for f in listdir(positiveImagePath) if (isfile(join(positiveImagePath, f)))]
negativeImageFiles = [f for f in listdir(negativeImagePath) if (isfile(join(negativeImagePath, f)))]
positiveCount = len(positiveImageFiles)*4
negativeCount = len(negativeImageFiles)*4
print('positive samples: ' + str(positiveCount))
print('negative samples: ' + str(negativeCount))
imageCount = positiveCount + negativeCount
#intialization
X_LL = np.zeros((positiveCount + negativeCount, WIDTH*HEIGHT))
X_LH = np.zeros((positiveCount + negativeCount, WIDTH*HEIGHT))
X_HL = np.zeros((positiveCount + negativeCount, WIDTH*HEIGHT))
X_HH = np.zeros((positiveCount + negativeCount, WIDTH*HEIGHT))
X_index = np.zeros((positiveCount + negativeCount, 1))
Y = np.zeros((positiveCount + negativeCount, 1))
sampleIndex = 0
# read all images, convert to float, divide by 255 (leads to gray range 0..1), reshape into a row vector
# write class 1 for positive and 0 for negative samples
sampleIndex = readImageSet(positiveImageFiles, positiveTrainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, 0)
print('positive data loaded.')
sampleIndex += readImageSet(negativeImageFiles, negativeTrainImagePath, X_LL, X_LH, X_HL, X_HH, X_index, Y, sampleIndex, 1)
print('negative data loaded.')
print('Total Samples Loaded: ', sampleIndex)
print(X_LL)
print(X_LH)
print(Y)
return X_LL, X_LH, X_HL, X_HH, X_index, Y, imageCount
#Here, we perform index based splitting and use those indices to split the our multi-input datasets. This is done because the CNN model is multi-input network
def splitTrainTestDataForBands(inputData, X_train_ind, X_test_ind):
X_train = np.zeros((len(X_train_ind), WIDTH*HEIGHT))
for i in range(len(X_train_ind)):
X_train[i,:] = inputData[int(X_train_ind[i,0]),:]
X_test = np.zeros((len(X_test_ind), WIDTH*HEIGHT))
for i in range(len(X_test_ind)):
X_test[i,:] = inputData[int(X_test_ind[i,0]),:]
return X_train, X_test
def countPositiveSamplesAfterSplit(trainData):
count = 0;
for i in range(len(trainData)):
if(trainData[i,0] == 0):
count = count + 1
return count
def trainTestSplit(X_LL, X_LH, X_HL, X_HH, X_index, Y, imageCount):
testCountPercent = 0.1
# evaluate the model by splitting into train and test sets
X_train_ind, X_test_ind, y_train, y_test = train_test_split(X_index, Y, test_size=testCountPercent, random_state=1, stratify=Y)
X_LL_train, X_LL_test = splitTrainTestDataForBands(X_LL, X_train_ind, X_test_ind)
X_LH_train, X_LH_test = splitTrainTestDataForBands(X_LH, X_train_ind, X_test_ind)
X_HL_train, X_HL_test = splitTrainTestDataForBands(X_HL, X_train_ind, X_test_ind)
X_HH_train, X_HH_test = splitTrainTestDataForBands(X_HH, X_train_ind, X_test_ind)
imageHeight = HEIGHT
imageWidth = WIDTH
print(countPositiveSamplesAfterSplit(y_train))
print(len(X_LL_train))
print(len(y_train))
print(len(X_LL_test))
print(len(y_test))
num_train_samples = len(y_train)
print('num_train_samples', num_train_samples)
X_LL_train = np.array(X_LL_train)
X_LL_train = X_LL_train.reshape((num_train_samples, imageHeight, imageWidth, 1))
X_LL_test = np.array(X_LL_test)
X_LL_test = X_LL_test.reshape((imageCount - num_train_samples, imageHeight, imageWidth, 1))
X_LH_train = np.array(X_LH_train)
X_LH_train = X_LH_train.reshape((num_train_samples, imageHeight, imageWidth, 1))
X_LH_test = np.array(X_LH_test)
X_LH_test = X_LH_test.reshape((imageCount - num_train_samples, imageHeight, imageWidth, 1))
X_HL_train = np.array(X_HL_train)
X_HL_train = X_HL_train.reshape((num_train_samples, imageHeight, imageWidth, 1))
X_HL_test = np.array(X_HL_test)
X_HL_test = X_HL_test.reshape((imageCount - num_train_samples, imageHeight, imageWidth, 1))
X_HH_train = np.array(X_HH_train)
X_HH_train = X_HH_train.reshape((num_train_samples, imageHeight, imageWidth, 1))
X_HH_test = np.array(X_HH_test)
X_HH_test = X_HH_test.reshape((imageCount - num_train_samples, imageHeight, imageWidth, 1))
y_train = np.array(y_train)
y_test = np.array(y_test)
num_train, height, width, depth = X_LL_train.shape
num_test = X_LL_test.shape[0]
num_classes = len(np.unique(y_train))
return X_LL_train,X_LH_train,X_HL_train,X_HH_train,y_train,X_LL_test,X_LH_test,X_HL_test,X_HH_test,y_test
def trainCNNModel(X_LL_train,X_LH_train,X_HL_train,X_HH_train,y_train,
X_LL_test,X_LH_test,X_HL_test,X_HH_test,y_test, num_epochs):
batch_size = 32 # in each iteration, we consider 32 training examples at once
num_train, height, width, depth = X_LL_train.shape
num_classes = len(np.unique(y_train))
Y_train = np_utils.to_categorical(y_train, num_classes) # One-hot encode the labels
Y_test = np_utils.to_categorical(y_test, num_classes) # One-hot encode the labels
checkPointFolder = 'checkPoint'
checkpoint_name = checkPointFolder + '/Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')
callbacks_list = [checkpoint]
if not os.path.exists(checkPointFolder):
os.makedirs(checkPointFolder)
model = createModel(height, width, depth, num_classes)
model.compile(loss='categorical_crossentropy', # using the cross-entropy loss function
optimizer='adam', # using the Adam optimiser
metrics=['accuracy']) # reporting the accuracy
model.fit([X_LL_train,X_LH_train,X_HL_train,X_HH_train], Y_train, # Train the model using the training set...
batch_size=batch_size, epochs=num_epochs,
verbose=1, validation_split=0.1, callbacks=callbacks_list) # ...holding out 10% of the data for validation
score, acc = model.evaluate([X_LL_test,X_LH_test,X_HL_test,X_HH_test], Y_test, verbose=1) # Evaluate the trained model on the test set!
model.save('moirePattern3CNN_.h5')
return model
def evaluate(model, X_LL_test,X_LH_test,X_HL_test,X_HH_test,y_test):
model_out = model.predict([X_LL_test,X_LH_test,X_HL_test,X_HH_test])
passCnt = 0
TP = 0
TN = 0
FP = 0
FN = 0
for i in range(len(y_test)):
if np.argmax(model_out[i, :]) == y_test[i]:
str_label='Pass'
passCnt = passCnt + 1
else:
str_label='Fail'
if y_test[i] ==0:
if np.argmax(model_out[i, :]) == y_test[i]:
TP = TP + 1;
else:
FN = FN + 1
else:
if np.argmax(model_out[i, :]) == y_test[i]:
TN = TN + 1;
else:
FP = FP + 1
start = "\033[1m"
end = "\033[0;0m"
print(start + 'confusion matrix (test / validation)' + end)
print(start + 'true positive: '+ end + str(TP))
print(start + 'false positive: '+ end + str(FP))
print(start + 'true negative: '+ end + str(TN))
print(start + 'false negative: '+ end + str(FN))
print('\n')
print(start + 'accuracy: ' + end + "{:.4f} %".format(100*(TP+TN)/(TP+FP+FN+TN)))
print(start + 'precision: ' + end + "{:.4f} %".format(100*TP/(TP + FP)))
print(start + 'recall: ' + end + "{:.4f} %".format(100*TP/(TP + FN)))
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('positiveImages', type=str, help='Directory with original positive (Moiré pattern) images.')
parser.add_argument('negativeImages', type=str, help='Directory with original negative (Normal) images.')
parser.add_argument('trainingDataPositive', type=str, help='Directory with transformed positive (Moiré pattern) images.')
parser.add_argument('trainingDataNegative', type=str, help='Directory with transformed negative (Normal) images.')
parser.add_argument('epochs', type=int, help='Number of epochs for training')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| [
"mCNN.createModel",
"os.path.exists",
"os.listdir",
"numpy.unique",
"keras.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"os.makedirs",
"os.path.splitext",
"os.path.join",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"keras.utils.np_uti... | [((1174, 1245), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'copy': '(True)', 'feature_range': '(minimum, maximum)'}), '(copy=True, feature_range=(minimum, maximum))\n', (1200, 1245), False, 'from sklearn import preprocessing\n'), ((3189, 3204), 'numpy.array', 'np.array', (['imgLL'], {}), '(imgLL)\n', (3197, 3204), True, 'import numpy as np\n'), ((3217, 3232), 'numpy.array', 'np.array', (['imgLH'], {}), '(imgLH)\n', (3225, 3232), True, 'import numpy as np\n'), ((3245, 3260), 'numpy.array', 'np.array', (['imgHL'], {}), '(imgHL)\n', (3253, 3260), True, 'import numpy as np\n'), ((3273, 3288), 'numpy.array', 'np.array', (['imgHH'], {}), '(imgHH)\n', (3281, 3288), True, 'import numpy as np\n'), ((5348, 5405), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, WIDTH * HEIGHT)'], {}), '((positiveCount + negativeCount, WIDTH * HEIGHT))\n', (5356, 5405), True, 'import numpy as np\n'), ((5415, 5472), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, WIDTH * HEIGHT)'], {}), '((positiveCount + negativeCount, WIDTH * HEIGHT))\n', (5423, 5472), True, 'import numpy as np\n'), ((5482, 5539), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, WIDTH * HEIGHT)'], {}), '((positiveCount + negativeCount, WIDTH * HEIGHT))\n', (5490, 5539), True, 'import numpy as np\n'), ((5549, 5606), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, WIDTH * HEIGHT)'], {}), '((positiveCount + negativeCount, WIDTH * HEIGHT))\n', (5557, 5606), True, 'import numpy as np\n'), ((5619, 5663), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, 1)'], {}), '((positiveCount + negativeCount, 1))\n', (5627, 5663), True, 'import numpy as np\n'), ((5672, 5716), 'numpy.zeros', 'np.zeros', (['(positiveCount + negativeCount, 1)'], {}), '((positiveCount + negativeCount, 1))\n', (5680, 5716), True, 'import numpy as np\n'), ((7369, 7457), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_index', 
'Y'], {'test_size': 'testCountPercent', 'random_state': '(1)', 'stratify': 'Y'}), '(X_index, Y, test_size=testCountPercent, random_state=1,\n stratify=Y)\n', (7385, 7457), False, 'from sklearn.model_selection import train_test_split\n'), ((8106, 8126), 'numpy.array', 'np.array', (['X_LL_train'], {}), '(X_LL_train)\n', (8114, 8126), True, 'import numpy as np\n'), ((8228, 8247), 'numpy.array', 'np.array', (['X_LL_test'], {}), '(X_LL_test)\n', (8236, 8247), True, 'import numpy as np\n'), ((8362, 8382), 'numpy.array', 'np.array', (['X_LH_train'], {}), '(X_LH_train)\n', (8370, 8382), True, 'import numpy as np\n'), ((8484, 8503), 'numpy.array', 'np.array', (['X_LH_test'], {}), '(X_LH_test)\n', (8492, 8503), True, 'import numpy as np\n'), ((8618, 8638), 'numpy.array', 'np.array', (['X_HL_train'], {}), '(X_HL_train)\n', (8626, 8638), True, 'import numpy as np\n'), ((8740, 8759), 'numpy.array', 'np.array', (['X_HL_test'], {}), '(X_HL_test)\n', (8748, 8759), True, 'import numpy as np\n'), ((8878, 8898), 'numpy.array', 'np.array', (['X_HH_train'], {}), '(X_HH_train)\n', (8886, 8898), True, 'import numpy as np\n'), ((9000, 9019), 'numpy.array', 'np.array', (['X_HH_test'], {}), '(X_HH_test)\n', (9008, 9019), True, 'import numpy as np\n'), ((9131, 9148), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (9139, 9148), True, 'import numpy as np\n'), ((9162, 9178), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (9170, 9178), True, 'import numpy as np\n'), ((9771, 9816), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (9794, 9816), False, 'from keras.utils import np_utils\n'), ((9858, 9902), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (9881, 9902), False, 'from keras.utils import np_utils\n'), ((10071, 10172), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['checkpoint_name'], {'monitor': 
'"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""'}), "(checkpoint_name, monitor='val_loss', verbose=1,\n save_best_only=True, mode='auto')\n", (10086, 10172), False, 'from keras.callbacks import ModelCheckpoint\n'), ((10326, 10372), 'mCNN.createModel', 'createModel', (['height', 'width', 'depth', 'num_classes'], {}), '(height, width, depth, num_classes)\n', (10337, 10372), False, 'from mCNN import createModel\n'), ((12423, 12448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12446, 12448), False, 'import argparse\n'), ((2377, 2396), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2393, 2396), False, 'import os\n'), ((9293, 9311), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (9302, 9311), True, 'import numpy as np\n'), ((9737, 9755), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (9746, 9755), True, 'import numpy as np\n'), ((10224, 10256), 'os.path.exists', 'os.path.exists', (['checkPointFolder'], {}), '(checkPointFolder)\n', (10238, 10256), False, 'import os\n'), ((10266, 10295), 'os.makedirs', 'os.makedirs', (['checkPointFolder'], {}), '(checkPointFolder)\n', (10277, 10295), False, 'import os\n'), ((2786, 2811), 'os.path.join', 'join', (['trainImagePath', 'fLL'], {}), '(trainImagePath, fLL)\n', (2790, 2811), False, 'from os.path import isfile, join\n'), ((2840, 2865), 'os.path.join', 'join', (['trainImagePath', 'fLH'], {}), '(trainImagePath, fLH)\n', (2844, 2865), False, 'from os.path import isfile, join\n'), ((2894, 2919), 'os.path.join', 'join', (['trainImagePath', 'fHL'], {}), '(trainImagePath, fHL)\n', (2898, 2919), False, 'from os.path import isfile, join\n'), ((2948, 2973), 'os.path.join', 'join', (['trainImagePath', 'fHH'], {}), '(trainImagePath, fHH)\n', (2952, 2973), False, 'from os.path import isfile, join\n'), ((4893, 4919), 'os.listdir', 'listdir', (['positiveImagePath'], {}), '(positiveImagePath)\n', (4900, 4919), False, 'from os import 
listdir\n'), ((4998, 5024), 'os.listdir', 'listdir', (['negativeImagePath'], {}), '(negativeImagePath)\n', (5005, 5024), False, 'from os import listdir\n'), ((11356, 11382), 'numpy.argmax', 'np.argmax', (['model_out[i, :]'], {}), '(model_out[i, :])\n', (11365, 11382), True, 'import numpy as np\n'), ((4931, 4957), 'os.path.join', 'join', (['positiveImagePath', 'f'], {}), '(positiveImagePath, f)\n', (4935, 4957), False, 'from os.path import isfile, join\n'), ((5036, 5062), 'os.path.join', 'join', (['negativeImagePath', 'f'], {}), '(negativeImagePath, f)\n', (5040, 5062), False, 'from os.path import isfile, join\n'), ((11545, 11571), 'numpy.argmax', 'np.argmax', (['model_out[i, :]'], {}), '(model_out[i, :])\n', (11554, 11571), True, 'import numpy as np\n'), ((11690, 11716), 'numpy.argmax', 'np.argmax', (['model_out[i, :]'], {}), '(model_out[i, :])\n', (11699, 11716), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2017 by <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
r"""Norms and their associated proximal maps and projections
The :math:`p`-norm of a vector is defined as
.. math::
\| \mathbf{x} \|_p = \left( \sum_i | x_i |^p \right)^{1/p}
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
The max norm is a special case
.. math::
\| \mathbf{x} \|_{\infty} = \max_i | x_i | \;\;.
The mixed matrix norm :math:`\|X\|_{p,q}` is defined here as
:cite:`kowalski-2009-sparse`
.. math::
\|X\|_{p,q} = \left( \sum_i \left( \sum_j |X_{i,j}|^p \right)^{q/p}
\right)^{1/q} = \left( \sum_i \| \mathbf{x}_i \|_p^q \right)^{1/q}
where :math:`\mathbf{x}_i` is row :math:`i` of matrix
:math:`X`. Note that some authors use a notation that reverses the
positions of :math:`p` and :math:`q`.
The proximal operator of function :math:`f` is defined as
.. math::
\mathrm{prox}_f(\mathbf{v}) = \mathrm{argmin}_{\mathbf{x}}
\left\{ (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 + f(\mathbf{x})
\right\} \;\;.
The projection operator of function :math:`f` is defined as
.. math::
\mathrm{proj}_{f,\gamma}(\mathbf{v}) &= \mathrm{argmin}_{\mathbf{x}}
(1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \; \text{ s.t. } \;
f(\mathbf{x}) \leq \gamma \\ &= \mathrm{prox}_g(\mathbf{v})
where :math:`g(\mathbf{v}) = \iota_C(\mathbf{v})`, with
:math:`\iota_C` denoting the indicator function of set
:math:`C = \{ \mathbf{x} \; | \; f(\mathbf{x}) \leq \gamma \}`.
"""
from __future__ import division
from builtins import range
import numpy as np
import scipy.optimize as optim
try:
import numexpr as ne
except ImportError:
have_numexpr = False
else:
have_numexpr = True
import sporco.linalg as sl
__author__ = """<NAME> <<EMAIL>>"""
def norm_l0(x, axis=None, eps=0.0):
r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
"""
nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl0.size == 1:
nl0 = nl0.ravel()[0]
return nl0
def prox_l0(v, alpha):
r"""Proximal operator of the :math:`\ell_0` "norm" (hard thresholding)
.. math::
\mathrm{prox}_{\alpha f}(v) = \mathcal{S}_{0,\alpha}(\mathbf{v})
= \left\{ \begin{array}{ccc} 0 & \text{if} &
| v | < \sqrt{2 \alpha} \\ v &\text{if} &
| v | \geq \sqrt{2 \alpha} \end{array} \right.
Unlike the corresponding :func:`norm_l0`, there is no need for an
`axis` parameter since the proximal operator of the :math:`\ell_0`
norm is the same when taken independently over each element, or
over their sum.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
Returns
-------
x : ndarray
Output array
"""
return (np.abs(v) >= np.sqrt(2.0*alpha)) * v
def norm_l1(x, axis=None):
r"""Compute the :math:`\ell_1` norm
.. math::
\| \mathbf{x} \|_1 = \sum_i | x_i |
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_1` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
Returns
-------
nl1 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
"""
nl1 = np.sum(np.abs(x), axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl1.size == 1:
nl1 = nl1.ravel()[0]
return nl1
def prox_l1(v, alpha):
r"""Proximal operator of the :math:`\ell_1` norm (scalar
shrinkage/soft thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) =
\mathcal{S}_{1,\alpha}(\mathbf{v}) = \mathrm{sign}(\mathbf{v}) \odot
\max(0, |\mathbf{v}| - \alpha)
where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_1`.
Unlike the corresponding :func:`norm_l1`, there is no need for an
`axis` parameter since the proximal operator of the :math:`\ell_1`
norm is the same when taken independently over each element, or
over their sum.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
Returns
-------
x : ndarray
Output array
"""
if have_numexpr:
return ne.evaluate(
'where(abs(v)-alpha > 0, where(v >= 0, 1, -1) * (abs(v)-alpha), 0)'
)
else:
return np.sign(v) * (np.clip(np.abs(v) - alpha, 0, float('Inf')))
def proj_l1(v, gamma, axis=None, method=None):
r"""Projection operator of the :math:`\ell_1` norm.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
axis : None or int or tuple of ints, optional (default None)
Axes of `v` over which to compute the :math:`\ell_1` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct norm values are
computed over the indices of the remaining axes of input array
`v`. **Note:** specifying a tuple of ints is not supported by
this function.
method : None or str, optional (default None)
Solver method to use. If `None`, the most appropriate choice is made
based on the `axis` parameter. Valid methods are
- 'scalarroot'
The solution is computed via the method of Sec. 6.5.2 in
:cite:`parikh-2014-proximal`.
- 'sortcumsum'
The solution is computed via the method of
:cite:`duchi-2008-efficient`.
Returns
-------
x : ndarray
Output array
"""
if method is None:
if axis is None:
method = 'scalarroot'
else:
method = 'sortcumsum'
if method == 'scalarroot':
if axis is not None:
raise ValueError('Method scalarroot only supports axis=None')
return _proj_l1_scalar_root(v, gamma)
elif method == 'sortcumsum':
if isinstance(axis, tuple):
raise ValueError('Method sortcumsum does not support tuple axis'
' values')
return _proj_l1_sortsum(v, gamma, axis)
else:
raise ValueError('Unknown solver method %s' % method)
def _proj_l1_scalar_root(v, gamma):
r"""Projection operator of the :math:`\ell_1` norm. The solution is
computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`.
There is no `axis` parameter since the algorithm for computing the
solution treats the input `v` as a single vector.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
Returns
-------
x : ndarray
Output array
"""
if norm_l1(v) <= gamma:
return v
else:
av = np.abs(v)
fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma
t = optim.brentq(fn, 0, av.max())
return prox_l1(v, t)
def _proj_l1_sortsum(v, gamma, axis=None):
r"""Projection operator of the :math:`\ell_1` norm. The solution is
computed via the method of :cite:`duchi-2008-efficient`.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
axis : None or int, optional (default None)
Axes of `v` over which to compute the :math:`\ell_1` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct norm values are
computed over the indices of the remaining axes of input array
`v`. **Note:** specifying a tuple of ints is not supported by
this function.
Returns
-------
x : ndarray
Output array
"""
if axis is None and norm_l1(v) <= gamma:
return v
if axis is not None and axis < 0:
axis = v.ndim + axis
av = np.abs(v)
vs = np.sort(av, axis=axis)
if axis is None:
N = v.size
c = 1.0 / np.arange(1, N+1, dtype=v.dtype).reshape(v.shape)
vs = vs[::-1].reshape(v.shape)
else:
N = v.shape[axis]
ns = [v.shape[k] if k == axis else 1 for k in range(v.ndim)]
c = 1.0 / np.arange(1, N+1, dtype=v.dtype).reshape(ns)
vs = vs[(slice(None),)*axis + (slice(None, None, -1),)]
t = c * (np.cumsum(vs, axis=axis).reshape(v.shape) - gamma)
K = np.sum(vs >= t, axis=axis, keepdims=True)
t = (np.sum(vs * (vs >= t), axis=axis, keepdims=True) - gamma) / K
t = np.asarray(np.maximum(0, t), dtype=v.dtype)
return np.sign(v) * np.where(av > t, av - t, 0)
def norm_2l2(x, axis=None):
r"""Compute the squared :math:`\ell_2` norm
.. math::
\| \mathbf{x} \|_2^2 = \sum_i x_i^2
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
Returns
-------
nl2 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector.
"""
nl2 = np.sum(x**2, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl2.size == 1:
nl2 = nl2.ravel()[0]
return nl2
def norm_l2(x, axis=None):
r"""Compute the :math:`\ell_2` norm
.. math::
\| \mathbf{x} \|_2 = \sqrt{ \sum_i x_i^2 }
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
Returns
-------
nl2 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector.
"""
return np.sqrt(norm_2l2(x, axis))
def norm_l21(x, axis=-1):
r"""Compute the :math:`\ell_{2,1}` mixed norm
.. math::
\| X \|_{2,1} = \sum_i \sqrt{ \sum_j X_{i,j}^2 }
where :math:`X_{i,j}` is element :math:`i,j` of matrix :math:`X`.
Parameters
----------
x : array_like
Input array :math:`X`
axis : None or int or tuple of ints, optional (default -1)
Axes of `x` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a
vector, in which case the result is just the :math:`\ell_2` norm.
If axes are specified, then the sum over the :math:`\ell_2` norm
values is computed over the indices of the remaining axes of input
array `x`.
Returns
-------
nl21 : float
Norm of :math:`X`
"""
return np.sum(norm_l2(x, axis=axis))
def prox_l2(v, alpha, axis=None):
r"""Proximal operator of the :math:`\ell_2` norm (vector shrinkage/soft
thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) = \frac{\mathbf{v}}
{\|\mathbf{v}\|_2} \max(0, \|\mathbf{v}\|_2 - \alpha) =
\mathcal{S}_{2,\alpha}(\mathbf{v})
where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_2`.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
axis : None or int or tuple of ints, optional (default None)
Axes of `v` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct norm values are
computed over the indices of the remaining axes of input array
`v`, which is equivalent to the proximal operator of the sum over
these values (i.e. an :math:`\ell_{2,1}` norm).
Returns
-------
x : ndarray
Output array
"""
a = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
b = np.maximum(0, a - alpha)
b = sl.zdivide(b, a)
return b*v
def proj_l2(v, gamma, axis=None):
r"""Projection operator of the :math:`\ell_2` norm.
The projection operator of the uncentered :math:`\ell_2` norm,
.. math::
\mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \;
\text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma
can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma}
(\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) =
\| \mathbf{x} \|_2`.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
gamma : float
Parameter :math:`\gamma`
axis : None or int or tuple of ints, optional (default None)
Axes of `v` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a vector.
If axes are specified, then distinct norm values are computed
over the indices of the remaining axes of input array `v`.
Returns
-------
x : ndarray
Output array
"""
d = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
return (d <= gamma)*v + (d > gamma)*(gamma*sl.zdivide(v, d))
def prox_l1l2(v, alpha, beta, axis=None):
r"""Proximal operator of the :math:`\ell_1` plus :math:`\ell_2` norm
(compound shrinkage/soft thresholding) :cite:`wohlberg-2012-local`
:cite:`chartrand-2013-nonconvex`
.. math::
\mathrm{prox}_{f}(\mathbf{v}) =
\mathcal{S}_{1,2,\alpha,\beta}(\mathbf{v}) =
\mathcal{S}_{2,\beta}(\mathcal{S}_{1,\alpha}(\mathbf{v}))
where :math:`f(\mathbf{x}) = \alpha \|\mathbf{x}\|_1 +
\beta \|\mathbf{x}\|_2`.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
beta : float or array_like
Parameter :math:`\beta`
axis : None or int or tuple of ints, optional (default None)
Axes of `v` over which to compute the :math:`\ell_2` norm. If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct norm values are
computed over the indices of the remaining axes of input array
`v`, which is equivalent to the proximal operator of the sum over
these values (i.e. an :math:`\ell_{2,1}` norm).
Returns
-------
x : ndarray
Output array
"""
return prox_l2(prox_l1(v, alpha), beta, axis)
def norm_nuclear(x):
r"""Compute the nuclear norm
.. math::
\| X \|_1 = \sum_i \sigma_i
where :math:`\sigma_i` are the singular values of matrix :math:`X`.
Parameters
----------
x : array_like
Input array :math:`X`
Returns
-------
nncl : float
Norm of `x`
"""
return np.sum(np.linalg.svd(sl.promote16(x), compute_uv=False))
def prox_nuclear(v, alpha):
r"""Proximal operator of the nuclear norm :cite:`cai-2010-singular`.
Parameters
----------
v : array_like
Input array :math:`V`
alpha : float
Parameter :math:`\alpha`
Returns
-------
x : ndarray
Output array
s : ndarray
Singular values of `x`
"""
U, s, V = sl.promote16(v, fn=np.linalg.svd, full_matrices=False)
ss = np.maximum(0, s - alpha)
return np.dot(U, np.dot(np.diag(ss), V)), ss
| [
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.where",
"numpy.sort",
"numpy.diag",
"numpy.sum",
"builtins.range",
"numpy.sign",
"numpy.cumsum",
"numexpr.evaluate",
"sporco.linalg.promote16",
"numpy.maximum",
"sporco.linalg.zdivide"
] | [((9516, 9525), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (9522, 9525), True, 'import numpy as np\n'), ((9535, 9557), 'numpy.sort', 'np.sort', (['av'], {'axis': 'axis'}), '(av, axis=axis)\n', (9542, 9557), True, 'import numpy as np\n'), ((10009, 10050), 'numpy.sum', 'np.sum', (['(vs >= t)'], {'axis': 'axis', 'keepdims': '(True)'}), '(vs >= t, axis=axis, keepdims=True)\n', (10015, 10050), True, 'import numpy as np\n'), ((11016, 11056), 'numpy.sum', 'np.sum', (['(x ** 2)'], {'axis': 'axis', 'keepdims': '(True)'}), '(x ** 2, axis=axis, keepdims=True)\n', (11022, 11056), True, 'import numpy as np\n'), ((13949, 13973), 'numpy.maximum', 'np.maximum', (['(0)', '(a - alpha)'], {}), '(0, a - alpha)\n', (13959, 13973), True, 'import numpy as np\n'), ((13982, 13998), 'sporco.linalg.zdivide', 'sl.zdivide', (['b', 'a'], {}), '(b, a)\n', (13992, 13998), True, 'import sporco.linalg as sl\n'), ((17183, 17237), 'sporco.linalg.promote16', 'sl.promote16', (['v'], {'fn': 'np.linalg.svd', 'full_matrices': '(False)'}), '(v, fn=np.linalg.svd, full_matrices=False)\n', (17195, 17237), True, 'import sporco.linalg as sl\n'), ((17247, 17271), 'numpy.maximum', 'np.maximum', (['(0)', '(s - alpha)'], {}), '(0, s - alpha)\n', (17257, 17271), True, 'import numpy as np\n'), ((4885, 4894), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4891, 4894), True, 'import numpy as np\n'), ((5885, 5970), 'numexpr.evaluate', 'ne.evaluate', (['"""where(abs(v)-alpha > 0, where(v >= 0, 1, -1) * (abs(v)-alpha), 0)"""'], {}), "('where(abs(v)-alpha > 0, where(v >= 0, 1, -1) * (abs(v)-alpha), 0)'\n )\n", (5896, 5970), True, 'import numexpr as ne\n'), ((8446, 8455), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (8452, 8455), True, 'import numpy as np\n'), ((10141, 10157), 'numpy.maximum', 'np.maximum', (['(0)', 't'], {}), '(0, t)\n', (10151, 10157), True, 'import numpy as np\n'), ((10185, 10195), 'numpy.sign', 'np.sign', (['v'], {}), '(v)\n', (10192, 10195), True, 'import numpy as np\n'), ((10198, 10225), 
'numpy.where', 'np.where', (['(av > t)', '(av - t)', '(0)'], {}), '(av > t, av - t, 0)\n', (10206, 10225), True, 'import numpy as np\n'), ((13901, 13941), 'numpy.sum', 'np.sum', (['(v ** 2)'], {'axis': 'axis', 'keepdims': '(True)'}), '(v ** 2, axis=axis, keepdims=True)\n', (13907, 13941), True, 'import numpy as np\n'), ((15049, 15089), 'numpy.sum', 'np.sum', (['(v ** 2)'], {'axis': 'axis', 'keepdims': '(True)'}), '(v ** 2, axis=axis, keepdims=True)\n', (15055, 15089), True, 'import numpy as np\n'), ((3084, 3093), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3090, 3093), True, 'import numpy as np\n'), ((4061, 4070), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (4067, 4070), True, 'import numpy as np\n'), ((4074, 4094), 'numpy.sqrt', 'np.sqrt', (['(2.0 * alpha)'], {}), '(2.0 * alpha)\n', (4081, 4094), True, 'import numpy as np\n'), ((6013, 6023), 'numpy.sign', 'np.sign', (['v'], {}), '(v)\n', (6020, 6023), True, 'import numpy as np\n'), ((10060, 10108), 'numpy.sum', 'np.sum', (['(vs * (vs >= t))'], {'axis': 'axis', 'keepdims': '(True)'}), '(vs * (vs >= t), axis=axis, keepdims=True)\n', (10066, 10108), True, 'import numpy as np\n'), ((16787, 16802), 'sporco.linalg.promote16', 'sl.promote16', (['x'], {}), '(x)\n', (16799, 16802), True, 'import sporco.linalg as sl\n'), ((9795, 9808), 'builtins.range', 'range', (['v.ndim'], {}), '(v.ndim)\n', (9800, 9808), False, 'from builtins import range\n'), ((15136, 15152), 'sporco.linalg.zdivide', 'sl.zdivide', (['v', 'd'], {}), '(v, d)\n', (15146, 15152), True, 'import sporco.linalg as sl\n'), ((17300, 17311), 'numpy.diag', 'np.diag', (['ss'], {}), '(ss)\n', (17307, 17311), True, 'import numpy as np\n'), ((6035, 6044), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (6041, 6044), True, 'import numpy as np\n'), ((8486, 8507), 'numpy.maximum', 'np.maximum', (['(0)', '(av - t)'], {}), '(0, av - t)\n', (8496, 8507), True, 'import numpy as np\n'), ((9616, 9650), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {'dtype': 'v.dtype'}), 
'(1, N + 1, dtype=v.dtype)\n', (9625, 9650), True, 'import numpy as np\n'), ((9828, 9862), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {'dtype': 'v.dtype'}), '(1, N + 1, dtype=v.dtype)\n', (9837, 9862), True, 'import numpy as np\n'), ((9950, 9974), 'numpy.cumsum', 'np.cumsum', (['vs'], {'axis': 'axis'}), '(vs, axis=axis)\n', (9959, 9974), True, 'import numpy as np\n')] |
from pynq import Overlay
from pynq.lib import AxiGPIO
ol = Overlay("./overlays/CICADA_N_CLAIRE.bit")
import numpy as np
import time
trig_ldo_dut_ip = ol.ip_dict['gpio_spi_trig_ldo_dut']
dut_tx_rx_data_ip = ol.ip_dict['gpio_spi_dut_tx_rx_data']
ts_rst_dut_rst_ip = ol.ip_dict['gpio_spi_ts_rst_dut_rst']
trig_dut = AxiGPIO(trig_ldo_dut_ip).channel2
dut_tx_data = AxiGPIO(dut_tx_rx_data_ip).channel1
dut_rx_data = AxiGPIO(dut_tx_rx_data_ip).channel2
dut_rst = AxiGPIO(ts_rst_dut_rst_ip).channel2
def rst():
dut_rst.write(0,0xf)
time.sleep(0.1)
dut_rst.write(1,0xf)
time.sleep(0.1)
dut_rst.write(0,0xf)
time.sleep(0.1)
def send_trig():
trig_val = trig_dut.read()
#print('prev trig val: %i' %(trig_val))
trig_dut.write(trig_val^1,0xf)
trig_val = trig_dut.read()
#print('current trig val: %i' %(trig_val))
def make_addr_packet(addr):
addr_ld1 = addr%2
addr_ld2 = addr%4
addr_ld3 = addr%8
addr_ld4 = addr%16
addr_ld5 = addr%32
addr_ld6 = addr%64
addr_ld7 = addr%128
addr0 = addr_ld1
addr1 = (addr_ld2 - addr_ld1)>>1
addr2 = (addr_ld3 - addr_ld2)>>2
addr3 = (addr_ld4 - addr_ld3)>>3
addr4 = (addr_ld5 - addr_ld4)>>4
addr5 = (addr_ld6 - addr_ld5)>>5
addr6 = (addr_ld7 - addr_ld6)>>6
#print(addr0, addr1, addr2, addr3, addr4, addr5, addr6)
return (addr6) + (addr5 << 1) + (addr4 << 2) + (addr3 << 3) + (addr2 << 4) + (addr1<<5) + (addr0 << 6)
#def write_single(addr, d0, d1, d2, d3, d4, d5, d6, d7):
def read_wc():
addr = int(input("target read addr.:"))
print('')
if(addr > 63):
print("invalid target address!")
return -1
addr_reversed = make_addr_packet(addr)
spi_packet = (1<<15) + (addr_reversed<<8) + 0
dut_tx_data.write(spi_packet,0xffff)
send_trig()
data_read = dut_rx_data.read()
bin_data_read = bin(data_read)
#print("addr %i data: 0x%X",data_read)
#print("addr %i data ==> ", bin_data_read)
print("Addr:",addr,"|| data: ",bin_data_read[2:].zfill(8),"read")
return bin_data_read
def write_single_wc(): #dut spi write with command
addr = int(input("target addr: "))
print(' ')
addr_reversed = make_addr_packet(addr)
#print(addr_reversed)
weight_array = np.array([128,64,32,16,8,4,2,1])
index = 0
data_packet = 0
for weight in weight_array:
#print(weight)
print(index,"th bit")
current_bit = int(input("enter the value (1,0):"))
if(current_bit > 1):
print("FALSE INPUT!!!")
return 0
else:
data_packet = data_packet + current_bit * weight
index = index + 1
spi_packet = (0<<15) + (addr_reversed<<8) + data_packet
#print(hex(data_packet), hex(addr_reversed), hex(spi_packet))
dut_tx_data.write(int(spi_packet),0xffff)
send_trig()
print(' ')
print("Addr:",addr,"|| data: ",bin(spi_packet%256)[2:].zfill(8),"sent")
def write_single(addr,b0,b1,b2,b3,b4,b5,b6,b7): #dut spi write with input
addr_reversed = make_addr_packet(addr)
weight_array = np.array([128,64,32,16,8,4,2,1])
data_array = np.array([b0,b1,b2,b3,b4,b5,b6,b7])
index = 0
data_packet = 0
for weight in weight_array:
#print(weight)
#print(index,"th bit")
current_bit = data_array[index]
#print("debug: data value ==>",current_bit)
if(current_bit > 1):
print("FALSE INPUT!!!")
return 0
else:
data_packet = data_packet + current_bit * weight
index = index + 1
spi_packet = (0<<15) + (addr_reversed<<8) + data_packet
#print(hex(data_packet), hex(addr_reversed), hex(spi_packet))
dut_tx_data.write(int(spi_packet),0xffff)
send_trig()
print("Addr:",addr,"|| data: ",bin(spi_packet%256)[2:].zfill(8),"sent")
def commit_w_addr0to15(): ## write commit (from addr0 to addr15)
#write 1 to addr 127 & write 0 to addr 127 (after write addr 0 ~ 15)
#update addr 0 ~ 15 together
write_single(127,1,0,0,0,0,0,0,0)
write_single(127,0,0,0,0,0,0,0,0)
"""
spi_packet_127_0 = (0<<15) + (127<<8) + (0<<7)
spi_packet_127_1 = (0<<15) + (127<<8) + (1<<7)
dut_tx_data.write(spi_packet_127_1,0xffff)
send_trig()
dut_tx_data.write(spi_packet_127_0,0xffff)
send_trig()
"""
def sel_io_50ohm(): ## select signal to plot by 50 ohm io
#signal selection & io enable
b0 = int(input("50Ohm driver on/off (""0"": on, ""1"": off): "))
if (b0 == 1):
print("50ohm driver shut down!")
write_single(24,1,0,0,0,0,0,0,0)
return 0
elif (b0 == 0):
sig_sel = int(input("select signal: 0) main clk 1) ck_gvco_ctat 2) ck_gvco_fic 3) vss ==> "))
b1 = int(sig_sel%2)
b2 = int((sig_sel>1))
print("debug",b2,b1)
addr = 24
write_single(addr,b0,b1,b2,0,0,0,0,0)
else:
print("invalid input!")
return -1
def set_str_io_50ohm(): #program 50ohm io pull down strength
pull_strength = int(input("enter 50-ohm io strength [PULL] (0~8)"))
pull_str_bit = [1, 1, 1, 1, 1, 1, 1, 1]
index_l = pull_strength
while (index_l > 0):
#print(" debug: index: ", index)
pull_str_bit[index_l-1] = 0
index_l = index_l - 1
push_strength = int(input("enter 50-ohm io strength [PUSH] (0~8)"))
push_str_bit = [1, 1, 1, 1, 1, 1, 1, 1]
index_h = push_strength
while (index_h > 0):
#print(" debug: index: ", index)
push_str_bit[index_h-1] = 0
index_h = index_h - 1
#print("debug", pull_str_bit)
#write_single(22,1,1,1,1,1,1,1,1) ## disable push
write_single(22,push_str_bit[0],push_str_bit[1],push_str_bit[2],push_str_bit[3],push_str_bit[4],push_str_bit[5],push_str_bit[6],push_str_bit[7])
write_single(23,pull_str_bit[0],pull_str_bit[1],pull_str_bit[2],pull_str_bit[3],pull_str_bit[4],pull_str_bit[5],pull_str_bit[6],pull_str_bit[7])
| [
"pynq.lib.AxiGPIO",
"numpy.array",
"time.sleep",
"pynq.Overlay"
] | [((59, 100), 'pynq.Overlay', 'Overlay', (['"""./overlays/CICADA_N_CLAIRE.bit"""'], {}), "('./overlays/CICADA_N_CLAIRE.bit')\n", (66, 100), False, 'from pynq import Overlay\n'), ((316, 340), 'pynq.lib.AxiGPIO', 'AxiGPIO', (['trig_ldo_dut_ip'], {}), '(trig_ldo_dut_ip)\n', (323, 340), False, 'from pynq.lib import AxiGPIO\n'), ((364, 390), 'pynq.lib.AxiGPIO', 'AxiGPIO', (['dut_tx_rx_data_ip'], {}), '(dut_tx_rx_data_ip)\n', (371, 390), False, 'from pynq.lib import AxiGPIO\n'), ((414, 440), 'pynq.lib.AxiGPIO', 'AxiGPIO', (['dut_tx_rx_data_ip'], {}), '(dut_tx_rx_data_ip)\n', (421, 440), False, 'from pynq.lib import AxiGPIO\n'), ((460, 486), 'pynq.lib.AxiGPIO', 'AxiGPIO', (['ts_rst_dut_rst_ip'], {}), '(ts_rst_dut_rst_ip)\n', (467, 486), False, 'from pynq.lib import AxiGPIO\n'), ((537, 552), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (547, 552), False, 'import time\n'), ((582, 597), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (592, 597), False, 'import time\n'), ((627, 642), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (637, 642), False, 'import time\n'), ((2338, 2377), 'numpy.array', 'np.array', (['[128, 64, 32, 16, 8, 4, 2, 1]'], {}), '([128, 64, 32, 16, 8, 4, 2, 1])\n', (2346, 2377), True, 'import numpy as np\n'), ((3189, 3228), 'numpy.array', 'np.array', (['[128, 64, 32, 16, 8, 4, 2, 1]'], {}), '([128, 64, 32, 16, 8, 4, 2, 1])\n', (3197, 3228), True, 'import numpy as np\n'), ((3239, 3281), 'numpy.array', 'np.array', (['[b0, b1, b2, b3, b4, b5, b6, b7]'], {}), '([b0, b1, b2, b3, b4, b5, b6, b7])\n', (3247, 3281), True, 'import numpy as np\n')] |
"""
Methods to feed visual input to a system-under-test through the screen
"""
import copy
import logging
import os
import numpy as np
from PIL import Image
from pathlib import Path
from result_caching import store, is_iterable
from tqdm import tqdm
from brainio_base.stimuli import StimulusSet
framework_home = Path(os.getenv('BRAINSCORE_HOME', '~/.brain-score')).expanduser()
root_path = framework_home / "stimuli_on_screen"
_logger = logging.getLogger(__name__)
def place_on_screen(stimulus_set: StimulusSet, target_visual_degrees: int, source_visual_degrees: int = None):
_logger.debug(f"Converting {stimulus_set.identifier} to {target_visual_degrees} degrees")
assert source_visual_degrees or 'degrees' in stimulus_set, \
"Need to provide the source images' visual degrees either as a parameter or in the stimulus_set"
assert not (source_visual_degrees and 'degrees' in stimulus_set), \
"Got a parameter for the source images' visual degrees, but also found a 'degrees' column in the stimulus_set"
inferred_visual_degrees = _determine_visual_degrees(source_visual_degrees, stimulus_set)
if (inferred_visual_degrees == target_visual_degrees).all():
return stimulus_set
return _place_on_screen(stimuli_identifier=stimulus_set.identifier, stimulus_set=stimulus_set,
target_visual_degrees=target_visual_degrees, source_visual_degrees=source_visual_degrees)
def _determine_visual_degrees(visual_degrees, stimulus_set):
if not visual_degrees:
visual_degrees = stimulus_set['degrees']
if not is_iterable(visual_degrees):
visual_degrees = np.array([visual_degrees] * len(stimulus_set))
return visual_degrees
@store(identifier_ignore=['stimulus_set'])
def _place_on_screen(stimuli_identifier: str, stimulus_set: StimulusSet,
target_visual_degrees: int, source_visual_degrees: int = None):
converted_stimuli_id = f"{stimuli_identifier}--target{target_visual_degrees}--source{source_visual_degrees}"
source_visual_degrees = _determine_visual_degrees(source_visual_degrees, stimulus_set)
target_dir = root_path / converted_stimuli_id
target_dir.mkdir(parents=True, exist_ok=False)
image_converter = ImageConverter(target_dir=target_dir)
converted_image_paths = {}
for image_id, image_degrees in tqdm(zip(stimulus_set['image_id'], source_visual_degrees),
total=len(stimulus_set), desc='convert image degrees'):
converted_image_path = image_converter.convert_image(image_path=stimulus_set.get_image(image_id),
source_degrees=image_degrees,
target_degrees=target_visual_degrees)
converted_image_paths[image_id] = converted_image_path
converted_stimuli = StimulusSet(stimulus_set.copy(deep=True)) # without copy, it will link to the previous stim set
converted_stimuli.image_paths = converted_image_paths
converted_stimuli.identifier = converted_stimuli_id
converted_stimuli['degrees'] = target_visual_degrees
converted_stimuli.original_paths = copy.deepcopy(stimulus_set.image_paths)
return converted_stimuli
class ImageConverter:
def __init__(self, target_dir):
self._target_dir = Path(target_dir)
def convert_image(self, image_path, source_degrees, target_degrees):
if source_degrees == target_degrees:
return image_path
ratio = target_degrees / source_degrees
with self._load_image(image_path) as image:
converted_image = self.apply_ratio(image, ratio)
target_path = str(self._target_dir / os.path.basename(image_path))
self._write(converted_image, target_path=target_path)
return target_path
def apply_ratio(self, image: Image, ratio: float, background_color='gray'):
image_size = np.array(image.size)
target_image_size = (ratio * image_size).round().astype(int)
if ratio >= 1: # enlarge the image
return self._enlarge(image, target_image_size, background_color=background_color)
else: # crop the image
return self._center_crop(image, target_image_size)
def _enlarge(self, image, target_size, background_color):
background_image = Image.new('RGB', tuple(target_size), background_color)
center_topleft = ((target_size - image.size) / 2).round().astype(int)
background_image.paste(image, tuple(center_topleft))
return background_image
def _center_crop(self, image, crop_size):
left, upper = ((image.size - crop_size) / 2).round().astype(int)
right, lower = [left, upper] + crop_size
image = image.crop((left, upper, right, lower))
return image
def _round(self, number):
return np.array(number).round().astype(int)
def _load_image(self, image_path):
return Image.open(image_path)
def _resize_image(self, image, image_size):
return image.resize((image_size, image_size), Image.ANTIALIAS)
def _center_on_background(self, center_image, background_size, background_color='gray'):
image = Image.new('RGB', (background_size, background_size), background_color)
center_topleft = self._round(np.subtract(background_size, center_image.size) / 2)
image.paste(center_image, tuple(center_topleft))
return image
def _write(self, image, target_path):
image.save(target_path)
| [
"logging.getLogger",
"PIL.Image.open",
"os.getenv",
"pathlib.Path",
"PIL.Image.new",
"numpy.subtract",
"numpy.array",
"os.path.basename",
"copy.deepcopy",
"result_caching.store",
"result_caching.is_iterable"
] | [((440, 467), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (457, 467), False, 'import logging\n'), ((1720, 1761), 'result_caching.store', 'store', ([], {'identifier_ignore': "['stimulus_set']"}), "(identifier_ignore=['stimulus_set'])\n", (1725, 1761), False, 'from result_caching import store, is_iterable\n'), ((3198, 3237), 'copy.deepcopy', 'copy.deepcopy', (['stimulus_set.image_paths'], {}), '(stimulus_set.image_paths)\n', (3211, 3237), False, 'import copy\n'), ((1590, 1617), 'result_caching.is_iterable', 'is_iterable', (['visual_degrees'], {}), '(visual_degrees)\n', (1601, 1617), False, 'from result_caching import store, is_iterable\n'), ((3354, 3370), 'pathlib.Path', 'Path', (['target_dir'], {}), '(target_dir)\n', (3358, 3370), False, 'from pathlib import Path\n'), ((3959, 3979), 'numpy.array', 'np.array', (['image.size'], {}), '(image.size)\n', (3967, 3979), True, 'import numpy as np\n'), ((4982, 5004), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4992, 5004), False, 'from PIL import Image\n'), ((5235, 5305), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(background_size, background_size)', 'background_color'], {}), "('RGB', (background_size, background_size), background_color)\n", (5244, 5305), False, 'from PIL import Image\n'), ((320, 366), 'os.getenv', 'os.getenv', (['"""BRAINSCORE_HOME"""', '"""~/.brain-score"""'], {}), "('BRAINSCORE_HOME', '~/.brain-score')\n", (329, 366), False, 'import os\n'), ((5343, 5390), 'numpy.subtract', 'np.subtract', (['background_size', 'center_image.size'], {}), '(background_size, center_image.size)\n', (5354, 5390), True, 'import numpy as np\n'), ((3730, 3758), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (3746, 3758), False, 'import os\n'), ((4890, 4906), 'numpy.array', 'np.array', (['number'], {}), '(number)\n', (4898, 4906), True, 'import numpy as np\n')] |
from lib.helper.logger import logger
from lib.core.base_trainer.net_work import Train
from lib.dataset.dataietr import FaceBoxesDataIter,DataIter
from lib.core.model.facebox.net import FaceBoxes
import tensorflow as tf
import cv2
import numpy as np
from train_config import config as cfg
import setproctitle
logger.info('The trainer start')
setproctitle.setproctitle("faceboxes")
def main():
epochs=cfg.TRAIN.epoch
batch_size=cfg.TRAIN.batch_size
enable_function=False
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
# strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model=FaceBoxes()
###run a time to build the model
image = np.zeros(shape=(1, 512, 512, 3), dtype=np.float32)
model.inference(image)
## load pretrained weights
if cfg.MODEL.pretrained_model is not None:
logger.info('load pretrained params from %s'%cfg.MODEL.pretrained_model)
model.load_weights(cfg.MODEL.pretrained_model)
### build trainer
trainer = Train(epochs, enable_function, model, batch_size, strategy)
### build dataiter
train_ds = DataIter(cfg.DATA.root_path, cfg.DATA.train_txt_path, True)
test_ds = DataIter(cfg.DATA.root_path, cfg.DATA.val_txt_path, False)
### it's a tensorpack data iter, produce a batch every iter
train_dataset=tf.data.Dataset.from_generator(train_ds,
output_types=(tf.float32,tf.float32,tf.float32),
output_shapes=([None,None,None,None],[None,None,None],[None,None]))
test_dataset = tf.data.Dataset.from_generator(test_ds,
output_types=(tf.float32,tf.float32,tf.float32),
output_shapes=([None,None,None,None],[None,None,None],[None,None]))
####
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)
## check the data
if cfg.TRAIN.vis:
for images, labels, matches in train_dist_dataset:
for i in range(images.shape[0]):
example_image=np.array(images[i],dtype=np.uint8)
example_image = cv2.cvtColor(example_image, cv2.COLOR_BGR2RGB)
example_label=np.array(labels[i])
cv2.imshow('example',example_image)
cv2.waitKey(0)
break
##train
trainer.custom_loop(train_dist_dataset,
test_dist_dataset,
strategy)
if __name__=='__main__':
main() | [
"tensorflow.distribute.OneDeviceStrategy",
"tensorflow.config.experimental.set_memory_growth",
"setproctitle.setproctitle",
"lib.dataset.dataietr.DataIter",
"tensorflow.data.Dataset.from_generator",
"tensorflow.config.experimental.list_logical_devices",
"cv2.imshow",
"lib.core.base_trainer.net_work.Tr... | [((311, 343), 'lib.helper.logger.logger.info', 'logger.info', (['"""The trainer start"""'], {}), "('The trainer start')\n", (322, 343), False, 'from lib.helper.logger import logger\n'), ((345, 383), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['"""faceboxes"""'], {}), "('faceboxes')\n", (370, 383), False, 'import setproctitle\n'), ((500, 551), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (544, 551), True, 'import tensorflow as tf\n'), ((1052, 1100), 'tensorflow.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', ([], {'device': '"""/gpu:0"""'}), "(device='/gpu:0')\n", (1083, 1100), True, 'import tensorflow as tf\n'), ((1597, 1656), 'lib.core.base_trainer.net_work.Train', 'Train', (['epochs', 'enable_function', 'model', 'batch_size', 'strategy'], {}), '(epochs, enable_function, model, batch_size, strategy)\n', (1602, 1656), False, 'from lib.core.base_trainer.net_work import Train\n'), ((1697, 1756), 'lib.dataset.dataietr.DataIter', 'DataIter', (['cfg.DATA.root_path', 'cfg.DATA.train_txt_path', '(True)'], {}), '(cfg.DATA.root_path, cfg.DATA.train_txt_path, True)\n', (1705, 1756), False, 'from lib.dataset.dataietr import FaceBoxesDataIter, DataIter\n'), ((1771, 1829), 'lib.dataset.dataietr.DataIter', 'DataIter', (['cfg.DATA.root_path', 'cfg.DATA.val_txt_path', '(False)'], {}), '(cfg.DATA.root_path, cfg.DATA.val_txt_path, False)\n', (1779, 1829), False, 'from lib.dataset.dataietr import FaceBoxesDataIter, DataIter\n'), ((1914, 2090), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['train_ds'], {'output_types': '(tf.float32, tf.float32, tf.float32)', 'output_shapes': '([None, None, None, None], [None, None, None], [None, None])'}), '(train_ds, output_types=(tf.float32, tf.\n float32, tf.float32), output_shapes=([None, None, None, None], [None,\n None, None], [None, None]))\n', 
(1944, 2090), True, 'import tensorflow as tf\n'), ((2189, 2364), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['test_ds'], {'output_types': '(tf.float32, tf.float32, tf.float32)', 'output_shapes': '([None, None, None, None], [None, None, None], [None, None])'}), '(test_ds, output_types=(tf.float32, tf.\n float32, tf.float32), output_shapes=([None, None, None, None], [None,\n None, None], [None, None]))\n', (2219, 2364), True, 'import tensorflow as tf\n'), ((1192, 1203), 'lib.core.model.facebox.net.FaceBoxes', 'FaceBoxes', ([], {}), '()\n', (1201, 1203), False, 'from lib.core.model.facebox.net import FaceBoxes\n'), ((1262, 1312), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 512, 512, 3)', 'dtype': 'np.float32'}), '(shape=(1, 512, 512, 3), dtype=np.float32)\n', (1270, 1312), True, 'import numpy as np\n'), ((1432, 1506), 'lib.helper.logger.logger.info', 'logger.info', (["('load pretrained params from %s' % cfg.MODEL.pretrained_model)"], {}), "('load pretrained params from %s' % cfg.MODEL.pretrained_model)\n", (1443, 1506), False, 'from lib.helper.logger import logger\n'), ((774, 824), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (817, 824), True, 'import tensorflow as tf\n'), ((695, 746), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (735, 746), True, 'import tensorflow as tf\n'), ((2966, 3001), 'numpy.array', 'np.array', (['images[i]'], {'dtype': 'np.uint8'}), '(images[i], dtype=np.uint8)\n', (2974, 3001), True, 'import numpy as np\n'), ((3033, 3079), 'cv2.cvtColor', 'cv2.cvtColor', (['example_image', 'cv2.COLOR_BGR2RGB'], {}), '(example_image, cv2.COLOR_BGR2RGB)\n', (3045, 3079), False, 'import cv2\n'), ((3110, 3129), 'numpy.array', 'np.array', (['labels[i]'], {}), '(labels[i])\n', (3118, 3129), True, 'import numpy as np\n'), ((3146, 3182), 
'cv2.imshow', 'cv2.imshow', (['"""example"""', 'example_image'], {}), "('example', example_image)\n", (3156, 3182), False, 'import cv2\n'), ((3198, 3212), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3209, 3212), False, 'import cv2\n')] |
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (1x1 reduce, 3x3, 1x1 expand)."""

    expansion = 4  # output channels = planes * expansion

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # Main path: 1x1 channel reduction, 3x3 spatial conv, 1x1 expansion.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Identity shortcut, or a 1x1 projection when shape changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
def normalize_fn(tensor, mean, std):
    """Differentiable version of torchvision.functional.normalize.

    Assumes the colour channel of ``tensor`` is at dim=1 (NCHW); ``mean``
    and ``std`` are 1-D tensors with one entry per channel.
    """
    # Insert broadcast dimensions so the per-channel statistics line up
    # with an NCHW tensor.
    mean = mean[None, :, None, None]
    std = std[None, :, None, None]
    return (tensor - mean) / std
class NormalizeByChannelMeanStd(nn.Module):
    """Input-normalisation layer storing per-channel mean/std as buffers.

    Buffers (not parameters) follow ``.to()``/``.cuda()`` moves and are
    saved in the state dict, but are never optimised.
    """

    def __init__(self, mean, std):
        super(NormalizeByChannelMeanStd, self).__init__()
        # Accept plain sequences as well as tensors.
        mean = mean if isinstance(mean, torch.Tensor) else torch.tensor(mean)
        std = std if isinstance(std, torch.Tensor) else torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        # Delegate to the module-level helper so the op stays differentiable.
        return normalize_fn(tensor, self.mean, self.std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)
class ResNet(nn.Module):
    """ResNet backbone for small (CIFAR/STL-sized) images.

    Besides the usual stem/stages/classifier it carries:
      * a per-dataset input-normalisation layer (or ``nn.Identity``),
      * optional low-/high-pass filtering of the input in the Fourier
        domain (controlled by ``low_freq``/``high_freq`` and ``radius``),
      * projection and prediction MLP heads for contrastive training.
    """

    def __init__(self, block, num_blocks, cifar="cifar10", num_classes=10, feat_dim=128, low_freq=False, high_freq=False, radius=0, norm_layer=True):
        # block:       residual block class (BasicBlock or Bottleneck)
        # num_blocks:  depths of the four residual stages
        # cifar:       dataset name selecting the normalisation statistics
        # feat_dim:    unused here (head widths are hard-coded to 128)
        # radius:      cutoff radius (pixels) for the frequency filters
        # norm_layer:  prepend mean/std normalisation when True
        super(ResNet, self).__init__()
        self.in_planes = 64
        # self.normalize = NormalizeByChannelMeanStd(
        #     mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Per-dataset channel statistics.
        # NOTE(review): an unrecognised ``cifar`` value leaves mean/std
        # unbound, so the ``norm_layer`` branch below raises NameError.
        if cifar == "cifar10":
            mean = (0.4914, 0.4822, 0.4465)
            std = (0.2470, 0.2435, 0.2616)
        elif cifar == "cifar100":
            mean = (0.5071, 0.4865, 0.4409)
            std = (0.2673, 0.2564, 0.2762)
        elif cifar == "stl10":
            mean = (0.4914, 0.4823, 0.4466)
            std = (0.247, 0.243, 0.261)
        if norm_layer:
            self.normalize = NormalizeByChannelMeanStd(
                mean=mean, std=std)
        else:
            self.normalize = nn.Identity()
        # CIFAR-style stem: single 3x3 conv, stride 1, no max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        # self.linear_contrast = nn.Linear(512*block.expansion, 128)
        dim_in = 512*block.expansion
        # Projection head: backbone feature -> 128-d embedding.
        self.head_proj = nn.Sequential(
            nn.Linear(dim_in, dim_in),
            # nn.BatchNorm1d(dim_in),
            nn.ReLU(inplace=True),
            nn.Linear(dim_in, 128)
        )
        # Prediction head applied on top of the projection.
        self.head_pred = nn.Sequential(
            nn.Linear(128, 128),
            # nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 128)
        )
        self.low_freq = low_freq
        self.high_freq = high_freq
        self.radius = radius
        # Registered but unused in forward() (F.avg_pool2d is used instead).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def distance(self, i, j, imageSize, r):
        # Binary indicator: 1.0 when pixel (i, j) lies strictly inside
        # the circle of radius ``r`` centred on the image, else 0.
        dis = np.sqrt((i - imageSize / 2) ** 2 + (j - imageSize / 2) ** 2)
        if dis < r:
            return 1.0
        else:
            return 0

    def mask_radial(self, img, r):
        # Circular binary mask the same size as ``img``, built with a
        # Python double loop (O(rows*cols) scalar work per call).
        # NOTE(review): ``.cuda()`` hard-codes GPU execution — confirm
        # this model is only ever run on CUDA devices.
        rows, cols = img.shape
        mask = torch.zeros((rows, cols))
        for i in range(rows):
            for j in range(cols):
                mask[i, j] = self.distance(i, j, imageSize=rows, r=r)
        return mask.cuda()

    def filter_low(self, Images, r):
        # Low-pass: keep only frequency components inside radius ``r``.
        mask = self.mask_radial(torch.zeros([Images.shape[2], Images.shape[3]]), r)
        bs, c, h, w = Images.shape
        x = Images.reshape([bs*c, h, w])
        # Shift the spectrum so the centred circular mask selects the
        # low frequencies.
        fd = torch.fft.fftshift(torch.fft.fftn(x, dim=(-2, -1)))
        mask = mask.unsqueeze(0).repeat([bs*c, 1, 1])
        fd = fd * mask
        fd = torch.fft.ifftn(torch.fft.ifftshift(fd), dim=(-2, -1))
        # Discard the (numerically tiny) imaginary part of the inverse FFT.
        fd = torch.real(fd)
        fd = fd.reshape([bs, c, h, w])
        return fd

    def filter_high(self, Images, r):
        # High-pass: complement of filter_low — keep components outside
        # radius ``r`` by multiplying with the inverted mask.
        mask = self.mask_radial(torch.zeros([Images.shape[2], Images.shape[3]]), r)
        bs, c, h, w = Images.shape
        x = Images.reshape([bs * c, h, w])
        fd = torch.fft.fftshift(torch.fft.fftn(x, dim=(-2, -1)))
        mask = mask.unsqueeze(0).repeat([bs * c, 1, 1])
        fd = fd * (1. - mask)
        fd = torch.fft.ifftn(torch.fft.ifftshift(fd), dim=(-2, -1))
        fd = torch.real(fd)
        fd = fd.reshape([bs, c, h, w])
        return fd
    # return np.array(Images_freq_low), np.array(Images_freq_high)

    def forward(self, x, contrast=False, return_feat=False, CF=False):
        # contrast:    return L2-normalised (projection, prediction) heads
        # return_feat: return the flattened backbone feature directly
        # CF:          with ``contrast``, additionally return the feature
        # img_org = x[0]
        x = self.normalize(x)
        # Optional frequency-domain filtering of the normalised input;
        # clamp back to [0, 1] because filtering can leave the range.
        if self.low_freq:
            x = self.filter_low(x, self.radius)
            x = torch.clamp(x, 0, 1)
        if self.high_freq:
            x = self.filter_high(x, self.radius)
            x = torch.clamp(x, 0, 1)
        # img_filter = x[0]
        # import cv2
        # img_org = img_org.detach().cpu().numpy()*255.
        # img_filter = img_filter.detach().cpu().numpy()*255.
        # cv2.imwrite('org.jpg', img_org.transpose([1,2,0]))
        # cv2.imwrite('filter.jpg', img_filter.transpose([1,2,0]))
        # exit(0)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Fixed 4x4 pooling — presumably sized for 32x32 inputs, which
        # yield 4x4 maps after three stride-2 stages; TODO confirm.
        out = F.avg_pool2d(out, 4)
        # out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        feat = out
        if return_feat:
            return out
        if contrast:
            # out = self.linear_contrast(out)
            proj = self.head_proj(out)
            pred = self.head_pred(proj)
            proj = F.normalize(proj, dim=1)
            pred = F.normalize(pred, dim=1)
            if CF:
                return proj, pred, feat
            else:
                return proj, pred
        else:
            out = self.linear(out)
        return out
def ResNet18(num_class=10, radius=8, low_freq=False, high_freq=False, **kwas):
    """Build an 18-layer ResNet (BasicBlock, stage depths [2, 2, 2, 2]).

    Extra keyword arguments are forwarded to :class:`ResNet`.
    """
    stages = [2, 2, 2, 2]
    return ResNet(BasicBlock, stages, num_classes=num_class, radius=radius,
                  low_freq=low_freq, high_freq=high_freq, **kwas)
def ResNet34(num_class=10, **kwas):
    """Build a 34-layer ResNet (BasicBlock, stage depths [3, 4, 6, 3]).

    Extra keyword arguments (e.g. ``radius``, ``low_freq``, ``high_freq``)
    are forwarded to :class:`ResNet`, matching :func:`ResNet18`.
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_class, **kwas)
def ResNet50(num_class=10, **kwas):
    """Build a 50-layer ResNet (Bottleneck, stage depths [3, 4, 6, 3]).

    Extra keyword arguments (e.g. ``radius``, ``low_freq``, ``high_freq``)
    are forwarded to :class:`ResNet`, matching :func:`ResNet18`.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_class, **kwas)
def ResNet101(num_class=10, **kwas):
    """Build a 101-layer ResNet (Bottleneck, stage depths [3, 4, 23, 3]).

    ``num_class`` defaults to 10 (the previous implicit default), so the
    original zero-argument call is unchanged; extra keyword arguments are
    forwarded to :class:`ResNet` for consistency with :func:`ResNet18`.
    """
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_class, **kwas)
def ResNet152(num_class=10, **kwas):
    """Build a 152-layer ResNet (Bottleneck, stage depths [3, 8, 36, 3]).

    ``num_class`` defaults to 10 (the previous implicit default), so the
    original zero-argument call is unchanged; extra keyword arguments are
    forwarded to :class:`ResNet` for consistency with :func:`ResNet18`.
    """
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_class, **kwas)
def test():
    """Smoke test: push one random CIFAR-sized batch through ResNet18."""
    net = ResNet18()
    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4 — plain tensors carry autograd state themselves.
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
# test() | [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Sequential",
"torch.fft.fftn",
"torch.real",
"torch.nn.Conv2d",
"torch.nn.functional.avg_pool2d",
"torch.tensor",
"torch.fft.ifftshift",
"torch.nn.functional.normalize",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.n... | [((482, 568), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=\n False)\n', (491, 568), True, 'import torch.nn as nn\n'), ((583, 605), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (597, 605), True, 'import torch.nn as nn\n'), ((627, 700), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n', (636, 700), True, 'import torch.nn as nn\n'), ((720, 742), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (734, 742), True, 'import torch.nn as nn\n'), ((768, 783), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (781, 783), True, 'import torch.nn as nn\n'), ((1219, 1230), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (1225, 1230), True, 'import torch.nn.functional as F\n'), ((1417, 1472), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(in_planes, planes, kernel_size=1, bias=False)\n', (1426, 1472), True, 'import torch.nn as nn\n'), ((1492, 1514), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1506, 1514), True, 'import torch.nn as nn\n'), ((1536, 1614), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n', (1545, 1614), True, 'import torch.nn as nn\n'), ((1634, 1656), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1648, 1656), True, 'import torch.nn as nn\n'), ((1678, 1747), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(self.expansion * planes)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, 
self.expansion * planes, kernel_size=1, bias=False)\n', (1687, 1747), True, 'import torch.nn as nn\n'), ((1765, 1804), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (1779, 1804), True, 'import torch.nn as nn\n'), ((1828, 1843), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1841, 1843), True, 'import torch.nn as nn\n'), ((2327, 2338), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (2333, 2338), True, 'import torch.nn.functional as F\n'), ((4121, 4185), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n', (4130, 4185), True, 'import torch.nn as nn\n'), ((4205, 4223), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4219, 4223), True, 'import torch.nn as nn\n'), ((4549, 4594), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (4558, 4594), True, 'import torch.nn as nn\n'), ((5205, 5233), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (5225, 5233), True, 'import torch.nn as nn\n'), ((5531, 5553), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (5544, 5553), True, 'import torch.nn as nn\n'), ((5613, 5673), 'numpy.sqrt', 'np.sqrt', (['((i - imageSize / 2) ** 2 + (j - imageSize / 2) ** 2)'], {}), '((i - imageSize / 2) ** 2 + (j - imageSize / 2) ** 2)\n', (5620, 5673), True, 'import numpy as np\n'), ((5833, 5858), 'torch.zeros', 'torch.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (5844, 5858), False, 'import torch\n'), ((6440, 6454), 'torch.real', 'torch.real', (['fd'], {}), '(fd)\n', (6450, 6454), False, 'import torch\n'), ((6945, 6959), 'torch.real', 'torch.real', (['fd'], {}), '(fd)\n', (6955, 6959), False, 'import torch\n'), ((7943, 7963), 'torch.nn.functional.avg_pool2d', 
'F.avg_pool2d', (['out', '(4)'], {}), '(out, 4)\n', (7955, 7963), True, 'import torch.nn.functional as F\n'), ((2833, 2851), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (2845, 2851), False, 'import torch\n'), ((2916, 2933), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (2928, 2933), False, 'import torch\n'), ((4085, 4098), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (4096, 4098), True, 'import torch.nn as nn\n'), ((4753, 4778), 'torch.nn.Linear', 'nn.Linear', (['dim_in', 'dim_in'], {}), '(dim_in, dim_in)\n', (4762, 4778), True, 'import torch.nn as nn\n'), ((4830, 4851), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4837, 4851), True, 'import torch.nn as nn\n'), ((4865, 4887), 'torch.nn.Linear', 'nn.Linear', (['dim_in', '(128)'], {}), '(dim_in, 128)\n', (4874, 4887), True, 'import torch.nn as nn\n'), ((4951, 4970), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (4960, 4970), True, 'import torch.nn as nn\n'), ((5019, 5040), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5026, 5040), True, 'import torch.nn as nn\n'), ((5054, 5073), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (5063, 5073), True, 'import torch.nn as nn\n'), ((6089, 6136), 'torch.zeros', 'torch.zeros', (['[Images.shape[2], Images.shape[3]]'], {}), '([Images.shape[2], Images.shape[3]])\n', (6100, 6136), False, 'import torch\n'), ((6249, 6280), 'torch.fft.fftn', 'torch.fft.fftn', (['x'], {'dim': '(-2, -1)'}), '(x, dim=(-2, -1))\n', (6263, 6280), False, 'import torch\n'), ((6388, 6411), 'torch.fft.ifftshift', 'torch.fft.ifftshift', (['fd'], {}), '(fd)\n', (6407, 6411), False, 'import torch\n'), ((6583, 6630), 'torch.zeros', 'torch.zeros', (['[Images.shape[2], Images.shape[3]]'], {}), '([Images.shape[2], Images.shape[3]])\n', (6594, 6630), False, 'import torch\n'), ((6745, 6776), 'torch.fft.fftn', 'torch.fft.fftn', (['x'], {'dim': '(-2, -1)'}), '(x, 
dim=(-2, -1))\n', (6759, 6776), False, 'import torch\n'), ((6893, 6916), 'torch.fft.ifftshift', 'torch.fft.ifftshift', (['fd'], {}), '(fd)\n', (6912, 6916), False, 'import torch\n'), ((7308, 7328), 'torch.clamp', 'torch.clamp', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (7319, 7328), False, 'import torch\n'), ((7422, 7442), 'torch.clamp', 'torch.clamp', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (7433, 7442), False, 'import torch\n'), ((8270, 8294), 'torch.nn.functional.normalize', 'F.normalize', (['proj'], {'dim': '(1)'}), '(proj, dim=1)\n', (8281, 8294), True, 'import torch.nn.functional as F\n'), ((8314, 8338), 'torch.nn.functional.normalize', 'F.normalize', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (8325, 8338), True, 'import torch.nn.functional as F\n'), ((9088, 9113), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(32)', '(32)'], {}), '(1, 3, 32, 32)\n', (9099, 9113), False, 'import torch\n'), ((905, 996), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(self.expansion * planes)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, self.expansion * planes, kernel_size=1, stride=stride,\n bias=False)\n', (914, 996), True, 'import torch.nn as nn\n'), ((1008, 1047), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (1022, 1047), True, 'import torch.nn as nn\n'), ((1965, 2056), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(self.expansion * planes)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, self.expansion * planes, kernel_size=1, stride=stride,\n bias=False)\n', (1974, 2056), True, 'import torch.nn as nn\n'), ((2068, 2107), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (2082, 2107), True, 'import torch.nn as nn\n')] |
"""
Unit tests for utility functions in chaco.base
"""
import unittest
from math import sqrt
from numpy import arange, array
from numpy.testing import assert_equal, assert_almost_equal
from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance
class BinSearchTestCase(unittest.TestCase):
    """Tests for chaco.api.bin_search on ascending and descending data.

    ``self.assert_`` (used by the original tests) is a long-deprecated
    unittest alias that was removed in Python 3.12; ``assertEqual`` is
    used instead and also produces useful failure messages.
    """

    def test_ascending_data(self):
        ary = arange(10.0)
        # inside bounds
        self.assertEqual(bin_search(ary, 0.0, 1), 0)
        self.assertEqual(bin_search(ary, 5.0, 1), 5)
        self.assertEqual(bin_search(ary, 9.0, 1), 9)
        # out of bounds -> sentinel -1
        self.assertEqual(bin_search(ary, 10.0, 1), -1)
        self.assertEqual(bin_search(ary, -1.0, 1), -1)
        self.assertEqual(bin_search(ary, 9.00001, 1), -1)
        self.assertEqual(bin_search(ary, -0.00001, 1), -1)
        # values between bins map to the lower index
        self.assertEqual(bin_search(ary, 5.1, 1), 5)
        self.assertEqual(bin_search(ary, 4.9, 1), 4)

    def test_descending_data(self):
        ary = arange(10.0, 0.0, -1.0)
        # inside bounds
        self.assertEqual(bin_search(ary, 10.0, -1), 0)
        self.assertEqual(bin_search(ary, 5.0, -1), 5)
        self.assertEqual(bin_search(ary, 1.0, -1), 9)
        # out of bounds -> sentinel -1
        self.assertEqual(bin_search(ary, 10.1, -1), -1)
        self.assertEqual(bin_search(ary, 0.9, -1), -1)
        # values between bins map toward the smaller value's index
        self.assertEqual(bin_search(ary, 5.1, -1), 4)
        self.assertEqual(bin_search(ary, 4.9, -1), 5)
class ReverseMap1DTestCase(unittest.TestCase):
    """Tests for chaco.api.reverse_map_1d in both sort orders.

    Replaces the deprecated ``self.assert_`` alias (removed in
    Python 3.12) with ``assertEqual`` and drops the redundant bare
    ``return`` statements.
    """

    def test_ascending(self):
        ary = arange(10.0)
        rmap = lambda x: reverse_map_1d(ary, x, 'ascending')
        # inside bounds
        self.assertEqual(rmap(0.0), 0)
        self.assertEqual(rmap(5.0), 5)
        self.assertEqual(rmap(9.0), 9)
        # out of bounds raises
        self.assertRaises(IndexError, rmap, 10.0)
        self.assertRaises(IndexError, rmap, -1.0)
        # rounds to the nearest index (ties round down)
        self.assertEqual(rmap(3.4), 3)
        self.assertEqual(rmap(3.5), 3)
        self.assertEqual(rmap(3.6), 4)

    def test_ascending_floor(self):
        ary = arange(10.0)
        rmap = lambda x: reverse_map_1d(ary, x, 'ascending', floor_only=True)
        # floor_only always truncates downward
        self.assertEqual(rmap(3.4), 3)
        self.assertEqual(rmap(3.5), 3)
        self.assertEqual(rmap(3.6), 3)

    def test_descending(self):
        ary = arange(10.0, 0.0, -1.0)
        rmap = lambda x: reverse_map_1d(ary, x, 'descending')
        # inside bounds
        self.assertEqual(rmap(10.0), 0)
        self.assertEqual(rmap(5.0), 5)
        self.assertEqual(rmap(1.0), 9)
        # out of bounds raises
        self.assertRaises(IndexError, rmap, 0.0)
        self.assertRaises(IndexError, rmap, 11.0)
        # rounds to the nearest index (ties round toward index 1)
        self.assertEqual(rmap(8.6), 1)
        self.assertEqual(rmap(8.5), 1)
        self.assertEqual(rmap(8.4), 2)

    def test_descending_floor(self):
        ary = arange(10.0, 0.0, -1.0)
        rmap = lambda x: reverse_map_1d(ary, x, 'descending', floor_only=True)
        # floor_only always keeps the larger value's index
        self.assertEqual(rmap(8.6), 1)
        self.assertEqual(rmap(8.5), 1)
        self.assertEqual(rmap(8.4), 1)
class FindRunsTestCase(unittest.TestCase):
    """Tests for chaco.api.find_runs (splitting an array into runs of
    consecutive values)."""

    def test_find_runs_middle(self):
        data = array([0, 8, 7, 8, 9, 2, 3, 4, 10])
        assert_equal(find_runs(data),
                     [[0], [8], [7, 8, 9], [2, 3, 4], [10]])

    def test_find_runs_start(self):
        data = array([3, 4, 5, 12, 9, 17])
        assert_equal(find_runs(data), [[3, 4, 5], [12], [9], [17]])

    def test_find_runs_end(self):
        data = array([18, 23, 24, 25])
        assert_equal(find_runs(data), [[18], [23, 24, 25]])

    def test_find_runs_offset(self):
        # because of the nature of the find_runs algorithm, there may be
        # fencepost errors with runs that start at x[1] or x[-2]
        data = array([10, 12, 13, 14, 28, 16])
        assert_equal(find_runs(data), [[10], [12, 13, 14], [28], [16]])
        data = array([10, 15, 16, 17, 34])
        assert_equal(find_runs(data), [[10], [15, 16, 17], [34]])

    def test_find_runs_none(self):
        # empty input, and input containing no runs at all
        data = array([])
        assert_equal(find_runs(data), [])
        data = array([12, 15, 27])
        assert_equal(find_runs(data), [[12], [15], [27]])

    def test_find_runs_descending(self):
        data = array([30, 41, 40, 39, 38, 37, 12])
        assert_equal(find_runs(data, order='descending'),
                     [[30], [41, 40, 39, 38, 37], [12]])
class PointLineDistanceTestCase(unittest.TestCase):
    """Tests for chaco.api.point_line_distance (perpendicular distance
    from a point to the line through two points)."""

    def test_horizontal_line(self):
        start, end = (10.0, 10.0), (60.0, 10.0)
        assert_equal(point_line_distance((35.0, 30.0), start, end), 20.0)

    def test_vertical_line(self):
        start, end = (10.0, 10.0), (10.0, 60.0)
        assert_equal(point_line_distance((30.0, 35.0), start, end), 20.0)

    def test_diag_lines(self):
        start, end = (0.0, 0.0), (10.0, 10.0)
        dist = point_line_distance((0.0, 5.0), start, end)
        assert_almost_equal(dist, 2.5 * sqrt(2.0))

    def test_point_on_line(self):
        # A point lying on the line is at distance zero.
        start, end = (-5.0, 5.0), (10.0, -10.0)
        dist = point_line_distance((3.0, -3.0), start, end)
        assert_almost_equal(dist, 0.0)
if __name__ == '__main__':
    # nose is unmaintained and incompatible with modern Python; the
    # stdlib runner discovers and runs the same TestCase classes.
    unittest.main()
| [
"chaco.api.reverse_map_1d",
"chaco.api.find_runs",
"numpy.testing.assert_equal",
"chaco.api.point_line_distance",
"math.sqrt",
"numpy.array",
"numpy.testing.assert_almost_equal",
"chaco.api.bin_search",
"nose.run",
"numpy.arange"
] | [((5287, 5297), 'nose.run', 'nose.run', ([], {}), '()\n', (5295, 5297), False, 'import nose\n'), ((362, 374), 'numpy.arange', 'arange', (['(10.0)'], {}), '(10.0)\n', (368, 374), False, 'from numpy import arange, array\n'), ((982, 1005), 'numpy.arange', 'arange', (['(10.0)', '(0.0)', '(-1.0)'], {}), '(10.0, 0.0, -1.0)\n', (988, 1005), False, 'from numpy import arange, array\n'), ((1549, 1561), 'numpy.arange', 'arange', (['(10.0)'], {}), '(10.0)\n', (1555, 1561), False, 'from numpy import arange, array\n'), ((2081, 2093), 'numpy.arange', 'arange', (['(10.0)'], {}), '(10.0)\n', (2087, 2093), False, 'from numpy import arange, array\n'), ((2369, 2392), 'numpy.arange', 'arange', (['(10.0)', '(0.0)', '(-1.0)'], {}), '(10.0, 0.0, -1.0)\n', (2375, 2392), False, 'from numpy import arange, array\n'), ((2914, 2937), 'numpy.arange', 'arange', (['(10.0)', '(0.0)', '(-1.0)'], {}), '(10.0, 0.0, -1.0)\n', (2920, 2937), False, 'from numpy import arange, array\n'), ((3262, 3297), 'numpy.array', 'array', (['[0, 8, 7, 8, 9, 2, 3, 4, 10]'], {}), '([0, 8, 7, 8, 9, 2, 3, 4, 10])\n', (3267, 3297), False, 'from numpy import arange, array\n'), ((3411, 3438), 'numpy.array', 'array', (['[3, 4, 5, 12, 9, 17]'], {}), '([3, 4, 5, 12, 9, 17])\n', (3416, 3438), False, 'from numpy import arange, array\n'), ((3542, 3565), 'numpy.array', 'array', (['[18, 23, 24, 25]'], {}), '([18, 23, 24, 25])\n', (3547, 3565), False, 'from numpy import arange, array\n'), ((3806, 3837), 'numpy.array', 'array', (['[10, 12, 13, 14, 28, 16]'], {}), '([10, 12, 13, 14, 28, 16])\n', (3811, 3837), False, 'from numpy import arange, array\n'), ((3910, 3937), 'numpy.array', 'array', (['[10, 15, 16, 17, 34]'], {}), '([10, 15, 16, 17, 34])\n', (3915, 3937), False, 'from numpy import arange, array\n'), ((4042, 4051), 'numpy.array', 'array', (['[]'], {}), '([])\n', (4047, 4051), False, 'from numpy import arange, array\n'), ((4104, 4123), 'numpy.array', 'array', (['[12, 15, 27]'], {}), '([12, 15, 27])\n', (4109, 4123), False, 
'from numpy import arange, array\n'), ((4230, 4265), 'numpy.array', 'array', (['[30, 41, 40, 39, 38, 37, 12]'], {}), '([30, 41, 40, 39, 38, 37, 12])\n', (4235, 4265), False, 'from numpy import arange, array\n'), ((4564, 4597), 'chaco.api.point_line_distance', 'point_line_distance', (['test', 'p1', 'p2'], {}), '(test, p1, p2)\n', (4583, 4597), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((4606, 4630), 'numpy.testing.assert_equal', 'assert_equal', (['dist', '(20.0)'], {}), '(dist, 20.0)\n', (4618, 4630), False, 'from numpy.testing import assert_equal, assert_almost_equal\n'), ((4761, 4794), 'chaco.api.point_line_distance', 'point_line_distance', (['test', 'p1', 'p2'], {}), '(test, p1, p2)\n', (4780, 4794), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((4803, 4827), 'numpy.testing.assert_equal', 'assert_equal', (['dist', '(20.0)'], {}), '(dist, 20.0)\n', (4815, 4827), False, 'from numpy.testing import assert_equal, assert_almost_equal\n'), ((4951, 4984), 'chaco.api.point_line_distance', 'point_line_distance', (['test', 'p1', 'p2'], {}), '(test, p1, p2)\n', (4970, 4984), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((5165, 5198), 'chaco.api.point_line_distance', 'point_line_distance', (['test', 'p1', 'p2'], {}), '(test, p1, p2)\n', (5184, 5198), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((5207, 5237), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['dist', '(0.0)'], {}), '(dist, 0.0)\n', (5226, 5237), False, 'from numpy.testing import assert_equal, assert_almost_equal\n'), ((1587, 1622), 'chaco.api.reverse_map_1d', 'reverse_map_1d', (['ary', 'x', '"""ascending"""'], {}), "(ary, x, 'ascending')\n", (1601, 1622), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((2119, 2171), 'chaco.api.reverse_map_1d', 
'reverse_map_1d', (['ary', 'x', '"""ascending"""'], {'floor_only': '(True)'}), "(ary, x, 'ascending', floor_only=True)\n", (2133, 2171), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((2418, 2454), 'chaco.api.reverse_map_1d', 'reverse_map_1d', (['ary', 'x', '"""descending"""'], {}), "(ary, x, 'descending')\n", (2432, 2454), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((2963, 3016), 'chaco.api.reverse_map_1d', 'reverse_map_1d', (['ary', 'x', '"""descending"""'], {'floor_only': '(True)'}), "(ary, x, 'descending', floor_only=True)\n", (2977, 3016), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((3311, 3323), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (3320, 3323), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((3455, 3467), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (3464, 3467), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((3584, 3596), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (3593, 3596), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((3854, 3866), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (3863, 3866), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((3955, 3967), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (3964, 3967), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((4073, 4085), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (4082, 4085), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((4143, 4155), 'chaco.api.find_runs', 'find_runs', (['x'], {}), '(x)\n', (4152, 4155), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, 
point_line_distance\n'), ((4281, 4313), 'chaco.api.find_runs', 'find_runs', (['x'], {'order': '"""descending"""'}), "(x, order='descending')\n", (4290, 4313), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((420, 443), 'chaco.api.bin_search', 'bin_search', (['ary', '(0.0)', '(1)'], {}), '(ary, 0.0, 1)\n', (430, 443), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((471, 494), 'chaco.api.bin_search', 'bin_search', (['ary', '(5.0)', '(1)'], {}), '(ary, 5.0, 1)\n', (481, 494), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((522, 545), 'chaco.api.bin_search', 'bin_search', (['ary', '(9.0)', '(1)'], {}), '(ary, 9.0, 1)\n', (532, 545), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((597, 621), 'chaco.api.bin_search', 'bin_search', (['ary', '(10.0)', '(1)'], {}), '(ary, 10.0, 1)\n', (607, 621), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((650, 674), 'chaco.api.bin_search', 'bin_search', (['ary', '(-1.0)', '(1)'], {}), '(ary, -1.0, 1)\n', (660, 674), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((703, 730), 'chaco.api.bin_search', 'bin_search', (['ary', '(9.00001)', '(1)'], {}), '(ary, 9.00001, 1)\n', (713, 730), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((759, 785), 'chaco.api.bin_search', 'bin_search', (['ary', '(-1e-05)', '(1)'], {}), '(ary, -1e-05, 1)\n', (769, 785), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((835, 858), 'chaco.api.bin_search', 'bin_search', (['ary', '(5.1)', '(1)'], {}), '(ary, 5.1, 1)\n', (845, 858), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((886, 909), 'chaco.api.bin_search', 'bin_search', (['ary', '(4.9)', 
'(1)'], {}), '(ary, 4.9, 1)\n', (896, 909), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1051, 1076), 'chaco.api.bin_search', 'bin_search', (['ary', '(10.0)', '(-1)'], {}), '(ary, 10.0, -1)\n', (1061, 1076), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1104, 1128), 'chaco.api.bin_search', 'bin_search', (['ary', '(5.0)', '(-1)'], {}), '(ary, 5.0, -1)\n', (1114, 1128), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1156, 1180), 'chaco.api.bin_search', 'bin_search', (['ary', '(1.0)', '(-1)'], {}), '(ary, 1.0, -1)\n', (1166, 1180), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1232, 1257), 'chaco.api.bin_search', 'bin_search', (['ary', '(10.1)', '(-1)'], {}), '(ary, 10.1, -1)\n', (1242, 1257), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1286, 1310), 'chaco.api.bin_search', 'bin_search', (['ary', '(0.9)', '(-1)'], {}), '(ary, 0.9, -1)\n', (1296, 1310), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1358, 1382), 'chaco.api.bin_search', 'bin_search', (['ary', '(5.1)', '(-1)'], {}), '(ary, 5.1, -1)\n', (1368, 1382), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((1410, 1434), 'chaco.api.bin_search', 'bin_search', (['ary', '(4.9)', '(-1)'], {}), '(ary, 4.9, -1)\n', (1420, 1434), False, 'from chaco.api import bin_search, find_runs, reverse_map_1d, point_line_distance\n'), ((5025, 5034), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (5029, 5034), False, 'from math import sqrt\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Tests for CAS Pipeline
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import numpy as np
import pandas as pd
import swat
import swat.utils.testing as tm
import unittest
from pipefitter.estimator import DecisionTree, DecisionForest, GBTree
from pipefitter.pipeline import Pipeline, tosequence
from pipefitter.transformer import Imputer
from swat.utils.compat import patch_pandas_sort
from swat.utils.testing import UUID_RE, get_cas_host_type, load_data
# Work around pandas sort API differences across versions.
patch_pandas_sort()

# CAS connection settings are resolved by swat's testing helpers
# (presumably from environment/configuration; see swat.utils.testing).
USER, PASSWD = tm.get_user_pass()
HOST, PORT, PROTOCOL = tm.get_host_port_proto()

# Classification
ctarget = 'Origin'

# Regression
rtarget = 'MSRP'

# Shared model columns for the cars dataset used throughout these tests.
inputs = ['MPG_City', 'MPG_Highway', 'Length', 'Weight', 'Type', 'Cylinders']
nominals = ['Type', 'Cylinders', 'Origin']
class TestPipelineUtils(tm.TestCase):
    """Unit tests for pipeline helper utilities."""

    def test_tosequence(self):
        # Tuples and lists pass through unchanged.
        self.assertEqual(tosequence(('a', 'b', 'c')), ('a', 'b', 'c'))
        self.assertEqual(tosequence(['a', 'b', 'c']), ['a', 'b', 'c'])
        # Arbitrary iterators are materialised as lists.
        self.assertEqual(tosequence(iter(('a', 'b', 'c'))), ['a', 'b', 'c'])
        # Strings are already sequences and are returned as-is.
        self.assertEqual(tosequence('abc'), 'abc')
        # numpy arrays round-trip through numpy.asarray.
        expected = list(np.asarray(np.array((1, 2, 3))))
        self.assertEqual(list(tosequence(np.array((1, 2, 3)))), expected)
        # Non-iterables are rejected.
        with self.assertRaises(TypeError):
            tosequence(4)
class TestPipeline(tm.TestCase):
server_type = None
    def setUp(self):
        """Open a fresh CAS session and load the sample cars table."""
        # Reset SWAT options first so earlier tests cannot leak settings.
        swat.reset_option()
        swat.options.cas.print_messages = True
        swat.options.interactive_mode = True
        # Connect using the module-level credentials/host settings.
        self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)
        # Cache the server type on the class: only the first test pays
        # the lookup cost.
        if type(self).server_type is None:
            type(self).server_type = get_cas_host_type(self.s)
        self.srcLib = tm.get_casout_lib(self.server_type)
        # Load the sample data and keep a handle to the CAS table object.
        r = tm.load_data(self.s, 'datasources/cars_single.sashdat', self.server_type)
        self.table = r['casTable']
    def tearDown(self):
        # tear down tests
        # Close the CAS session and drop the reference so each test gets
        # a completely fresh connection from setUp.
        self.s.terminate()
        del self.s
        swat.reset_option()
def test_basic(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
model = pipe.fit(tbl)
self.assertEqual(model.__class__.__name__, 'PipelineModel')
self.assertEqual(len(model.stages), 3)
self.assertTrue(model[0] is mean_imp)
self.assertTrue(model[1] is mode_imp)
self.assertEqual(model[2].__class__.__name__, 'DecisionTreeModel')
out = model.score(tbl)
self.assertEqual(set(list(out.index)),
set(['Target', 'Level', 'Var', 'NBins', 'NObsUsed',
'TargetCount', 'TargetMiss', 'PredCount', 'PredMiss',
'Event', 'EventCount', 'NonEventCount', 'EventMiss',
'AreaUnderROCCurve', 'CRCut', 'ClassificationCutOff',
'KS', 'KSCutOff', 'MisClassificationRate']))
# Bad item type
with self.assertRaises(TypeError):
Pipeline([mean_imp, mode_imp, 'foo', dtree])
def test_multiple_estimators(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree1 = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
dtree2 = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree1, dtree2])
model = pipe.fit(tbl)
self.assertEqual(model.__class__.__name__, 'PipelineModel')
self.assertEqual(len(model.stages), 4)
self.assertTrue(model[0] is mean_imp)
self.assertTrue(model[1] is mode_imp)
self.assertEqual(model[2].__class__.__name__, 'DecisionTreeModel')
self.assertEqual(model[3].__class__.__name__, 'DecisionTreeModel')
out = model.score(tbl)
self.assertEqual(set(list(out.index)),
set(['DecisionTree', 'DecisionTree1']))
def test_str(self):
    """str() of a pipeline renders every stage with its parameters."""
    pipe = Pipeline([Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
                     DecisionTree(target='Origin', nominals=nominals,
                                  inputs=inputs)])
    expected = ("Pipeline([Imputer(MEAN), Imputer(MODE), "
                "DecisionTree(alpha=0.0, cf_level=0.25, criterion=None, "
                "inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', "
                "'Type', 'Cylinders'], leaf_size=5, max_branches=2, "
                "max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', "
                "'Origin'], prune=False, target='Origin', var_importance=False)])")
    # Normalize Python 2 unicode-literal prefixes before comparing.
    self.assertEqual(str(pipe).replace("u'", "'"), expected)
def test_repr(self):
    """repr() of a pipeline renders every stage with its parameters."""
    pipe = Pipeline([Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
                     DecisionTree(target='Origin', nominals=nominals,
                                  inputs=inputs)])
    expected = ("Pipeline([Imputer(MEAN), Imputer(MODE), "
                "DecisionTree(alpha=0.0, cf_level=0.25, criterion=None, "
                "inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', "
                "'Type', 'Cylinders'], leaf_size=5, max_branches=2, "
                "max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', "
                "'Origin'], prune=False, target='Origin', var_importance=False)])")
    # Normalize Python 2 unicode-literal prefixes before comparing.
    self.assertEqual(repr(pipe).replace("u'", "'"), expected)
def test_model_str(self):
    """str() of a fitted pipeline model renders every fitted stage."""
    stages = [Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
              DecisionTree(target='Origin', nominals=nominals, inputs=inputs)]
    model = Pipeline(stages).fit(self.table)
    expected = ("PipelineModel([Imputer(MEAN), Imputer(MODE), "
                "DecisionTreeModel(alpha=0.0, cf_level=0.25, criterion=None, "
                "inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', "
                "'Type', 'Cylinders'], leaf_size=5, max_branches=2, "
                "max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', "
                "'Origin'], prune=False, target='Origin', var_importance=False)])")
    # Normalize Python 2 unicode-literal prefixes before comparing.
    self.assertEqual(str(model).replace("u'", "'"), expected)
def test_model_repr(self):
    """repr() of a fitted pipeline model renders every fitted stage."""
    stages = [Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
              DecisionTree(target='Origin', nominals=nominals, inputs=inputs)]
    model = Pipeline(stages).fit(self.table)
    expected = ("PipelineModel([Imputer(MEAN), Imputer(MODE), "
                "DecisionTreeModel(alpha=0.0, cf_level=0.25, criterion=None, "
                "inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', "
                "'Type', 'Cylinders'], leaf_size=5, max_branches=2, "
                "max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', "
                "'Origin'], prune=False, target='Origin', var_importance=False)])")
    # Normalize Python 2 unicode-literal prefixes before comparing.
    self.assertEqual(repr(model).replace("u'", "'"), expected)
def test_set_params(self):
    """Parameter overrides apply to the pipeline without mutating stages."""
    table = self.table
    impute_mean = Imputer(Imputer.MEAN)
    impute_mode = Imputer(Imputer.MODE)
    tree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
    pipe = Pipeline([impute_mean, impute_mode, tree])
    self.assertEqual(pipe.fit(table).score(table).loc['Target'], 'Origin')
    # Overriding on the pipeline must leave the estimator itself untouched.
    pipe.set_params({tree.target: 'MSRP'})
    self.assertEqual(tree.target, 'Origin')
    self.assertEqual(pipe.fit(table).score(table).loc['Target'], 'MSRP')
    # Overrides can also be supplied directly to fit().
    pipe = Pipeline([impute_mean, impute_mode, tree])
    self.assertEqual(pipe.fit(table).score(table).loc['Target'], 'Origin')
    out = pipe.fit(table, {tree.target: 'MSRP'}).score(table)
    self.assertEqual(out.loc['Target'], 'MSRP')
def test_transform(self):
    """Pipeline.transform runs transformer stages without mutating input."""
    table = self.table
    pipe = Pipeline([Imputer(Imputer.MODE),
                     DecisionTree(target='Origin', nominals=nominals,
                                  inputs=inputs)])
    self.assertEqual(table.nmiss().max(), 2)
    result = pipe.transform(table)
    self.assertEqual(result.__class__.__name__, 'CASTable')
    # The source table keeps its missing values; the output has none.
    self.assertEqual(table.nmiss().max(), 2)
    self.assertEqual(result.nmiss().max(), 0)
def test_model_transform(self):
    """PipelineModel.transform applies fitted transformers to new data."""
    table = self.table
    pipe = Pipeline([Imputer(Imputer.MODE),
                     DecisionTree(target='Origin', nominals=nominals,
                                  inputs=inputs)])
    self.assertEqual(table.nmiss().max(), 2)
    model = pipe.fit(table)
    result = model.transform(table)
    self.assertEqual(result.__class__.__name__, 'CASTable')
    # The input is left untouched; the transformed copy has no missings.
    self.assertEqual(table.nmiss().max(), 2)
    self.assertEqual(result.nmiss().max(), 0)
def test_getitem(self):
    """Pipelines support integer indexing of their stages and reject
    out-of-range and non-integer keys.

    Note: the original body assigned ``tbl = self.table`` but never used
    it; the unused local has been removed.
    """
    mode_imp = Imputer(Imputer.MODE)
    dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
    pipe = Pipeline([mode_imp, dtree])
    self.assertTrue(pipe[0] is mode_imp)
    self.assertTrue(pipe[1] is dtree)
    with self.assertRaises(IndexError):
        pipe[2]
    with self.assertRaises(TypeError):
        pipe['foo']
def test_model_getitem(self):
    """Fitted models index their stages; estimators are replaced by models."""
    impute_mode = Imputer(Imputer.MODE)
    tree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
    model = Pipeline([impute_mode, tree]).fit(self.table)
    self.assertTrue(model[0] is impute_mode)
    # The estimator itself is not stored; its fitted model is.
    self.assertTrue(model[1] is not tree)
    self.assertEqual(model[1].__class__.__name__, 'DecisionTreeModel')
    with self.assertRaises(IndexError):
        model[2]
    with self.assertRaises(TypeError):
        model['foo']
def test_classification_score(self):
    """Scoring a classification pipeline yields class-level statistics."""
    table = self.table
    pipe = Pipeline([Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
                     DecisionTree(target='Origin', nominals=nominals,
                                  inputs=inputs)])
    score = pipe.fit(table).score(table)
    self.assertIsInstance(score, pd.Series)
    self.assertEqual(score.loc['Target'], 'Origin')
    self.assertEqual(score.loc['Level'], 'CLASS')
    self.assertEqual(score.loc['Event'], 'USA')
    self.assertEqual(score.loc['NBins'], 100)
    self.assertEqual(score.loc['NObsUsed'], 428)
    # The fit statistics should all come back as floats.
    for stat in ('AreaUnderROCCurve', 'CRCut', 'KS', 'KSCutOff',
                 'MisClassificationRate'):
        self.assertIsInstance(score.loc[stat], float)
def test_regression_score(self):
    """Scoring a regression pipeline yields interval-level statistics."""
    table = self.table
    pipe = Pipeline([Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
                     DecisionTree(target='MSRP', nominals=nominals,
                                  inputs=inputs)])
    score = pipe.fit(table).score(table)
    self.assertIsInstance(score, pd.Series)
    self.assertEqual(score.loc['Target'], 'MSRP')
    self.assertEqual(score.loc['Level'], 'INTERVAL')
    self.assertEqual(score.loc['NBins'], 100)
    self.assertEqual(score.loc['NObsUsed'], 428)
    # The error statistics should all come back as floats.
    for stat in ('AverageSquaredError', 'AverageAbsoluteError',
                 'AverageSquaredLogarithmicError',
                 'RootAverageSquaredError', 'RootAverageAbsoluteError',
                 'RootAverageSquaredLogarithmicError'):
        self.assertIsInstance(score.loc[stat], float)
def test_unload(self):
    """PipelineModel.unload drops the fitted model table from the server."""
    stages = [Imputer(Imputer.MEAN), Imputer(Imputer.MODE),
              DecisionTree(target='MSRP', nominals=nominals, inputs=inputs)]
    model = Pipeline(stages).fit(self.table)
    self.assertEqual(model[-1].data.table.tableexists().exists, 1)
    model.unload()
    self.assertEqual(model[-1].data.table.tableexists().exists, 0)
if __name__ == '__main__':
    # Run this module's test suite through the shared SWAT test harness.
    tm.runtests()
| [
"swat.CAS",
"pipefitter.pipeline.Pipeline",
"pipefitter.transformer.Imputer",
"swat.utils.testing.get_user_pass",
"swat.utils.testing.get_cas_host_type",
"swat.utils.testing.get_host_port_proto",
"swat.utils.compat.patch_pandas_sort",
"numpy.array",
"swat.utils.testing.runtests",
"swat.utils.testi... | [((1135, 1154), 'swat.utils.compat.patch_pandas_sort', 'patch_pandas_sort', ([], {}), '()\n', (1152, 1154), False, 'from swat.utils.compat import patch_pandas_sort\n'), ((1171, 1189), 'swat.utils.testing.get_user_pass', 'tm.get_user_pass', ([], {}), '()\n', (1187, 1189), True, 'import swat.utils.testing as tm\n'), ((1213, 1237), 'swat.utils.testing.get_host_port_proto', 'tm.get_host_port_proto', ([], {}), '()\n', (1235, 1237), True, 'import swat.utils.testing as tm\n'), ((13312, 13325), 'swat.utils.testing.runtests', 'tm.runtests', ([], {}), '()\n', (13323, 13325), True, 'import swat.utils.testing as tm\n'), ((2056, 2075), 'swat.reset_option', 'swat.reset_option', ([], {}), '()\n', (2073, 2075), False, 'import swat\n'), ((2186, 2239), 'swat.CAS', 'swat.CAS', (['HOST', 'PORT', 'USER', 'PASSWD'], {'protocol': 'PROTOCOL'}), '(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)\n', (2194, 2239), False, 'import swat\n'), ((2370, 2405), 'swat.utils.testing.get_casout_lib', 'tm.get_casout_lib', (['self.server_type'], {}), '(self.server_type)\n', (2387, 2405), True, 'import swat.utils.testing as tm\n'), ((2419, 2492), 'swat.utils.testing.load_data', 'tm.load_data', (['self.s', '"""datasources/cars_single.sashdat"""', 'self.server_type'], {}), "(self.s, 'datasources/cars_single.sashdat', self.server_type)\n", (2431, 2492), True, 'import swat.utils.testing as tm\n'), ((2634, 2653), 'swat.reset_option', 'swat.reset_option', ([], {}), '()\n', (2651, 2653), False, 'import swat\n'), ((2726, 2747), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (2733, 2747), False, 'from pipefitter.transformer import Imputer\n'), ((2767, 2788), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (2774, 2788), False, 'from pipefitter.transformer import Imputer\n'), ((2805, 2868), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 
'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (2817, 2868), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((2885, 2922), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (2893, 2922), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((3932, 3953), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (3939, 3953), False, 'from pipefitter.transformer import Imputer\n'), ((3973, 3994), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (3980, 3994), False, 'from pipefitter.transformer import Imputer\n'), ((4012, 4075), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (4024, 4075), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((4093, 4156), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (4105, 4156), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((4173, 4219), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree1, dtree2]'], {}), '([mean_imp, mode_imp, dtree1, dtree2])\n', (4181, 4219), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((4796, 4817), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (4803, 4817), False, 'from pipefitter.transformer import Imputer\n'), ((4837, 4858), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (4844, 4858), False, 'from pipefitter.transformer import Imputer\n'), ((4875, 4938), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': 
'"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (4887, 4938), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((4954, 4991), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (4962, 4991), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((5549, 5570), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (5556, 5570), False, 'from pipefitter.transformer import Imputer\n'), ((5590, 5611), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (5597, 5611), False, 'from pipefitter.transformer import Imputer\n'), ((5628, 5691), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (5640, 5691), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((5707, 5744), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (5715, 5744), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((6326, 6347), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (6333, 6347), False, 'from pipefitter.transformer import Imputer\n'), ((6367, 6388), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (6374, 6388), False, 'from pipefitter.transformer import Imputer\n'), ((6405, 6468), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (6417, 6468), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((7124, 7145), 'pipefitter.transformer.Imputer', 'Imputer', 
(['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (7131, 7145), False, 'from pipefitter.transformer import Imputer\n'), ((7165, 7186), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (7172, 7186), False, 'from pipefitter.transformer import Imputer\n'), ((7203, 7266), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (7215, 7266), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((7923, 7944), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (7930, 7944), False, 'from pipefitter.transformer import Imputer\n'), ((7964, 7985), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (7971, 7985), False, 'from pipefitter.transformer import Imputer\n'), ((8002, 8065), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (8014, 8065), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((8082, 8119), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (8090, 8119), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((8517, 8554), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (8525, 8554), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((8841, 8862), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (8848, 8862), False, 'from pipefitter.transformer import Imputer\n'), ((8879, 8942), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), 
"(target='Origin', nominals=nominals, inputs=inputs)\n", (8891, 8942), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((8959, 8986), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mode_imp, dtree]'], {}), '([mode_imp, dtree])\n', (8967, 8986), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((9309, 9330), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (9316, 9330), False, 'from pipefitter.transformer import Imputer\n'), ((9347, 9410), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (9359, 9410), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((9427, 9454), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mode_imp, dtree]'], {}), '([mode_imp, dtree])\n', (9435, 9454), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((9799, 9820), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (9806, 9820), False, 'from pipefitter.transformer import Imputer\n'), ((9837, 9900), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (9849, 9900), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((9925, 9952), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mode_imp, dtree]'], {}), '([mode_imp, dtree])\n', (9933, 9952), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((10254, 10275), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (10261, 10275), False, 'from pipefitter.transformer import Imputer\n'), ((10292, 10355), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 
'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (10304, 10355), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((10801, 10822), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (10808, 10822), False, 'from pipefitter.transformer import Imputer\n'), ((10842, 10863), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (10849, 10863), False, 'from pipefitter.transformer import Imputer\n'), ((10880, 10943), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""Origin"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='Origin', nominals=nominals, inputs=inputs)\n", (10892, 10943), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((10960, 10997), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (10968, 10997), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((11808, 11829), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], {}), '(Imputer.MEAN)\n', (11815, 11829), False, 'from pipefitter.transformer import Imputer\n'), ((11849, 11870), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (11856, 11870), False, 'from pipefitter.transformer import Imputer\n'), ((11887, 11948), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""MSRP"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='MSRP', nominals=nominals, inputs=inputs)\n", (11899, 11948), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((11965, 12002), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (11973, 12002), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((12881, 12902), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MEAN'], 
{}), '(Imputer.MEAN)\n', (12888, 12902), False, 'from pipefitter.transformer import Imputer\n'), ((12922, 12943), 'pipefitter.transformer.Imputer', 'Imputer', (['Imputer.MODE'], {}), '(Imputer.MODE)\n', (12929, 12943), False, 'from pipefitter.transformer import Imputer\n'), ((12960, 13021), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {'target': '"""MSRP"""', 'nominals': 'nominals', 'inputs': 'inputs'}), "(target='MSRP', nominals=nominals, inputs=inputs)\n", (12972, 13021), False, 'from pipefitter.estimator import DecisionTree, DecisionForest, GBTree\n'), ((13038, 13075), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (13046, 13075), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((1525, 1552), 'pipefitter.pipeline.tosequence', 'tosequence', (["('a', 'b', 'c')"], {}), "(('a', 'b', 'c'))\n", (1535, 1552), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((1596, 1623), 'pipefitter.pipeline.tosequence', 'tosequence', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1606, 1623), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((1744, 1761), 'pipefitter.pipeline.tosequence', 'tosequence', (['"""abc"""'], {}), "('abc')\n", (1754, 1761), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((1954, 1967), 'pipefitter.pipeline.tosequence', 'tosequence', (['(4)'], {}), '(4)\n', (1964, 1967), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((2321, 2346), 'swat.utils.testing.get_cas_host_type', 'get_cas_host_type', (['self.s'], {}), '(self.s)\n', (2338, 2346), False, 'from swat.utils.testing import UUID_RE, get_cas_host_type, load_data\n'), ((3801, 3845), 'pipefitter.pipeline.Pipeline', 'Pipeline', (["[mean_imp, mode_imp, 'foo', dtree]"], {}), "([mean_imp, mode_imp, 'foo', dtree])\n", (3809, 3845), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((6485, 6522), 'pipefitter.pipeline.Pipeline', 
'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (6493, 6522), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((7283, 7320), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mean_imp, mode_imp, dtree]'], {}), '([mean_imp, mode_imp, dtree])\n', (7291, 7320), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((10373, 10400), 'pipefitter.pipeline.Pipeline', 'Pipeline', (['[mode_imp, dtree]'], {}), '([mode_imp, dtree])\n', (10381, 10400), False, 'from pipefitter.pipeline import Pipeline, tosequence\n'), ((1811, 1830), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1819, 1830), True, 'import numpy as np\n'), ((1875, 1894), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1883, 1894), True, 'import numpy as np\n')] |
import numpy as np
from rsrespic.utilities import constants
from numpy import exp, sin, einsum
import numba
pi = np.pi
q = constants.cgs_constants['q']
c = constants.cgs_constants['c']
## Convert units to cgs from mks
class sine_transform_2D(object):
    """2-D electrostatic solver based on a double sine-series expansion."""

    def __init__(self):
        self.name = '2-d electrostatic solver using sin transform'

    # NOTE(review): a bare @numba.jit on a bound method that reads object
    # attributes will likely fall back to (deprecated) object mode --
    # confirm it actually accelerates this loop.
    @numba.jit
    def compute_grad_psi(self, fields, particles):
        """Accumulate the sine-series field components at every particle
        position and store them on ``fields.psi_x`` / ``fields.psi_y``."""
        line_density = particles.bunch_charge / particles.N
        modes_x = fields.n_modes_x
        modes_y = fields.n_modes_y
        len_x = fields.L_x
        len_y = fields.L_y
        # Shift particles so the computational domain starts at zero.
        pos_x = particles.x + len_x / 2.
        pos_y = particles.y + len_y / 2.
        phase_x = np.exp(1.j * np.pi * pos_x / len_x)
        phase_y = np.exp(1.j * np.pi * pos_y / len_y)
        field_x = 0.0 * pos_x   # field components at the particle positions
        field_y = 0.0 * pos_y
        for m in range(1, modes_x + 1):
            mode_x = phase_x ** m
            cos_x = np.real(mode_x)
            sin_x = np.imag(mode_x)
            k_m = m * np.pi / len_x
            for n in range(1, modes_y + 1):
                mode_y = phase_y ** n
                cos_y = np.real(mode_y)
                sin_y = np.imag(mode_y)
                k_n = n * np.pi / len_y
                # Charge-density mode amplitude from the particle sum.
                rho_mn = np.sum(sin_x * sin_y) * (4. / (len_x * len_y))
                # Poisson solve per mode; the two minus signs (Laplacian
                # and -4*pi on the rhs) cancel.
                phi_mn = 4. * np.pi * line_density * rho_mn / (k_m * k_m + k_n * k_n)
                field_x -= k_m * phi_mn * cos_x * sin_y   # -d(phi)/dx
                field_y -= k_n * phi_mn * sin_x * cos_y   # -d(phi)/dy
        fields.psi_x = field_x / (particles.gamma ** 2.)  # statC^2 s^2 / cm^3
        fields.psi_y = field_y / (particles.gamma ** 2.)  # statC^2 s^2 / cm^3
class field_solver_2D(object):
    """2-D electrostatic field solver based on a truncated Fourier-mode
    expansion.

    The solver communicates through attributes it reads from and writes
    onto the ``fields`` and ``particles`` objects passed to each method.
    Units noted in comments follow the original (CGS/statC) convention.
    """

    def __init__(self):
        self.name = '2-d electrostatic field solver'

    def compute_mode_coefficients(self, fields, particles):
        """Project the particle distribution onto the field modes.

        Stores the complex coefficients on ``fields.mode_coefficients``
        (statC s / cm).
        """
        kx1 = np.einsum('m, p -> mp', fields.k_x_vector, particles.x)
        ky1 = np.einsum('n, p -> np', fields.k_y_vector, particles.y)
        _, kx_mat = np.meshgrid(particles.x, fields.k_x_vector)  # 1/cm
        _, ky_mat = np.meshgrid(particles.y, fields.k_y_vector)  # 1/cm
        # Dimensionless per-particle mode weights in each plane.
        exp_x = np.exp(1j * kx1) * particles.lambda_twiddle(kx_mat, particles.x_extent) / fields.lambda_x_0
        exp_y = np.exp(1j * ky1) * particles.lambda_twiddle(ky_mat, particles.y_extent) / fields.lambda_y_0
        ptcl_exponential = np.einsum('mp, np -> mn', exp_x, exp_y)
        # Divide by k^2 (Poisson solve, precomputed as k_sq_inv).
        unscaled_coefficients = einsum('xy, xy -> xy', ptcl_exponential, fields.k_sq_inv)
        fields.mode_coefficients = - unscaled_coefficients * particles.weight * 4. * pi * q * np.sqrt(2) / (particles.gamma ** 2)
        return

    def compute_phi_mesh(self, fields, **kwargs):
        """Evaluate the potential on a uniform mesh.

        Keyword Args:
            xmax, ymax: half-extent of the mesh (defaults:
                ``fields.lambda_x_0`` / ``fields.lambda_y_0``).
            n_grid: number of points per axis (default 10).

        Stores ``fields.phi_grid`` (shifted so its minimum is zero) and
        the mesh coordinates ``fields.x_grid`` / ``fields.y_grid``.
        """
        xmax = kwargs.get("xmax", fields.lambda_x_0)
        ymax = kwargs.get("ymax", fields.lambda_y_0)
        n_grid = kwargs.get("n_grid", 10)
        xarray = np.linspace(-xmax, xmax, n_grid)
        yarray = np.linspace(-ymax, ymax, n_grid)
        XX, YY = np.meshgrid(xarray, yarray)
        phi = fields.mode_coefficients  # statC s / cm
        exp_x = np.exp(-1j * np.einsum('m,i -> mi', fields.k_x_vector, xarray))
        exp_y = np.exp(-1j * np.einsum('n,j -> nj', fields.k_y_vector, yarray))
        # phi(x, y) = sum_mn phi_mn * exp(-i (k_m x + k_n y))
        phi_modes = np.einsum('mi, nj -> mnij', exp_x, exp_y)
        phi_vals = np.einsum('mn,mnij->ij', phi, phi_modes)
        fields.phi_grid = phi_vals - np.min(phi_vals)
        fields.x_grid = XX
        fields.y_grid = YY
        return

    def _particle_mode_phases(self, fields, particles):
        """Return the (m, n, p) phase array exp(-i (k_m x_p + k_n y_p)).

        Shared by :meth:`compute_psi_particles` and
        :meth:`compute_grad_psi`, which previously duplicated it.
        """
        kx = np.einsum('m,i -> mi', fields.k_x_vector, particles.x)
        ky = np.einsum('n,i -> ni', fields.k_y_vector, particles.y)
        return np.einsum('mp, np -> mnp', np.exp(-1j * kx), np.exp(-1j * ky))

    def compute_psi_particles(self, fields, particles):
        """Evaluate the potential at each particle position.

        Stores the (complex) values on ``fields.psi_vals``.
        """
        modes = self._particle_mode_phases(fields, particles)
        fields.psi_vals = np.einsum('mn, mnp -> p', fields.mode_coefficients, modes)
        return

    def compute_grad_psi(self, fields, particles):
        """Evaluate the potential gradient at each particle position.

        Stores the real parts on ``fields.psi_x`` / ``fields.psi_y``
        (statC s / cm^2 per the original unit comments).
        """
        phi = fields.mode_coefficients
        kick_modes = self._particle_mode_phases(fields, particles)
        # Differentiating exp(-i k x) brings down a factor of -i k.
        grad_psi_x = np.einsum('mn, m, mnp -> p', phi, -1j * fields.k_x_vector, kick_modes)
        grad_psi_y = np.einsum('mn, n, mnp -> p', phi, -1j * fields.k_y_vector, kick_modes)
        fields.psi_x = np.real(grad_psi_x)
        fields.psi_y = np.real(grad_psi_y)
        return
class symplectic_maps:
    """Collection of one-step symplectic maps used by the tracking loop."""

    def __init__(self, name='maps'):
        self.name = name

    def _apply_space_charge_kick(self, fields, particles, ds):
        """Apply the transverse space-charge momentum kick over a step ds.

        Assumes ``fields.psi_x`` / ``fields.psi_y`` are up to date.
        (Factored out of the two kick methods, which duplicated it.)
        """
        kick_x = fields.psi_x * particles.charge * particles.weight / (particles.beta * c)  # statC^2 s^2 / cm^3
        kick_y = fields.psi_y * particles.charge * particles.weight / (particles.beta * c)  # statC^2 s^2 / cm^3
        particles.px = particles.px + kick_x * ds
        particles.py = particles.py + kick_y * ds

    def space_charge_kick_2D_sine(self, fields, particles, ds=0.0):
        """Space-charge kick using the sine-transform solver."""
        fields.solver.compute_grad_psi(fields, particles)
        self._apply_space_charge_kick(fields, particles, ds)

    def space_charge_kick_2D(self, fields, particles, ds=0.0):
        """Space-charge kick using the spectral solver; recomputes the
        mode coefficients before evaluating the field gradients."""
        fields.solver.compute_mode_coefficients(fields, particles)
        fields.solver.compute_grad_psi(fields, particles)
        self._apply_space_charge_kick(fields, particles, ds)

    def drift(self, particles, ds=0.0):
        """Exact drift: advance positions by ds using the longitudinal
        kinetic momentum derived from the canonical momenta."""
        kinetic_pz = np.sqrt((particles.beta * particles.p_xi) ** 2 - particles.px ** 2
                             - particles.py ** 2 - (particles.m_0 * particles.weight * c) ** 2)
        particles.x = particles.x + particles.px / kinetic_pz * ds
        particles.y = particles.y + particles.py / kinetic_pz * ds

    def thin_quad(self, fields, particles, ds=0.0, kappa=0.0):
        """Thin-lens quadrupole kick (no drift).

        ``kappa`` follows the positron convention and matches elegant's K1.
        ``fields`` is unused but kept for interface symmetry with the
        other maps.  The unused ``dx_ds``/``dy_ds``/``dz_ds`` locals of
        the original have been removed.
        """
        # NOTE(review): the pz/10000. factor looks like a reference-
        # momentum normalization -- confirm against particles.pz's units.
        particles.px += kappa * particles.x * ds * (particles.pz / 10000.)
        particles.py += - kappa * particles.y * ds * (particles.pz / 10000.)

    def reset_modes(self, fields, particles):
        """Placeholder for adapting the field domain to the beam size.

        Currently computes the RMS beam sizes but does not yet resize
        ``fields.L_x`` / ``fields.L_y``.
        """
        sigma_x = np.std(particles.x)
        sigma_y = np.std(particles.y)
        # TODO: resize the field domain from sigma_x / sigma_y.
        return
| [
"numpy.sqrt",
"numpy.min",
"numpy.exp",
"numpy.real",
"numpy.linspace",
"numpy.sum",
"numpy.einsum",
"numpy.std",
"numpy.meshgrid",
"numpy.imag",
"numpy.arange"
] | [((674, 703), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * x / Lx)'], {}), '(1.0j * np.pi * x / Lx)\n', (680, 703), True, 'import numpy as np\n'), ((711, 740), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * y / Ly)'], {}), '(1.0j * np.pi * y / Ly)\n', (717, 740), True, 'import numpy as np\n'), ((839, 851), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (848, 851), True, 'import numpy as np\n'), ((1878, 1933), 'numpy.einsum', 'np.einsum', (['"""m, p -> mp"""', 'fields.k_x_vector', 'particles.x'], {}), "('m, p -> mp', fields.k_x_vector, particles.x)\n", (1887, 1933), True, 'import numpy as np\n'), ((1942, 1997), 'numpy.einsum', 'np.einsum', (['"""n, p -> np"""', 'fields.k_y_vector', 'particles.y'], {}), "('n, p -> np', fields.k_y_vector, particles.y)\n", (1951, 1997), True, 'import numpy as np\n'), ((2017, 2060), 'numpy.meshgrid', 'np.meshgrid', (['particles.x', 'fields.k_x_vector'], {}), '(particles.x, fields.k_x_vector)\n', (2028, 2060), True, 'import numpy as np\n'), ((2087, 2130), 'numpy.meshgrid', 'np.meshgrid', (['particles.y', 'fields.k_y_vector'], {}), '(particles.y, fields.k_y_vector)\n', (2098, 2130), True, 'import numpy as np\n'), ((2391, 2430), 'numpy.einsum', 'np.einsum', (['"""mp, np -> mn"""', 'exp_x', 'exp_y'], {}), "('mp, np -> mn', exp_x, exp_y)\n", (2400, 2430), True, 'import numpy as np\n'), ((2471, 2528), 'numpy.einsum', 'einsum', (['"""xy, xy -> xy"""', 'ptcl_exponential', 'fields.k_sq_inv'], {}), "('xy, xy -> xy', ptcl_exponential, fields.k_sq_inv)\n", (2477, 2528), False, 'from numpy import exp, sin, einsum\n'), ((3012, 3044), 'numpy.linspace', 'np.linspace', (['(-xmax)', 'xmax', 'n_grid'], {}), '(-xmax, xmax, n_grid)\n', (3023, 3044), True, 'import numpy as np\n'), ((3056, 3088), 'numpy.linspace', 'np.linspace', (['(-ymax)', 'ymax', 'n_grid'], {}), '(-ymax, ymax, n_grid)\n', (3067, 3088), True, 'import numpy as np\n'), ((3100, 3127), 'numpy.meshgrid', 'np.meshgrid', (['xarray', 'yarray'], {}), '(xarray, yarray)\n', (3111, 3127), True, 'import 
numpy as np\n'), ((3192, 3241), 'numpy.einsum', 'np.einsum', (['"""m,i -> mi"""', 'fields.k_x_vector', 'xarray'], {}), "('m,i -> mi', fields.k_x_vector, xarray)\n", (3201, 3241), True, 'import numpy as np\n'), ((3262, 3311), 'numpy.einsum', 'np.einsum', (['"""n,j -> nj"""', 'fields.k_y_vector', 'yarray'], {}), "('n,j -> nj', fields.k_y_vector, yarray)\n", (3271, 3311), True, 'import numpy as np\n'), ((3335, 3354), 'numpy.exp', 'np.exp', (['(-1.0j * kx4)'], {}), '(-1.0j * kx4)\n', (3341, 3354), True, 'import numpy as np\n'), ((3375, 3394), 'numpy.exp', 'np.exp', (['(-1.0j * ky4)'], {}), '(-1.0j * ky4)\n', (3381, 3394), True, 'import numpy as np\n'), ((3463, 3504), 'numpy.einsum', 'np.einsum', (['"""mi, nj -> mnij"""', 'exp_x', 'exp_y'], {}), "('mi, nj -> mnij', exp_x, exp_y)\n", (3472, 3504), True, 'import numpy as np\n'), ((3609, 3649), 'numpy.einsum', 'np.einsum', (['"""mn,mnij->ij"""', 'phi', 'phi_modes'], {}), "('mn,mnij->ij', phi, phi_modes)\n", (3618, 3649), True, 'import numpy as np\n'), ((3905, 3959), 'numpy.einsum', 'np.einsum', (['"""m,i -> mi"""', 'fields.k_x_vector', 'particles.x'], {}), "('m,i -> mi', fields.k_x_vector, particles.x)\n", (3914, 3959), True, 'import numpy as np\n'), ((3980, 4034), 'numpy.einsum', 'np.einsum', (['"""n,i -> ni"""', 'fields.k_y_vector', 'particles.y'], {}), "('n,i -> ni', fields.k_y_vector, particles.y)\n", (3989, 4034), True, 'import numpy as np\n'), ((4058, 4077), 'numpy.exp', 'np.exp', (['(-1.0j * kx4)'], {}), '(-1.0j * kx4)\n', (4064, 4077), True, 'import numpy as np\n'), ((4098, 4117), 'numpy.exp', 'np.exp', (['(-1.0j * ky4)'], {}), '(-1.0j * ky4)\n', (4104, 4117), True, 'import numpy as np\n'), ((4139, 4179), 'numpy.einsum', 'np.einsum', (['"""mp, np -> mnp"""', 'exp_x', 'exp_y'], {}), "('mp, np -> mnp', exp_x, exp_y)\n", (4148, 4179), True, 'import numpy as np\n'), ((4206, 4243), 'numpy.einsum', 'np.einsum', (['"""mn, mnp -> p"""', 'phi', 'modes'], {}), "('mn, mnp -> p', phi, modes)\n", (4215, 4243), True, 'import 
numpy as np\n'), ((4397, 4451), 'numpy.einsum', 'np.einsum', (['"""m,i -> mi"""', 'fields.k_x_vector', 'particles.x'], {}), "('m,i -> mi', fields.k_x_vector, particles.x)\n", (4406, 4451), True, 'import numpy as np\n'), ((4472, 4526), 'numpy.einsum', 'np.einsum', (['"""n,i -> ni"""', 'fields.k_y_vector', 'particles.y'], {}), "('n,i -> ni', fields.k_y_vector, particles.y)\n", (4481, 4526), True, 'import numpy as np\n'), ((4550, 4569), 'numpy.exp', 'np.exp', (['(-1.0j * kx4)'], {}), '(-1.0j * kx4)\n', (4556, 4569), True, 'import numpy as np\n'), ((4590, 4609), 'numpy.exp', 'np.exp', (['(-1.0j * ky4)'], {}), '(-1.0j * ky4)\n', (4596, 4609), True, 'import numpy as np\n'), ((4636, 4676), 'numpy.einsum', 'np.einsum', (['"""mp, np -> mnp"""', 'exp_x', 'exp_y'], {}), "('mp, np -> mnp', exp_x, exp_y)\n", (4645, 4676), True, 'import numpy as np\n'), ((4705, 4777), 'numpy.einsum', 'np.einsum', (['"""mn, m, mnp -> p"""', 'phi', '(-1.0j * fields.k_x_vector)', 'kick_modes'], {}), "('mn, m, mnp -> p', phi, -1.0j * fields.k_x_vector, kick_modes)\n", (4714, 4777), True, 'import numpy as np\n'), ((4809, 4881), 'numpy.einsum', 'np.einsum', (['"""mn, n, mnp -> p"""', 'phi', '(-1.0j * fields.k_y_vector)', 'kick_modes'], {}), "('mn, n, mnp -> p', phi, -1.0j * fields.k_y_vector, kick_modes)\n", (4818, 4881), True, 'import numpy as np\n'), ((4916, 4935), 'numpy.real', 'np.real', (['grad_psi_x'], {}), '(grad_psi_x)\n', (4923, 4935), True, 'import numpy as np\n'), ((4977, 4996), 'numpy.real', 'np.real', (['grad_psi_y'], {}), '(grad_psi_y)\n', (4984, 4996), True, 'import numpy as np\n'), ((6570, 6708), 'numpy.sqrt', 'np.sqrt', (['((particles.beta * particles.p_xi) ** 2 - particles.px ** 2 - particles.py **\n 2 - (particles.m_0 * particles.weight * c) ** 2)'], {}), '((particles.beta * particles.p_xi) ** 2 - particles.px ** 2 - \n particles.py ** 2 - (particles.m_0 * particles.weight * c) ** 2)\n', (6577, 6708), True, 'import numpy as np\n'), ((7287, 7306), 'numpy.std', 'np.std', 
(['particles.x'], {}), '(particles.x)\n', (7293, 7306), True, 'import numpy as np\n'), ((7319, 7338), 'numpy.std', 'np.std', (['particles.y'], {}), '(particles.y)\n', (7325, 7338), True, 'import numpy as np\n'), ((900, 914), 'numpy.real', 'np.real', (['tmp_x'], {}), '(tmp_x)\n', (907, 914), True, 'import numpy as np\n'), ((926, 940), 'numpy.imag', 'np.imag', (['tmp_x'], {}), '(tmp_x)\n', (933, 940), True, 'import numpy as np\n'), ((956, 968), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (965, 968), True, 'import numpy as np\n'), ((3719, 3735), 'numpy.min', 'np.min', (['phi_vals'], {}), '(phi_vals)\n', (3725, 3735), True, 'import numpy as np\n'), ((1020, 1034), 'numpy.real', 'np.real', (['tmp_y'], {}), '(tmp_y)\n', (1027, 1034), True, 'import numpy as np\n'), ((1047, 1061), 'numpy.imag', 'np.imag', (['tmp_y'], {}), '(tmp_y)\n', (1054, 1061), True, 'import numpy as np\n'), ((1078, 1097), 'numpy.sum', 'np.sum', (['(tx_i * ty_i)'], {}), '(tx_i * ty_i)\n', (1084, 1097), True, 'import numpy as np\n'), ((2151, 2169), 'numpy.exp', 'np.exp', (['(1.0j * kx1)'], {}), '(1.0j * kx1)\n', (2157, 2169), True, 'import numpy as np\n'), ((2265, 2283), 'numpy.exp', 'np.exp', (['(1.0j * ky1)'], {}), '(1.0j * ky1)\n', (2271, 2283), True, 'import numpy as np\n'), ((2633, 2643), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2640, 2643), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import inspect
import os
import sys
import unittest
import numpy as np
import pandas as pd
# Workaround to import tools module
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import metrics
class TestMetricBehavior(unittest.TestCase):
"""
Test cases:
- There should be no NaN in the output of *_ignoring_nans metrics
"""
def setUp(self):
"""
Loads data for testing
Returns:
None
"""
self.all_metrics = inspect.getmembers(metrics, inspect.isfunction)
# Tuples of vectors to test (y_true, y_estimated)
self.data_tuples = [
# Data Types
# Only numpy one
(np.arange(1, 6), np.array([6, 4, 7, 1, 2])),
# Only pandas
(pd.Series(data=[2, 3, 5, 3, 2]), pd.Series(data=[2, 13, 53, 34, 2])),
# Mixed
(np.arange(1, 6), pd.Series(data=[2, 13, 53, 34, 2])),
# Zeros
# Zeros in target
(np.array([1, 2, 0]), np.array([2, 3, 4])),
# Zeros in estimated
(np.array([1, 2, 1]), np.array([2, 0, 4])),
# Zeros in target and estimated
(np.array([1, 0, 1]), np.array([2, 0, 4])),
# NaNs
# NaNs in target
(np.array([1, 2, np.nan]), np.array([2, 3, 4])),
# NaNs in estimated
(np.array([1, 2, 4]), np.array([2, 3, np.nan])),
# NaNs in target and estimated
(np.array([1, 2, np.nan]), np.array([2, 3, np.nan])),
]
def tearDown(self):
"""
Delete loaded data
Returns:
None
"""
pass
# Test if we return np.nan
def test_all_ignoring_nans_metrics_for_numpy_nans(self):
for metric_name, metric_callable in self.all_metrics:
if 'ignoring_nans' in metric_name and metric_name != 'mean_absolute_scaled_error_ignoring_nans':
for i, (y_true, y_estimated) in enumerate(self.data_tuples):
with self.subTest(i=i):
# Percentage errors will return NaN for 0/0
# noinspection PyTypeChecker
# Find if common zeros
common_zeros_indices = np.intersect1d(np.nonzero(y_true == 0), np.nonzero(y_estimated == 0))
# Disable inspection if common zeros for percentage metrics
if 'percentage' in metric_name and common_zeros_indices.size != 0:
continue
return_is_not_nan = ~np.isnan(metric_callable(y_true=y_true,
y_estimated=y_estimated))
self.assertTrue(expr=return_is_not_nan, msg='{} returns NaN'
' for y_true: {},'
' y_estimated: {}'.format(metric_name,
y_true,
y_estimated))
# # Test if we return None
def test_all_ignoring_nans_metrics_for_None(self):
for metric_name, metric_callable in self.all_metrics:
if 'ignoring_nans' in metric_name and metric_name != 'mean_absolute_scaled_error_ignoring_nans':
for i, (y_true, y_estimated) in enumerate(self.data_tuples):
with self.subTest(i=i):
self.assertIsNotNone(obj=metric_callable(y_true=y_true,
y_estimated=y_estimated),
msg='{} returns None'
' for y_true: {},'
' y_estimated: {}'.format(metric_name,
y_true,
y_estimated))
if __name__ == '__main__':
unittest.main()
| [
"pandas.Series",
"inspect.getmembers",
"numpy.arange",
"os.path.join",
"os.getcwd",
"numpy.array",
"numpy.nonzero",
"unittest.main",
"os.path.expanduser"
] | [((4377, 4392), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4390, 4392), False, 'import unittest\n'), ((312, 352), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'PACKAGE_PARENT'], {}), '(SCRIPT_DIR, PACKAGE_PARENT)\n', (324, 352), False, 'import os\n'), ((662, 709), 'inspect.getmembers', 'inspect.getmembers', (['metrics', 'inspect.isfunction'], {}), '(metrics, inspect.isfunction)\n', (680, 709), False, 'import inspect\n'), ((234, 245), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (243, 245), False, 'import os\n'), ((247, 275), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (265, 275), False, 'import os\n'), ((864, 879), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (873, 879), True, 'import numpy as np\n'), ((881, 906), 'numpy.array', 'np.array', (['[6, 4, 7, 1, 2]'], {}), '([6, 4, 7, 1, 2])\n', (889, 906), True, 'import numpy as np\n'), ((948, 979), 'pandas.Series', 'pd.Series', ([], {'data': '[2, 3, 5, 3, 2]'}), '(data=[2, 3, 5, 3, 2])\n', (957, 979), True, 'import pandas as pd\n'), ((981, 1015), 'pandas.Series', 'pd.Series', ([], {'data': '[2, 13, 53, 34, 2]'}), '(data=[2, 13, 53, 34, 2])\n', (990, 1015), True, 'import pandas as pd\n'), ((1051, 1066), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (1060, 1066), True, 'import numpy as np\n'), ((1068, 1102), 'pandas.Series', 'pd.Series', ([], {'data': '[2, 13, 53, 34, 2]'}), '(data=[2, 13, 53, 34, 2])\n', (1077, 1102), True, 'import pandas as pd\n'), ((1169, 1188), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (1177, 1188), True, 'import numpy as np\n'), ((1190, 1209), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (1198, 1209), True, 'import numpy as np\n'), ((1258, 1277), 'numpy.array', 'np.array', (['[1, 2, 1]'], {}), '([1, 2, 1])\n', (1266, 1277), True, 'import numpy as np\n'), ((1279, 1298), 'numpy.array', 'np.array', (['[2, 0, 4]'], {}), '([2, 0, 4])\n', (1287, 1298), True, 'import numpy as 
np\n'), ((1358, 1377), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (1366, 1377), True, 'import numpy as np\n'), ((1379, 1398), 'numpy.array', 'np.array', (['[2, 0, 4]'], {}), '([2, 0, 4])\n', (1387, 1398), True, 'import numpy as np\n'), ((1463, 1487), 'numpy.array', 'np.array', (['[1, 2, np.nan]'], {}), '([1, 2, np.nan])\n', (1471, 1487), True, 'import numpy as np\n'), ((1489, 1508), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (1497, 1508), True, 'import numpy as np\n'), ((1556, 1575), 'numpy.array', 'np.array', (['[1, 2, 4]'], {}), '([1, 2, 4])\n', (1564, 1575), True, 'import numpy as np\n'), ((1577, 1601), 'numpy.array', 'np.array', (['[2, 3, np.nan]'], {}), '([2, 3, np.nan])\n', (1585, 1601), True, 'import numpy as np\n'), ((1660, 1684), 'numpy.array', 'np.array', (['[1, 2, np.nan]'], {}), '([1, 2, np.nan])\n', (1668, 1684), True, 'import numpy as np\n'), ((1686, 1710), 'numpy.array', 'np.array', (['[2, 3, np.nan]'], {}), '([2, 3, np.nan])\n', (1694, 1710), True, 'import numpy as np\n'), ((2462, 2485), 'numpy.nonzero', 'np.nonzero', (['(y_true == 0)'], {}), '(y_true == 0)\n', (2472, 2485), True, 'import numpy as np\n'), ((2487, 2515), 'numpy.nonzero', 'np.nonzero', (['(y_estimated == 0)'], {}), '(y_estimated == 0)\n', (2497, 2515), True, 'import numpy as np\n')] |
import cv2
from math import ceil
import numpy as np
def floyd_steinberg_dither(img):
'''Applies the Floyd-Steinberf dithering to img, in place.
img is expected to be a 8-bit grayscale image.
Algorithm borrowed from wikipedia.org/wiki/Floyd%E2%80%93Steinberg_dithering.
'''
h, w = img.shape
def adjust_pixel(y, x, delta):
if y < 0 or y >= h or x < 0 or x >= w:
return
img[y][x] = min(255, max(0, img[y][x] + delta))
for y in range(h):
for x in range(w):
new_val = 255 if img[y][x] > 127 else 0
err = img[y][x] - new_val
img[y][x] = new_val
adjust_pixel(y, x + 1, err * 7/16)
adjust_pixel(y + 1, x - 1, err * 3/16)
adjust_pixel(y + 1, x, err * 5/16)
adjust_pixel(y + 1, x + 1, err * 1/16)
return img
def halfdone_dither(img):
'''Applies Haltone dithering using different sized circles
Algorithm is borrowed from https://github.com/GravO8/halftone
'''
def square_avg_value(square):
'''
Calculates the average grayscale value of the pixels in a square of the
original image
Argument:
square: List of N lists, each with N integers whose value is between 0
and 255
'''
sum = 0
n = 0
for row in square:
for pixel in row:
sum += pixel
n += 1
return sum/n
side = 4
jump = 4 # Todo: make this configurable
alpha = 3
height, width = img.shape
if not jump:
jump = ceil(min(height, height)*0.007)
assert jump > 0, "jump must be greater than 0"
height_output, width_output = side*ceil(height/jump), side*ceil(width/jump)
canvas = np.zeros((height_output, width_output, 3), np.uint8)
output_square = np.zeros((side, side, 3), np.uint8)
x_output, y_output = 0, 0
for y in range(0, height, jump):
for x in range(0, width, jump):
output_square[:] = (255, 255, 255)
intensity = 1 - square_avg_value(img[y:y+jump, x:x+jump])/255
radius = int(alpha*intensity*side/2)
if radius > 0:
# draw a circle
cv2.circle(
output_square,
center=(side//2, side//2),
radius=radius,
color=(0, 0, 0),
thickness=-1,
lineType=cv2.FILLED
)
# place the square on the canvas
canvas[y_output:y_output+side,
x_output:x_output+side] = output_square
x_output += side
y_output += side
x_output = 0
return canvas
def read_img(
filename,
print_width,
logger,
img_binarization_algo,
show_preview):
im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
height = im.shape[0]
width = im.shape[1]
factor = print_width / width
resized = cv2.resize(
im,
(
int(width * factor),
int(height * factor)
),
interpolation=cv2.INTER_AREA)
if img_binarization_algo == 'floyd-steinberg':
logger.info('⏳ Applying Floyd-Steinberg dithering to image...')
resized = floyd_steinberg_dither(resized)
logger.info('✅ Done.')
resized = resized > 127
elif img_binarization_algo == 'halftone':
logger.info('⏳ Applying halftone dithering to image...')
resized = halfdone_dither(resized)
logger.info('✅ Done.')
resized = resized > 127
elif img_binarization_algo == 'mean-threshold':
resized = resized > resized.mean()
else:
logger.error(
f'🛑 Unknown image binarization algorithm: {img_binarization_algo}')
raise RuntimeError(
f'unknown image binarization algorithm: {img_binarization_algo}')
if show_preview:
# Convert from our boolean representation to float.
preview_img = resized.astype(float)
cv2.imshow('Preview', preview_img)
logger.info('ℹ️ Displaying preview.')
# Calling waitKey(1) tells OpenCV to process its GUI events and actually display our image.
cv2.waitKey(1)
if input('🤔 Go ahead with print? [Y/n]? ').lower() == 'n':
logger.info('🛑 Aborted print.')
return None
# Invert the image before returning it.
return ~resized
| [
"math.ceil",
"cv2.imshow",
"numpy.zeros",
"cv2.circle",
"cv2.waitKey",
"cv2.imread"
] | [((1780, 1832), 'numpy.zeros', 'np.zeros', (['(height_output, width_output, 3)', 'np.uint8'], {}), '((height_output, width_output, 3), np.uint8)\n', (1788, 1832), True, 'import numpy as np\n'), ((1853, 1888), 'numpy.zeros', 'np.zeros', (['(side, side, 3)', 'np.uint8'], {}), '((side, side, 3), np.uint8)\n', (1861, 1888), True, 'import numpy as np\n'), ((2873, 2915), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename, cv2.IMREAD_GRAYSCALE)\n', (2883, 2915), False, 'import cv2\n'), ((4062, 4096), 'cv2.imshow', 'cv2.imshow', (['"""Preview"""', 'preview_img'], {}), "('Preview', preview_img)\n", (4072, 4096), False, 'import cv2\n'), ((4252, 4266), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4263, 4266), False, 'import cv2\n'), ((1726, 1745), 'math.ceil', 'ceil', (['(height / jump)'], {}), '(height / jump)\n', (1730, 1745), False, 'from math import ceil\n'), ((1750, 1768), 'math.ceil', 'ceil', (['(width / jump)'], {}), '(width / jump)\n', (1754, 1768), False, 'from math import ceil\n'), ((2241, 2368), 'cv2.circle', 'cv2.circle', (['output_square'], {'center': '(side // 2, side // 2)', 'radius': 'radius', 'color': '(0, 0, 0)', 'thickness': '(-1)', 'lineType': 'cv2.FILLED'}), '(output_square, center=(side // 2, side // 2), radius=radius,\n color=(0, 0, 0), thickness=-1, lineType=cv2.FILLED)\n', (2251, 2368), False, 'import cv2\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(self, y, r, use_ndcg_as_loss, use_exp_gain):
n = len(y)
def get_discounts(v):
x = np.argsort(v)
d = [0 for _ in range(n)]
for i in range(n):
d[x[i]] = 1. / np.log2(n - i + 1.)
return d
def sigm(x):
return 1 / (1 + np.exp(-x))
def log_sigm(x):
return -np.log(1 + np.exp(-x))
dy = np.zeros(n)
loss = 0
if(np.sum(np.abs(r)) < 1e-6):
return loss, dy
if use_ndcg_as_loss and (not use_exp_gain):
g = [r[i] for i in range(n)]
else:
g = [2**r[i] for i in range(n)]
d = get_discounts(r)
idcg = sum([g[i] * d[i] for i in range(n)])
if (idcg < 1e-5):
idcg = 1e-5
d = get_discounts(y)
if use_ndcg_as_loss:
dcg = sum(g[i] * d[i] for i in range(n))
loss = 1.0 - dcg / idcg
for i in range(n):
for j in range(n):
if i == j:
continue
lambda_weight = np.abs((g[i] - g[j]) * (d[i] - d[j]))
rank_loss = -log_sigm(
y[i] - y[j] if r[i] > r[j] else y[j] - y[i]
)
rank_dy = (0. if r[i] > r[j] else 1.) - sigm(-y[i] + y[j])
if(not use_ndcg_as_loss):
loss += lambda_weight * rank_loss / idcg
dy[i] += lambda_weight * rank_dy / idcg
return loss, dy
@given(n=st.integers(1, 20), k=st.integers(2, 5), m=st.integers(3, 5))
def test_lambda_rank_loss(self, n, k, m):
y = np.random.rand(n * m).astype(np.float32)
r = np.random.randint(k, size=n * m).astype(np.float32)
# m sessions of length n
session_lengths = np.repeat(n, m).astype(np.int32)
ref_loss = np.empty(0)
ref_ndcg_loss = np.empty(0)
ref_ndcg_loss_no_exp = np.empty(0)
ref_dy = np.empty(0)
ref_dy_no_exp = np.empty(0)
for i in range(m):
r_loss, r_dy = self.ref_lambda_rank_loss(
y[(i) * n:(i + 1) * n], r[(i) * n:(i + 1) * n], False, False)
r_ndcg_loss, _ = self.ref_lambda_rank_loss(
y[(i) * n:(i + 1) * n], r[(i) * n:(i + 1) * n], True, True)
r_ndcg_loss_no_exp, r_dy_no_exp = self.ref_lambda_rank_loss(
y[(i) * n:(i + 1) * n], r[(i) * n:(i + 1) * n], True, False)
ref_loss = np.append(ref_loss, r_loss)
ref_dy = np.append(ref_dy, r_dy)
ref_ndcg_loss = np.append(ref_ndcg_loss, r_ndcg_loss)
ref_ndcg_loss_no_exp = np.append(ref_ndcg_loss_no_exp, r_ndcg_loss_no_exp)
ref_dy_no_exp = np.append(ref_dy_no_exp, r_dy_no_exp)
dloss = np.random.random(m).astype(np.float32)
workspace.blobs['y'] = y
workspace.blobs['r'] = r
workspace.blobs['session_lengths'] = session_lengths
workspace.blobs['dloss'] = dloss
op = core.CreateOperator(
'LambdaRankNdcg', ['y', 'r', 'session_lengths'], ['loss', 'dy'],
use_ndcg_as_loss=False, use_exp_gain=False)
workspace.RunOperatorOnce(op)
loss = workspace.blobs['loss']
dy = workspace.blobs['dy']
np.testing.assert_allclose(loss, ref_loss, rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)
op = core.CreateOperator(
'LambdaRankNdcg', ['y', 'r', 'session_lengths'], ['loss', 'dy'],
use_ndcg_as_loss=True, use_exp_gain=True)
workspace.RunOperatorOnce(op)
loss = workspace.blobs['loss']
dy = workspace.blobs['dy']
np.testing.assert_allclose(loss, ref_ndcg_loss, rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)
op = core.CreateOperator(
'LambdaRankNdcgGradient',
['y', 'session_lengths', 'dy', 'dloss'],
['dy_back']
)
workspace.RunOperatorOnce(op)
dy_back = workspace.blobs['dy_back']
for i in range(m):
np.testing.assert_allclose(
dy_back[i * n:(i + 1) * n],
dloss[i] * ref_dy[i * n:(i + 1) * n],
rtol=1e-5, atol=1e-6)
op = core.CreateOperator(
'LambdaRankNdcg', ['y', 'r', 'session_lengths'], ['loss', 'dy'],
use_ndcg_as_loss=True, use_exp_gain=False)
workspace.RunOperatorOnce(op)
loss = workspace.blobs['loss']
dy = workspace.blobs['dy']
np.testing.assert_allclose(loss, ref_ndcg_loss_no_exp, rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(dy, ref_dy_no_exp, rtol=1e-5, atol=1e-6)
op = core.CreateOperator(
'LambdaRankNdcgGradient',
['y', 'session_lengths', 'dy', 'dloss'],
['dy_back']
)
workspace.RunOperatorOnce(op)
dy_back = workspace.blobs['dy_back']
for i in range(m):
np.testing.assert_allclose(
dy_back[i * n:(i + 1) * n],
dloss[i] * ref_dy_no_exp[i * n:(i + 1) * n],
rtol=1e-5, atol=1e-6)
| [
"numpy.abs",
"numpy.repeat",
"caffe2.python.workspace.RunOperatorOnce",
"numpy.random.rand",
"hypothesis.strategies.integers",
"numpy.random.random",
"numpy.testing.assert_allclose",
"numpy.argsort",
"numpy.append",
"numpy.zeros",
"numpy.random.randint",
"numpy.empty",
"numpy.exp",
"caffe2... | [((843, 854), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (851, 854), True, 'import numpy as np\n'), ((2322, 2333), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2330, 2333), True, 'import numpy as np\n'), ((2359, 2370), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2367, 2370), True, 'import numpy as np\n'), ((2403, 2414), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2411, 2414), True, 'import numpy as np\n'), ((2433, 2444), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2441, 2444), True, 'import numpy as np\n'), ((2470, 2481), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2478, 2481), True, 'import numpy as np\n'), ((3498, 3631), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LambdaRankNdcg"""', "['y', 'r', 'session_lengths']", "['loss', 'dy']"], {'use_ndcg_as_loss': '(False)', 'use_exp_gain': '(False)'}), "('LambdaRankNdcg', ['y', 'r', 'session_lengths'], [\n 'loss', 'dy'], use_ndcg_as_loss=False, use_exp_gain=False)\n", (3517, 3631), False, 'from caffe2.python import core, workspace\n'), ((3663, 3692), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (3688, 3692), False, 'from caffe2.python import core, workspace\n'), ((3778, 3844), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['loss', 'ref_loss'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(loss, ref_loss, rtol=1e-05, atol=1e-06)\n', (3804, 3844), True, 'import numpy as np\n'), ((3852, 3914), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dy', 'ref_dy'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(dy, ref_dy, rtol=1e-05, atol=1e-06)\n', (3878, 3914), True, 'import numpy as np\n'), ((3929, 4060), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LambdaRankNdcg"""', "['y', 'r', 'session_lengths']", "['loss', 'dy']"], {'use_ndcg_as_loss': '(True)', 'use_exp_gain': '(True)'}), "('LambdaRankNdcg', ['y', 'r', 'session_lengths'], [\n 'loss', 'dy'], 
use_ndcg_as_loss=True, use_exp_gain=True)\n", (3948, 4060), False, 'from caffe2.python import core, workspace\n'), ((4092, 4121), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (4117, 4121), False, 'from caffe2.python import core, workspace\n'), ((4207, 4278), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['loss', 'ref_ndcg_loss'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(loss, ref_ndcg_loss, rtol=1e-05, atol=1e-06)\n', (4233, 4278), True, 'import numpy as np\n'), ((4286, 4348), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dy', 'ref_dy'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(dy, ref_dy, rtol=1e-05, atol=1e-06)\n', (4312, 4348), True, 'import numpy as np\n'), ((4363, 4466), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LambdaRankNdcgGradient"""', "['y', 'session_lengths', 'dy', 'dloss']", "['dy_back']"], {}), "('LambdaRankNdcgGradient', ['y', 'session_lengths', 'dy',\n 'dloss'], ['dy_back'])\n", (4382, 4466), False, 'from caffe2.python import core, workspace\n'), ((4522, 4551), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (4547, 4551), False, 'from caffe2.python import core, workspace\n'), ((4822, 4954), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LambdaRankNdcg"""', "['y', 'r', 'session_lengths']", "['loss', 'dy']"], {'use_ndcg_as_loss': '(True)', 'use_exp_gain': '(False)'}), "('LambdaRankNdcg', ['y', 'r', 'session_lengths'], [\n 'loss', 'dy'], use_ndcg_as_loss=True, use_exp_gain=False)\n", (4841, 4954), False, 'from caffe2.python import core, workspace\n'), ((4986, 5015), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (5011, 5015), False, 'from caffe2.python import core, workspace\n'), ((5101, 5179), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['loss', 'ref_ndcg_loss_no_exp'], {'rtol': '(1e-05)', 'atol': 
'(1e-06)'}), '(loss, ref_ndcg_loss_no_exp, rtol=1e-05, atol=1e-06)\n', (5127, 5179), True, 'import numpy as np\n'), ((5187, 5256), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dy', 'ref_dy_no_exp'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(dy, ref_dy_no_exp, rtol=1e-05, atol=1e-06)\n', (5213, 5256), True, 'import numpy as np\n'), ((5271, 5374), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LambdaRankNdcgGradient"""', "['y', 'session_lengths', 'dy', 'dloss']", "['dy_back']"], {}), "('LambdaRankNdcgGradient', ['y', 'session_lengths', 'dy',\n 'dloss'], ['dy_back'])\n", (5290, 5374), False, 'from caffe2.python import core, workspace\n'), ((5432, 5461), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (5457, 5461), False, 'from caffe2.python import core, workspace\n'), ((531, 544), 'numpy.argsort', 'np.argsort', (['v'], {}), '(v)\n', (541, 544), True, 'import numpy as np\n'), ((2954, 2981), 'numpy.append', 'np.append', (['ref_loss', 'r_loss'], {}), '(ref_loss, r_loss)\n', (2963, 2981), True, 'import numpy as np\n'), ((3004, 3027), 'numpy.append', 'np.append', (['ref_dy', 'r_dy'], {}), '(ref_dy, r_dy)\n', (3013, 3027), True, 'import numpy as np\n'), ((3057, 3094), 'numpy.append', 'np.append', (['ref_ndcg_loss', 'r_ndcg_loss'], {}), '(ref_ndcg_loss, r_ndcg_loss)\n', (3066, 3094), True, 'import numpy as np\n'), ((3131, 3182), 'numpy.append', 'np.append', (['ref_ndcg_loss_no_exp', 'r_ndcg_loss_no_exp'], {}), '(ref_ndcg_loss_no_exp, r_ndcg_loss_no_exp)\n', (3140, 3182), True, 'import numpy as np\n'), ((3212, 3249), 'numpy.append', 'np.append', (['ref_dy_no_exp', 'r_dy_no_exp'], {}), '(ref_dy_no_exp, r_dy_no_exp)\n', (3221, 3249), True, 'import numpy as np\n'), ((4639, 4759), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dy_back[i * n:(i + 1) * n]', '(dloss[i] * ref_dy[i * n:(i + 1) * n])'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(dy_back[i * n:(i + 1) * n], 
dloss[i] * ref_dy[i *\n n:(i + 1) * n], rtol=1e-05, atol=1e-06)\n', (4665, 4759), True, 'import numpy as np\n'), ((5549, 5676), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dy_back[i * n:(i + 1) * n]', '(dloss[i] * ref_dy_no_exp[i * n:(i + 1) * n])'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(dy_back[i * n:(i + 1) * n], dloss[i] *\n ref_dy_no_exp[i * n:(i + 1) * n], rtol=1e-05, atol=1e-06)\n', (5575, 5676), True, 'import numpy as np\n'), ((1980, 1998), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(20)'], {}), '(1, 20)\n', (1991, 1998), True, 'import hypothesis.strategies as st\n'), ((2002, 2019), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (2013, 2019), True, 'import hypothesis.strategies as st\n'), ((2023, 2040), 'hypothesis.strategies.integers', 'st.integers', (['(3)', '(5)'], {}), '(3, 5)\n', (2034, 2040), True, 'import hypothesis.strategies as st\n'), ((892, 901), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (898, 901), True, 'import numpy as np\n'), ((1539, 1576), 'numpy.abs', 'np.abs', (['((g[i] - g[j]) * (d[i] - d[j]))'], {}), '((g[i] - g[j]) * (d[i] - d[j]))\n', (1545, 1576), True, 'import numpy as np\n'), ((2102, 2123), 'numpy.random.rand', 'np.random.rand', (['(n * m)'], {}), '(n * m)\n', (2116, 2123), True, 'import numpy as np\n'), ((2156, 2188), 'numpy.random.randint', 'np.random.randint', (['k'], {'size': '(n * m)'}), '(k, size=n * m)\n', (2173, 2188), True, 'import numpy as np\n'), ((2269, 2284), 'numpy.repeat', 'np.repeat', (['n', 'm'], {}), '(n, m)\n', (2278, 2284), True, 'import numpy as np\n'), ((3269, 3288), 'numpy.random.random', 'np.random.random', (['m'], {}), '(m)\n', (3285, 3288), True, 'import numpy as np\n'), ((648, 668), 'numpy.log2', 'np.log2', (['(n - i + 1.0)'], {}), '(n - i + 1.0)\n', (655, 668), True, 'import numpy as np\n'), ((743, 753), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (749, 753), True, 'import numpy as np\n'), ((815, 825), 'numpy.exp', 
'np.exp', (['(-x)'], {}), '(-x)\n', (821, 825), True, 'import numpy as np\n')] |
import bv.util.logger as log
import numpy as np
from bv.mult.worker import worker
import time
import random
class DNNBasicNetwork(worker):
def buildNet(self):
log.loginfo(" build network "+str(self.sigmoid(791)))
def sigmoid(self,z):
return 1.0/(1.0+np.exp(-z))
def run(self):
while(True):
seed = random.randint(1,10010000000)
log.loginfo("cal sigmoid:"+self.sigmoid(seed).__str__()+" seed:"+seed.__str__())
time.sleep(3)
| [
"numpy.exp",
"random.randint",
"time.sleep"
] | [((354, 384), 'random.randint', 'random.randint', (['(1)', '(10010000000)'], {}), '(1, 10010000000)\n', (368, 384), False, 'import random\n'), ((489, 502), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (499, 502), False, 'import time\n'), ((281, 291), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (287, 291), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # About this code
#
# Compute the overlap between the target groups and detected groups, where
# the target groups are those identified by <NAME> and
# the detected groups are those detected by the algorithm.
#
import numpy as np
import sys
import pandas as pd
from scipy import sparse
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.colors as colors
from matplotlib import cm
def load_detected_cartels(years, cartel_dir):
    """Read the per-year cartel CSV files and stack them into one table.

    Each file ``{cartel_dir}/cartels-{year}.csv`` is loaded, tagged with its
    year, and its ``group_id`` column is shifted so that every (year, group)
    pair receives a globally unique ``group_id`` in the concatenated result.

    Parameters
    ----------
    years : iterable of int
        Years whose cartel files should be loaded.
    cartel_dir : str
        Directory containing the ``cartels-{year}.csv`` files.

    Returns
    -------
    pandas.DataFrame
        Concatenation of all yearly tables with a fresh integer index.
    """
    tables = []
    offset = 0
    for year in years:
        path = "{root}/cartels-{year}.csv".format(root=cartel_dir, year=year)
        table = pd.read_csv(path, sep="\t")
        table["year"] = year
        # Shift ids so groups from different years never collide.
        table["group_id"] += offset
        offset = np.max(table["group_id"].values) + 1
        tables.append(table)
    return pd.concat(tables, ignore_index=True)
return cartel_table
def load_journal_groups_suspended_by_TR(filename):
    """Load the tab-separated table of TR-flagged journal groups.

    Parameters
    ----------
    filename : str
        Path to a tab-separated file with the reference journal groups.

    Returns
    -------
    pandas.DataFrame
    """
    table = pd.read_csv(filename, sep="\t")
    return table
def const_membership_matrix(T, node_id_col, membership_id_col, N):
    """
    Build a sparse membership matrix U from a pandas DataFrame.

    U[i, k] = 1 if node i belongs to the kth group, and U[i, k] = 0
    otherwise. The matrix has N rows and (max membership id + 1) columns.

    Parameters
    ----------
    T : pandas.DataFrame
        Table with one row per (node, group) membership.
    node_id_col : str
        Column holding integer node ids (row indices of U).
    membership_id_col : str
        Column holding integer group ids (column indices of U).
    N : int
        Total number of nodes (number of rows of U).

    Returns
    -------
    scipy.sparse.csc_matrix
    """
    rows = T[node_id_col].values
    cols = T[membership_id_col].values
    data = np.ones(len(rows))
    n_groups = np.max(cols) + 1
    return sparse.csc_matrix((data, (rows, cols)), shape=(N, n_groups))
def add_id_column(Ta, Tb, node_id_col="mag_journal_id"):
    """
    Attach a shared integer id column "_id" to both tables.

    The id of each node label is its position in the sorted union of the
    ``node_id_col`` values of Ta and Tb, so the same label maps to the same
    integer in both tables. Both tables are modified in place.

    Parameters
    ----------
    Ta, Tb : pandas.DataFrame
        Tables that each contain ``node_id_col``.
    node_id_col : str, optional
        Column holding the node labels (default "mag_journal_id").

    Returns
    -------
    (Ta, Tb, N)
        The two tables with the new "_id" column and the number N of
        distinct node labels across both tables.
    """
    # Sorted union of all labels appearing in either table.
    labels = np.unique(
        Ta[node_id_col].values.tolist() + Tb[node_id_col].values.tolist()
    )
    label2id = {label: i for i, label in enumerate(labels)}
    for table in (Ta, Tb):
        table["_id"] = table[node_id_col].apply(lambda x: label2id[x])
    return Ta, Tb, labels.size
def calc_overlap(U_a, U_b, min_intersection=2):
    """
    Calculate the overlap between the memberships U_a and U_b.

    U_a and U_b are membership matrices as produced by
    const_membership_matrix. The result S has one row per group of U_a and
    one column per group of U_b, with

        S[k, l] = |group_k(U_a) ∩ group_l(U_b)| / |group_l(U_b)|,

    where intersections smaller than ``min_intersection`` are zeroed out
    before normalization.

    Parameters
    ----------
    U_a, U_b : scipy.sparse matrix
        Binary membership matrices with the same number of rows.
    min_intersection : int, optional
        Minimum shared-node count for an overlap to be kept (default 2).

    Returns
    -------
    numpy.ndarray
        Dense overlap-score matrix of shape (n_groups_a, n_groups_b).
    """
    # Intersection[k, l] = number of nodes shared by group k of U_a and
    # group l of U_b.
    Intersection = (U_a.T @ U_b).toarray()
    # Discard overlaps supported by fewer than min_intersection nodes.
    Intersection[Intersection < min_intersection] = 0
    # Group sizes of U_b (column sums).
    sz_b = np.array(U_b.sum(axis=0)).reshape(-1)
    # Scale each column l by 1 / |group_l(U_b)|. Broadcasting is exactly
    # equivalent to `Intersection @ np.diag(1.0 / sz_b)` but avoids
    # materializing a dense (n_groups_b x n_groups_b) diagonal matrix and
    # the full matrix multiplication.
    S = Intersection * (1.0 / sz_b)[np.newaxis, :]
    return S
def slice_groups(T, group_ids, group_id_col):
    """Return the rows of T whose ``group_id_col`` value is in ``group_ids``.

    Parameters
    ----------
    T : pandas.DataFrame
    group_ids : iterable
        Group ids to keep.
    group_id_col : str
        Column of T holding the group ids.

    Returns
    -------
    pandas.DataFrame
        The matching rows (original index preserved).
    """
    keep = T[group_id_col].isin(group_ids).values
    return T[keep]
def make_color_map(dw, min_w, max_w):
    """Build a discrete 'viridis' colormap with bins of width `dw`.

    The interval [min_w, max_w] is snapped outward to multiples of `dw`
    and divided into equal-width bins.

    :param dw: bin width.
    :param min_w: lower end of the value range.
    :param max_w: upper end of the value range.
    :return: (cmap, norm) pair for matplotlib plotting.
    """
    lo = dw * np.floor(min_w / dw)
    hi = dw * np.ceil(max_w / dw)
    n_edges = np.round((hi - lo) / dw).astype(int) + 1
    bounds = np.linspace(lo, hi, n_edges)
    norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
    return cm.get_cmap("viridis"), norm
if __name__ == "__main__":
    # Command-line interface:
    #   argv[1]: directory containing cartels-<year>.csv files (detected groups)
    #   argv[2]: TSV file of journal groups suspended by Thomson Reuters
    #   argv[3]: output figure path
    CARTEL_DIR = sys.argv[1]
    TR_GROUP_FILE = sys.argv[2]
    OUTPUT = sys.argv[3]

    detection_threshold = 0.4 # minimum overlap score at which we regard detected

    # Load the data
    groups_CI = load_detected_cartels(np.arange(2000, 2020), CARTEL_DIR)
    groups_TR = load_journal_groups_suspended_by_TR(TR_GROUP_FILE)

    # Assign a new integer id ("_id") over the union of mag_journal_id values
    groups_CI, groups_TR, N = add_id_column(groups_CI, groups_TR, "mag_journal_id")

    # Construct the membership matrices (node x group)
    U_CI = const_membership_matrix(
        groups_CI, node_id_col="_id", membership_id_col="group_id", N=N
    )
    U_TR = const_membership_matrix(
        groups_TR, node_id_col="_id", membership_id_col="group_id", N=N
    )

    # Compute the overlap; entries below the threshold are treated as undetected
    O = calc_overlap(U_TR, U_CI)
    O[O < detection_threshold] = 0

    # Detected group pairs: (TR group id, CI group id, overlap score)
    gid_TR, gid_CI, o = sparse.find(O)
    detected_groups = slice_groups(groups_CI, gid_CI, "group_id")
    # CI group id -> matched TR group id / overlap score (-1 if unmatched)
    d1 = dict(zip(gid_CI, gid_TR))
    d2 = dict(zip(gid_CI, o))
    detected_groups["overlap"] = detected_groups["group_id"].apply(
        lambda x: d2.get(x, -1)
    )
    detected_groups["group_id_TR"] = detected_groups["group_id"].apply(
        lambda x: d1.get(x, -1)
    )

    # Make a discrete colormap for the overlap scores
    cmap, norm = make_color_map(0.2, 0.4, 1)

    # Plot parameters
    # NOTE(review): "<NAME>" looks like a redacted placeholder label — confirm
    plot_TR_params = {
        "marker": "x",
        "color": "black",
        "edgecolor": "black",
        "linewidth": 1.5,
        "s": 110,
        "zorder": 5,
        "label": "<NAME>",
    }
    plot_CI_params = {
        "hue": "overlap",
        "edgecolor": "black",
        "palette": "viridis",
        "vmin": 0,
        "vmax": 1,
        "hue_norm": norm,
        "linewidth": 1.2,
        "label": "Proposed",
        "s": 130,
        "zorder": 2,
    }

    # Set up the canvas
    sns.set(font_scale=1.3)
    sns.set_style("white")
    fig, ax = plt.subplots(figsize=(10, 6))

    # Plot the points (detected pairs colored by overlap, TR groups as crosses)
    ax = sns.scatterplot(
        data=detected_groups[["year", "group_id_TR", "overlap"]].drop_duplicates(),
        x="year",
        y="group_id_TR",
        **plot_CI_params,
        ax=ax,
    )
    sns.scatterplot(
        data=groups_TR[["year", "group_id"]].drop_duplicates(),
        x="year",
        y="group_id",
        **plot_TR_params,
        ax=ax,
    )

    # Colorbar for the overlap scores
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    cbar = ax.figure.colorbar(sm)
    cbar.ax.set_title("Overlap", pad=20)

    # X and Y labels
    ax.set_ylabel("ID of the group suspended by <NAME>, $\ell$")
    ax.set_xlabel("Year")

    # Legends: dummy off-range scatters (x=1990 is outside the plotted x-range)
    # exist only to generate legend handles
    ax.scatter(
        [1990],
        [0],
        label="Number of within-group citations without self-citations",
        color="#c5c5c5",
        edgecolor="black",
        s=150,
        marker="s",
    )
    ax.scatter(
        [1990], [1], label="CIDRE", color="grey", edgecolor="black", s=150, marker="o"
    )
    handles, labels = ax.get_legend_handles_labels()
    # Reorder/keep specific legend entries; indices depend on plot order above
    order = np.array([6, 8, 7])
    handles = [handles[i] for i in order]
    labels = [labels[i] for i in order]
    leg1 = ax.legend(
        handles[:2],
        labels[:2],
        frameon=False,
        loc="center",
        bbox_to_anchor=(0.5, -0.18),
        ncol=2,
    )
    ax.add_artist(leg1)

    # Axis ranges and tick labels ("`06" ... "`18")
    xticks = np.arange(2006, 2019)
    ax.set_xlim(2006, 2018.5)
    plt.xticks(np.arange(2006, 2019), ["`%02d" % d for d in xticks - 2000])
    plt.yticks(np.arange(0, 23), np.arange(1, 23))

    # Save figure
    fig.savefig(OUTPUT, bbox_inches="tight", dpi=300)
| [
"seaborn.set",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.ceil",
"pandas.read_csv",
"numpy.round",
"numpy.floor",
"numpy.diag",
"seaborn.set_style",
"numpy.array",
"numpy.max",
"matplotlib.pyplot.subplots",
"scipy.sparse.find",
"matplotlib.colors.BoundaryNorm",
"pandas.concat",
"matpl... | [((919, 966), 'pandas.concat', 'pd.concat', (['cartel_table_list'], {'ignore_index': '(True)'}), '(cartel_table_list, ignore_index=True)\n', (928, 966), True, 'import pandas as pd\n'), ((1055, 1086), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (1066, 1086), True, 'import pandas as pd\n'), ((3108, 3159), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', ([], {'boundaries': 'bounds', 'ncolors': '(256)'}), '(boundaries=bounds, ncolors=256)\n', (3127, 3159), True, 'import matplotlib.colors as colors\n'), ((3171, 3193), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (3182, 3193), False, 'from matplotlib import cm\n'), ((4122, 4136), 'scipy.sparse.find', 'sparse.find', (['O'], {}), '(O)\n', (4133, 4136), False, 'from scipy import sparse\n'), ((5079, 5102), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.3)'}), '(font_scale=1.3)\n', (5086, 5102), True, 'import seaborn as sns\n'), ((5107, 5129), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (5120, 5129), True, 'import seaborn as sns\n'), ((5144, 5173), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5156, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5638), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (5616, 5638), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6268), 'numpy.array', 'np.array', (['[6, 8, 7]'], {}), '([6, 8, 7])\n', (6257, 6268), True, 'import numpy as np\n'), ((6568, 6589), 'numpy.arange', 'np.arange', (['(2006)', '(2019)'], {}), '(2006, 2019)\n', (6577, 6589), True, 'import numpy as np\n'), ((2691, 2710), 'numpy.diag', 'np.diag', (['(1.0 / sz_b)'], {}), '(1.0 / sz_b)\n', (2698, 2710), True, 'import numpy as np\n'), ((2913, 2933), 'numpy.floor', 'np.floor', (['(min_w / dw)'], {}), '(min_w / dw)\n', (2921, 2933), 
True, 'import numpy as np\n'), ((2956, 2975), 'numpy.ceil', 'np.ceil', (['(max_w / dw)'], {}), '(max_w / dw)\n', (2963, 2975), True, 'import numpy as np\n'), ((3475, 3496), 'numpy.arange', 'np.arange', (['(2000)', '(2020)'], {}), '(2000, 2020)\n', (3484, 3496), True, 'import numpy as np\n'), ((6635, 6656), 'numpy.arange', 'np.arange', (['(2006)', '(2019)'], {}), '(2006, 2019)\n', (6644, 6656), True, 'import numpy as np\n'), ((6711, 6727), 'numpy.arange', 'np.arange', (['(0)', '(23)'], {}), '(0, 23)\n', (6720, 6727), True, 'import numpy as np\n'), ((6729, 6745), 'numpy.arange', 'np.arange', (['(1)', '(23)'], {}), '(1, 23)\n', (6738, 6745), True, 'import numpy as np\n'), ((812, 851), 'numpy.max', 'np.max', (["cartel_table['group_id'].values"], {}), "(cartel_table['group_id'].values)\n", (818, 851), True, 'import numpy as np\n'), ((1506, 1527), 'numpy.max', 'np.max', (['membership_id'], {}), '(membership_id)\n', (1512, 1527), True, 'import numpy as np\n'), ((3034, 3074), 'numpy.round', 'np.round', (['((disc_max_w - disc_min_w) / dw)'], {}), '((disc_max_w - disc_min_w) / dw)\n', (3042, 3074), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import torch
from numpy.testing import assert_array_equal
from torch_audiomentations import PolarityInversion, PeakNormalization, Gain
from torch_audiomentations import SomeOf
class TestSomeOf(unittest.TestCase):
    """Unit tests for the SomeOf container transform."""

    def setUp(self):
        # One-second mono batch and a pool of three always-applied transforms.
        self.sample_rate = 16000
        self.audio = torch.randn(1, 1, 16000)
        self.transforms = [
            Gain(min_gain_in_db=-6.000001, max_gain_in_db=-2, p=1.0),
            PolarityInversion(p=1.0),
            PeakNormalization(p=1.0),
        ]

    def test_someof(self):
        augment = SomeOf(2, self.transforms)
        # Nothing has been selected before the first call.
        self.assertEqual(len(augment.transform_indexes), 0)
        _ = augment(samples=self.audio, sample_rate=self.sample_rate)
        # Exactly two transforms were picked and applied.
        self.assertEqual(len(augment.transform_indexes), 2)

    def test_someof_with_p_zero(self):
        augment = SomeOf(2, self.transforms, p=0.0)
        self.assertEqual(len(augment.transform_indexes), 0)
        _ = augment(samples=self.audio, sample_rate=self.sample_rate)
        # With p=0 the container is a no-op: no transforms are applied.
        self.assertEqual(len(augment.transform_indexes), 0)

    def test_someof_tuple(self):
        # (1, None) means "at least one, up to all" transforms.
        augment = SomeOf((1, None), self.transforms)
        self.assertEqual(len(augment.transform_indexes), 0)
        _ = augment(samples=self.audio, sample_rate=self.sample_rate)
        self.assertTrue(
            len(augment.transform_indexes) > 0
        )

    def test_someof_freeze_and_unfreeze_parameters(self):
        augment = SomeOf(2, self.transforms)
        fixed_input = np.array([[[1.0, 0.5, -0.25, -0.125, 0.0]]], dtype=np.float32)
        fixed_input = torch.from_numpy(fixed_input)
        self.assertEqual(len(augment.transform_indexes), 0)
        first_out = augment(
            samples=fixed_input, sample_rate=self.sample_rate
        ).numpy()
        first_indexes = augment.transform_indexes
        self.assertEqual(len(augment.transform_indexes), 2)
        augment.freeze_parameters()
        second_out = augment(
            samples=fixed_input, sample_rate=self.sample_rate
        ).numpy()
        second_indexes = augment.transform_indexes
        # Frozen parameters must reproduce both the output and the selection.
        assert_array_equal(first_out, second_out)
        assert_array_equal(first_indexes, second_indexes)
| [
"torch_audiomentations.SomeOf",
"torch_audiomentations.Gain",
"torch.from_numpy",
"torch_audiomentations.PeakNormalization",
"numpy.array",
"torch_audiomentations.PolarityInversion",
"torch.randn",
"numpy.testing.assert_array_equal"
] | [((327, 351), 'torch.randn', 'torch.randn', (['(1)', '(1)', '(16000)'], {}), '(1, 1, 16000)\n', (338, 351), False, 'import torch\n'), ((583, 609), 'torch_audiomentations.SomeOf', 'SomeOf', (['(2)', 'self.transforms'], {}), '(2, self.transforms)\n', (589, 609), False, 'from torch_audiomentations import SomeOf\n'), ((928, 961), 'torch_audiomentations.SomeOf', 'SomeOf', (['(2)', 'self.transforms'], {'p': '(0.0)'}), '(2, self.transforms, p=0.0)\n', (934, 961), False, 'from torch_audiomentations import SomeOf\n'), ((1274, 1308), 'torch_audiomentations.SomeOf', 'SomeOf', (['(1, None)', 'self.transforms'], {}), '((1, None), self.transforms)\n', (1280, 1308), False, 'from torch_audiomentations import SomeOf\n'), ((1678, 1704), 'torch_audiomentations.SomeOf', 'SomeOf', (['(2)', 'self.transforms'], {}), '(2, self.transforms)\n', (1684, 1704), False, 'from torch_audiomentations import SomeOf\n'), ((1724, 1786), 'numpy.array', 'np.array', (['[[[1.0, 0.5, -0.25, -0.125, 0.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 0.5, -0.25, -0.125, 0.0]]], dtype=np.float32)\n', (1732, 1786), True, 'import numpy as np\n'), ((1805, 1830), 'torch.from_numpy', 'torch.from_numpy', (['samples'], {}), '(samples)\n', (1821, 1830), False, 'import torch\n'), ((2365, 2423), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['processed_samples1', 'processed_samples2'], {}), '(processed_samples1, processed_samples2)\n', (2383, 2423), False, 'from numpy.testing import assert_array_equal\n'), ((2432, 2490), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['transform_indexes1', 'transform_indexes2'], {}), '(transform_indexes1, transform_indexes2)\n', (2450, 2490), False, 'from numpy.testing import assert_array_equal\n'), ((393, 449), 'torch_audiomentations.Gain', 'Gain', ([], {'min_gain_in_db': '(-6.000001)', 'max_gain_in_db': '(-2)', 'p': '(1.0)'}), '(min_gain_in_db=-6.000001, max_gain_in_db=-2, p=1.0)\n', (397, 449), False, 'from torch_audiomentations import PolarityInversion, 
PeakNormalization, Gain\n'), ((463, 487), 'torch_audiomentations.PolarityInversion', 'PolarityInversion', ([], {'p': '(1.0)'}), '(p=1.0)\n', (480, 487), False, 'from torch_audiomentations import PolarityInversion, PeakNormalization, Gain\n'), ((501, 525), 'torch_audiomentations.PeakNormalization', 'PeakNormalization', ([], {'p': '(1.0)'}), '(p=1.0)\n', (518, 525), False, 'from torch_audiomentations import PolarityInversion, PeakNormalization, Gain\n')] |
EMPTY = 0
BLACK = 1
WHITE = 2
BORDER = 3
FLOODFILL = 4
import numpy as np
from pattern import pat3set
import sys
import random
class GoBoardUtil(object):
    """Static helpers for Go: simulation policies (atari capture / defense,
    3x3 patterns, random), move filters, coordinate conversion, and
    color/board utilities.  Moves are one-dimensional board indices unless
    stated otherwise; None denotes a pass."""
    @staticmethod
    def playGame(board, color, **kwargs):
        """Play a game to the end with the simulation policy, `color` to move.

        Keyword args: komi, limit (max number of moves), and the policy
        switches selfatari / pattern / AC / AD.  Returns
        board.get_winner(komi).  Two consecutive passes end the game.
        """
        komi = kwargs.pop('komi', 0)
        limit = kwargs.pop('limit', 1000)
        check_selfatari = kwargs.pop('selfatari', True)
        pattern = kwargs.pop('pattern', True)
        AC = kwargs.pop('AC', True)
        AD = kwargs.pop('AD', True)
        if kwargs:
            raise TypeError('Unexpected **kwargs: %r' % kwargs)
        numPass = 0
        for _ in range(limit):
            move = GoBoardUtil.generate_move_with_filter(board,pattern,AC,AD,check_selfatari)
            if move != None:
                isLegalMove = board.move(move,color)
                assert isLegalMove
                numPass = 0
            else:
                # None means pass; two passes in a row end the game
                board.move(move,color)
                numPass += 1
                if numPass == 2:
                    break
            color = GoBoardUtil.opponent(color)
        winner = board.get_winner(komi)
        return winner
    @staticmethod
    def generate_legal_moves(board, color):
        """
        generate a list of legal moves
        Arguments
        ---------
        board : np.array
            a SIZExSIZE array representing the board
        color : {'b','w'}
            the color to generate the move for.
        """
        empty = board.get_empty_points()
        legal_moves = []
        for move in empty:
            if board.check_legal(move, color):
                legal_moves.append(move)
        return legal_moves
    @staticmethod
    def sorted_point_string(points, ns):
        """Format `points` (1-d indices, board stride `ns`) as a sorted,
        space-separated string of board coordinates."""
        result = []
        for point in points:
            x, y = GoBoardUtil.point_to_coord(point, ns)
            result.append(GoBoardUtil.format_point((x, y)))
        return ' '.join(sorted(result))
    @staticmethod
    def generate_pattern_moves(board):
        """Return empty points next to the last moves whose 3x3 neighborhood
        matches one of the patterns in `pat3set`."""
        # NOTE(review): `color` is assigned but not used in this method
        color = board.current_player
        pattern_checking_set = board.last_moves_empty_neighbors()
        moves = []
        for p in pattern_checking_set:
            if (board.neighborhood_33(p) in pat3set):
                assert p not in moves
                assert board.board[p] == EMPTY
                moves.append(p)
        return moves
    @staticmethod
    def generate_all_policy_moves(board,pattern,check_selfatari):
        """
        generate a list of policy moves on board for board.current_player.
        Use in UI only. For playing, use generate_move_with_filter
        which is more efficient
        """
        if board.last_move != None:
            opponent_color = board.get_color( board.last_move )
            player_color = GoBoardUtil.opponent( opponent_color )
            num_lib = board.block_liberty( board.last_move )
            # Atari capture: the last opponent move left its block in atari
            if num_lib == 1:
                moves = board.last_move_empty_neighbors()
                moves = GoBoardUtil.filter_moves(board, moves, check_selfatari)
                if len( moves ) != 0:
                    return moves, "AtariCapture"
            # Atari defense: run away with the block, or capture the attacker
            neighbors = board._neighbors( board.last_move )
            moves = []
            for n in neighbors:
                if board.get_color( n ) == player_color:
                    liberties = board.block_liberty_list( n )
                    if len( liberties ) == 1:
                        temp_move = liberties[0]
                        # only run if the escape point gains more than 1 liberty
                        if board._liberty( temp_move, player_color ) > 1:
                            moves.append( temp_move )
                    opponent_blocks = board.block_opponent_neighbors( n )
                    for block in opponent_blocks:
                        liberty = board.block_liberty_list( block[0] )
                        if len( liberty ) == 1:
                            moves.append( liberty[0] )
            moves = GoBoardUtil.filter_moves(board, moves, check_selfatari)
            if len( moves ) > 0:
                return moves, "AtariDefense"
        pattern_moves = GoBoardUtil.generate_pattern_moves(board)
        pattern_moves = GoBoardUtil.filter_moves(board, pattern_moves, check_selfatari)
        if len(pattern_moves) > 0:
            return pattern_moves, "Pattern"
        return GoBoardUtil.generate_random_moves(board), "Random"
    @staticmethod
    def generate_random_moves(board):
        """Return all legal, non-eye-filling moves for the current player."""
        empty_points = board.get_empty_points()
        color = board.current_player
        moves = []
        for move in empty_points:
            if board.check_legal(move, color) and not board.is_eye(move, color):
                moves.append(move)
        return moves
    @staticmethod
    def generate_random_move(board):
        """Return one uniformly random legal, non-eye-filling move, or None."""
        color = board.current_player
        moves = board.get_empty_points()
        while len(moves) > 0:
            index = random.randint(0,len(moves) - 1)
            move = moves[index]
            if board.check_legal(move, color) and not board.is_eye(move, color):
                return move
            else:
                # delete moves[index] by overwriting with last in list
                lastIndex = len(moves) - 1
                if index < lastIndex:
                    moves[index] = moves[lastIndex]
                moves.pop()
        return None
    @staticmethod
    def filter_moves(board, moves, check_selfatari):
        """Return the subset of `moves` that survives the move filter."""
        color = board.current_player
        good_moves = []
        for move in moves:
            if not GoBoardUtil.filter(board,move,color,check_selfatari):
                good_moves.append(move)
        return good_moves
    # return True if move should be filtered
    @staticmethod
    def filleye_filter(board, move, color):
        """Filter illegal moves and moves that fill the player's own eye."""
        assert move != None
        return not board.check_legal(move, color) or board.is_eye(move, color)
    # return True if move should be filtered
    @staticmethod
    def selfatari_filter(board, move, color):
        """Filter eye-filling/illegal moves and self-atari moves."""
        return ( GoBoardUtil.filleye_filter(board, move, color)
                 or GoBoardUtil.selfatari(board, move, color)
               )
    # return True if move should be filtered
    @staticmethod
    def filter(board, move, color, check_selfatari):
        """Dispatch to the self-atari filter or the plain eye filter."""
        if check_selfatari:
            return GoBoardUtil.selfatari_filter(board, move, color)
        else:
            return GoBoardUtil.filleye_filter(board, move, color)
    @staticmethod
    def filter_moves_and_generate(board, moves, check_selfatari):
        """Pick random moves from `moves` until one passes the filter.

        Mutates `moves` (filtered candidates are removed); returns the first
        acceptable move, or None if all candidates are filtered out.
        """
        color = board.current_player
        while len(moves) > 0:
            candidate = random.choice(moves)
            if GoBoardUtil.filter(board, candidate, color, check_selfatari):
                moves.remove(candidate)
            else:
                return candidate
        return None
    @staticmethod
    def generate_move_with_filter(board, use_pattern, AC, AD, check_selfatari):
        """
        Arguments
        ---------
        check_selfatari: filter selfatari moves?
            Note that even if True, this filter only applies to pattern moves
        use_pattern: Use pattern policy?
        """
        move = None
        if AC and board.last_move != None:
            opponent_color = board.get_color( board.last_move )
            player_color = GoBoardUtil.opponent( opponent_color )
            num_lib = board.block_liberty( board.last_move )
            # Atari capture: try to capture the opponent block in atari
            if num_lib == 1:
                moves = board.last_move_empty_neighbors()
                move = GoBoardUtil.filter_moves_and_generate( board, moves,
                                                               check_selfatari )
            # Atari defense: save own blocks in atari near the last move
            if move == None and AD and board.last_move != None:
                neighbors = board._neighbors( board.last_move )
                moves = []
                for n in neighbors:
                    if board.get_color( n ) == player_color:
                        liberties = board.block_liberty_list( n )
                        if len( liberties ) == 1:
                            temp_move = liberties[0]
                            if board._liberty( temp_move, player_color ) > 1:
                                moves.append( temp_move )
                        opponent_blocks = board.block_opponent_neighbors( n )
                        for block in opponent_blocks:
                            liberty = board.block_liberty_list( block[0] )
                            if len( liberty ) == 1:
                                moves.append( liberty[0] )
                if len( moves ) > 0:
                    move = GoBoardUtil.filter_moves_and_generate( board, moves,
                                                                   check_selfatari )
        if move == None and use_pattern:
            moves = GoBoardUtil.generate_pattern_moves(board)
            move = GoBoardUtil.filter_moves_and_generate(board, moves,
                                                          check_selfatari)
        if move == None:
            move = GoBoardUtil.generate_random_move(board)
        return move
    @staticmethod
    def selfatari(board, move, color):
        """Return True if playing `move` would put the own block in atari."""
        max_old_liberty = GoBoardUtil.blocks_max_liberty(board, move, color, 2)
        if max_old_liberty > 2:
            return False
        cboard = board.copy()
        # swap out true board for simulation board, and try to play the move
        isLegal = cboard.move(move, color)
        if isLegal:
            new_liberty = cboard._liberty(move,color)
            if new_liberty==1:
                return True
        return False
    @staticmethod
    def blocks_max_liberty(board, point, color, limit):
        """Max liberty count over own blocks adjacent to `point`, early-exiting
        once a count exceeds `limit`; -1 if the point starts a new block."""
        assert board.board[point] == EMPTY
        max_lib = -1 # will return this value if this point is a new block
        neighbors = board._neighbors(point)
        for n in neighbors:
            if board.board[n] == color:
                num_lib = board._liberty(n,color)
                if num_lib > limit:
                    return num_lib
                if num_lib > max_lib:
                    max_lib = num_lib
        return max_lib
    @staticmethod
    def format_point(move):
        """
        Return coordinates as a string like 'a1', or 'pass'.
        Arguments
        ---------
        move : (row, col), or None for pass
        Returns
        -------
        The move converted from a tuple to a Go position (e.g. d4)
        """
        # letter 'i' is skipped by Go convention
        column_letters = "abcdefghjklmnopqrstuvwxyz"
        if move is None:
            return "pass"
        row, col = move
        if not 0 <= row < 25 or not 0 <= col < 25:
            raise ValueError
        return column_letters[col - 1] + str(row)
    @staticmethod
    def move_to_coord(point, board_size):
        """
        Interpret a string representing a point, as specified by GTP.
        Arguments
        ---------
        point : str
            the point to convert to a tuple
        board_size : int
            size of the board
        Returns
        -------
        a pair of coordinates (row, col) in range(1, board_size+1)
        Raises
        ------
        ValueError : 'point' isn't a valid GTP point specification for a board of size 'board_size'.
        """
        if not 0 < board_size <= 25:
            raise ValueError("board_size out of range")
        try:
            s = point.lower()
        except Exception:
            raise ValueError("invalid point")
        if s == "pass":
            return None
        try:
            col_c = s[0]
            if (not "a" <= col_c <= "z") or col_c == "i":
                raise ValueError
            if col_c > "i":
                # skip 'i' in the column lettering
                col = ord(col_c) - ord("a")
            else:
                col = ord(col_c) - ord("a") + 1
            row = int(s[1:])
            if row < 1:
                raise ValueError
        except (IndexError, ValueError):
            raise ValueError("wrong coordinate")
        if not (col <= board_size and row <= board_size):
            raise ValueError("wrong coordinate")
        return row, col
    @staticmethod
    def opponent(color):
        """Return the opposite color (BLACK <-> WHITE)."""
        opponent = {WHITE:BLACK, BLACK:WHITE}
        try:
            return opponent[color]
        except:
            raise ValueError("Wrong color provided for opponent function")
    @staticmethod
    def color_to_int(c):
        """convert character representing player color to the appropriate number"""
        color_to_int = {"b": BLACK , "w": WHITE, "e":EMPTY, "BORDER":BORDER, "FLOODFILL":FLOODFILL}
        try:
            return color_to_int[c]
        except:
            raise ValueError("wrong color")
    @staticmethod
    def int_to_color(i):
        """convert number representing player color to the appropriate character """
        int_to_color = {BLACK:"b", WHITE:"w", EMPTY:"e", BORDER:"BORDER", FLOODFILL:"FLOODFILL"}
        try:
            return int_to_color[i]
        except:
            raise ValueError("Provided integer value for color is invalid")
    @staticmethod
    def copyb2b(board, copy_board):
        """Return an independent copy of this Board."""
        copy_board.board = np.copy(board.board)
        copy_board.suicide = board.suicide  # checking for suicide move
        copy_board.winner = board.winner
        copy_board.NS = board.NS
        copy_board.WE = board.WE
        copy_board._is_empty = board._is_empty
        copy_board.passes_black = board.passes_black
        copy_board.passes_white = board.passes_white
        copy_board.current_player = board.current_player
        copy_board.ko_constraint = board.ko_constraint
        copy_board.white_captures = board.white_captures
        copy_board.black_captures = board.black_captures
    @staticmethod
    def point_to_coord(point, ns):
        """
        Transform one dimensional point presentation to two dimensional.
        Arguments
        ---------
        point
        Returns
        -------
        x , y : int
            coordinates of the point  1 <= x, y <= size
        """
        if point is None:
            return 'pass'
        row, col = divmod(point, ns)
        return row,col
| [
"numpy.copy",
"random.choice"
] | [((13124, 13144), 'numpy.copy', 'np.copy', (['board.board'], {}), '(board.board)\n', (13131, 13144), True, 'import numpy as np\n'), ((6549, 6569), 'random.choice', 'random.choice', (['moves'], {}), '(moves)\n', (6562, 6569), False, 'import random\n')] |
import os
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from toolkit import (generate_master_flat_and_dark, photometry,
PhotometryResults, PCA_light_curve, params_b,
transit_model_b)
# Image paths (hard-coded to the observing-run directory for UT 2017-05-02)
image_paths = sorted(glob('/Users/bmmorris/data/Q2UW01/UT170502/cleaned/hat11*.fits'))
dark_paths = glob('/Users/bmmorris/data/Q2UW01/UT170502/dark_10s_2x2.????.fits')
flat_paths = glob('/Users/bmmorris/data/Q2UW01/UT170502/domeflat_r.????.fits')
master_flat_path = 'outputs/masterflat_20170502.fits'
master_dark_path = 'outputs/masterdark_20170502.fits'

# Photometry settings
target_centroid = np.array([[613], [750]])  # initial (x, y) guess for the target star
comparison_flux_threshold = 0.01
aperture_radii = np.arange(45, 70, 1)  # pixel radii to try
centroid_stamp_half_width = 30
psf_stddev_init = 30
aperture_annulus_radius = 10
transit_parameters = params_b

output_path = 'outputs/hat11_20170502.npz'
force_recompute_photometry = False  # set True to redo photometry even if cached

# Calculate master dark/flat (only if not already on disk):
if not os.path.exists(master_dark_path) or not os.path.exists(master_flat_path):
    print('Calculating master flat:')
    generate_master_flat_and_dark(flat_paths, dark_paths,
                                  master_flat_path, master_dark_path)

# Do photometry (or load the cached result):
if not os.path.exists(output_path) or force_recompute_photometry:
    print('Calculating photometry:')
    phot_results = photometry(image_paths, master_dark_path, master_flat_path,
                              target_centroid, comparison_flux_threshold,
                              aperture_radii, centroid_stamp_half_width,
                              psf_stddev_init, aperture_annulus_radius,
                              output_path)
else:
    phot_results = PhotometryResults.load(output_path)

print('Calculating PCA...')
light_curve = PCA_light_curve(phot_results, transit_parameters, plots=False,
                              validation_duration_fraction=0.05,
                              buffer_time=0*u.min, flux_threshold=0.5,
                              validation_time=-0.55, plot_validation=False)

# Mask frames where the target flux drops far below its median (clouds)
target_flux = phot_results.fluxes[:, 0, 0]
not_cloudy = target_flux > 0.1*np.median(target_flux)

# further de-trend with airmass:
# NOTE(review): ycentroids is not masked with not_cloudy here — shapes will
# mismatch if any frames are flagged cloudy; confirm intended behavior
X = np.array([light_curve[not_cloudy],
              phot_results.airmass[not_cloudy],
              phot_results.ycentroids[:, 0]]).T
c = np.linalg.lstsq(X, transit_model_b(phot_results.times[not_cloudy]))[0]
detrended_light_curve = np.dot(X, c)

plt.plot(phot_results.times[not_cloudy], detrended_light_curve, '.', color='gray')

from scipy.stats import binned_statistic

# Binned light curve overplotted with the transit model
bs = binned_statistic(phot_results.times[not_cloudy], detrended_light_curve, bins=50)
bin_centers = 0.5*(bs.bin_edges[1:] + bs.bin_edges[:-1])
plt.plot(bin_centers, bs.statistic, 'rs')
plt.plot(phot_results.times, transit_model_b(phot_results.times), 'r')

#egress = 2457777.01
#post_egress_std = np.std(light_curve[phot_results.times > egress])
#plt.axvline(egress)
plt.xlabel('Time [JD]')
plt.ylabel('Flux')
plt.show()

# Save (time, detrended flux, raw target flux) columns as plain text
output_lc = 'outputs/hat11_20170502.txt'
np.savetxt(output_lc, np.vstack([phot_results.times,
                                 detrended_light_curve,
                                 phot_results.fluxes[:, 0, 0]]).T)
#
# plt.figure()
# plt.plot(phot_results.times, light_curve, '.', color='gray')
# plt.plot(phot_results.times, transit_model_b(phot_results.times), 'r')
#
# from scipy.stats import binned_statistic
#
# bs = binned_statistic(phot_results.times, light_curve, bins=50)
# bin_centers = 0.5*(bs.bin_edges[1:] + bs.bin_edges[:-1])
# plt.plot(bin_centers, bs.statistic, 'rs')
#
# plt.plot(phot_results.times, transit_model_b(phot_results.times), 'r')
#
#
# #egress = 2457777.01
# #post_egress_std = np.std(light_curve[phot_results.times > egress])
# #plt.axvline(egress)
# plt.xlabel('Time [JD]')
# plt.ylabel('Flux')
# plt.title('rms = {0}'.format(np.std(light_curve - transit_model_b(phot_results.times))))
# plt.show()
# plt.show() | [
"os.path.exists",
"toolkit.photometry",
"toolkit.transit_model_b",
"scipy.stats.binned_statistic",
"numpy.median",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"toolkit.generate_master_flat_and_dark",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.dot",
"toolkit.P... | [((394, 461), 'glob.glob', 'glob', (['"""/Users/bmmorris/data/Q2UW01/UT170502/dark_10s_2x2.????.fits"""'], {}), "('/Users/bmmorris/data/Q2UW01/UT170502/dark_10s_2x2.????.fits')\n", (398, 461), False, 'from glob import glob\n'), ((475, 540), 'glob.glob', 'glob', (['"""/Users/bmmorris/data/Q2UW01/UT170502/domeflat_r.????.fits"""'], {}), "('/Users/bmmorris/data/Q2UW01/UT170502/domeflat_r.????.fits')\n", (479, 540), False, 'from glob import glob\n'), ((690, 714), 'numpy.array', 'np.array', (['[[613], [750]]'], {}), '([[613], [750]])\n', (698, 714), True, 'import numpy as np\n'), ((765, 785), 'numpy.arange', 'np.arange', (['(45)', '(70)', '(1)'], {}), '(45, 70, 1)\n', (774, 785), True, 'import numpy as np\n'), ((1828, 2022), 'toolkit.PCA_light_curve', 'PCA_light_curve', (['phot_results', 'transit_parameters'], {'plots': '(False)', 'validation_duration_fraction': '(0.05)', 'buffer_time': '(0 * u.min)', 'flux_threshold': '(0.5)', 'validation_time': '(-0.55)', 'plot_validation': '(False)'}), '(phot_results, transit_parameters, plots=False,\n validation_duration_fraction=0.05, buffer_time=0 * u.min,\n flux_threshold=0.5, validation_time=-0.55, plot_validation=False)\n', (1843, 2022), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((2472, 2484), 'numpy.dot', 'np.dot', (['X', 'c'], {}), '(X, c)\n', (2478, 2484), True, 'import numpy as np\n'), ((2486, 2573), 'matplotlib.pyplot.plot', 'plt.plot', (['phot_results.times[not_cloudy]', 'detrended_light_curve', '"""."""'], {'color': '"""gray"""'}), "(phot_results.times[not_cloudy], detrended_light_curve, '.', color=\n 'gray')\n", (2494, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2702), 'scipy.stats.binned_statistic', 'binned_statistic', (['phot_results.times[not_cloudy]', 'detrended_light_curve'], {'bins': '(50)'}), '(phot_results.times[not_cloudy], detrended_light_curve, bins=50\n )\n', (2633, 2702), False, 
'from scipy.stats import binned_statistic\n'), ((2755, 2796), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_centers', 'bs.statistic', '"""rs"""'], {}), "(bin_centers, bs.statistic, 'rs')\n", (2763, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2979, 3002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [JD]"""'], {}), "('Time [JD]')\n", (2989, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3021), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (3013, 3021), True, 'import matplotlib.pyplot as plt\n'), ((3022, 3032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3030, 3032), True, 'import matplotlib.pyplot as plt\n'), ((315, 379), 'glob.glob', 'glob', (['"""/Users/bmmorris/data/Q2UW01/UT170502/cleaned/hat11*.fits"""'], {}), "('/Users/bmmorris/data/Q2UW01/UT170502/cleaned/hat11*.fits')\n", (319, 379), False, 'from glob import glob\n'), ((1136, 1229), 'toolkit.generate_master_flat_and_dark', 'generate_master_flat_and_dark', (['flat_paths', 'dark_paths', 'master_flat_path', 'master_dark_path'], {}), '(flat_paths, dark_paths, master_flat_path,\n master_dark_path)\n', (1165, 1229), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((1401, 1610), 'toolkit.photometry', 'photometry', (['image_paths', 'master_dark_path', 'master_flat_path', 'target_centroid', 'comparison_flux_threshold', 'aperture_radii', 'centroid_stamp_half_width', 'psf_stddev_init', 'aperture_annulus_radius', 'output_path'], {}), '(image_paths, master_dark_path, master_flat_path, target_centroid,\n comparison_flux_threshold, aperture_radii, centroid_stamp_half_width,\n psf_stddev_init, aperture_annulus_radius, output_path)\n', (1411, 1610), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((1749, 1784), 'toolkit.PhotometryResults.load', 'PhotometryResults.load', 
(['output_path'], {}), '(output_path)\n', (1771, 1784), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((2240, 2344), 'numpy.array', 'np.array', (['[light_curve[not_cloudy], phot_results.airmass[not_cloudy], phot_results.\n ycentroids[:, 0]]'], {}), '([light_curve[not_cloudy], phot_results.airmass[not_cloudy],\n phot_results.ycentroids[:, 0]])\n', (2248, 2344), True, 'import numpy as np\n'), ((2827, 2862), 'toolkit.transit_model_b', 'transit_model_b', (['phot_results.times'], {}), '(phot_results.times)\n', (2842, 2862), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((1020, 1052), 'os.path.exists', 'os.path.exists', (['master_dark_path'], {}), '(master_dark_path)\n', (1034, 1052), False, 'import os\n'), ((1060, 1092), 'os.path.exists', 'os.path.exists', (['master_flat_path'], {}), '(master_flat_path)\n', (1074, 1092), False, 'import os\n'), ((1286, 1313), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1300, 1313), False, 'import os\n'), ((2178, 2200), 'numpy.median', 'np.median', (['target_flux'], {}), '(target_flux)\n', (2187, 2200), True, 'import numpy as np\n'), ((2395, 2442), 'toolkit.transit_model_b', 'transit_model_b', (['phot_results.times[not_cloudy]'], {}), '(phot_results.times[not_cloudy])\n', (2410, 2442), False, 'from toolkit import generate_master_flat_and_dark, photometry, PhotometryResults, PCA_light_curve, params_b, transit_model_b\n'), ((3097, 3185), 'numpy.vstack', 'np.vstack', (['[phot_results.times, detrended_light_curve, phot_results.fluxes[:, 0, 0]]'], {}), '([phot_results.times, detrended_light_curve, phot_results.fluxes[:,\n 0, 0]])\n', (3106, 3185), True, 'import numpy as np\n')] |
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Numba-compiled functions.
Provides an arsenal of Numba-compiled functions that are used by accessors
and in many other parts of the backtesting pipeline, such as technical indicators.
These only accept NumPy arrays and other Numba-compatible types.
```pycon
>>> import numpy as np
>>> import vectorbt as vbt
>>> # vectorbt.signals.nb.pos_rank_nb
>>> vbt.signals.nb.pos_rank_nb(np.array([False, True, True, True, False])[:, None])[:, 0]
[-1 0 1 2 -1]
```
!!! note
vectorbt treats matrices as first-class citizens and expects input arrays to be
2-dim, unless function has suffix `_1d` or is meant to be input to another function.
Data is processed along index (axis 0).
All functions passed as argument should be Numba-compiled.
Returned indices should be absolute."""
import numpy as np
from numba import njit
from vectorbt import _typing as tp
from vectorbt.base.reshape_fns import flex_select_auto_nb
from vectorbt.generic.enums import range_dt, RangeStatus
from vectorbt.signals.enums import StopType
from vectorbt.utils.array_ import uniform_summing_to_one_nb, rescale_float_to_int_nb, renormalize_nb
# ############# Generation ############# #
@njit
def generate_nb(shape: tp.Shape,
                pick_first: bool,
                choice_func_nb: tp.ChoiceFunc, *args) -> tp.Array2d:
    """Create a boolean matrix of `shape` and pick signals using `choice_func_nb`.
    Args:
        shape (array): Target shape.
        pick_first (bool): Whether to pick the first signal out of all returned by `choice_func_nb`.
        choice_func_nb (callable): Choice function.
            `choice_func_nb` should accept index of the start of the range `from_i`,
            index of the end of the range `to_i`, index of the column `col`, and `*args`.
            It should return an array of indices from `[from_i, to_i)` (can be empty).
        *args: Arguments passed to `choice_func_nb`.
    Usage:
        ```pycon
        >>> from numba import njit
        >>> import numpy as np
        >>> from vectorbt.signals.nb import generate_nb
        >>> @njit
        ... def choice_func_nb(from_i, to_i, col):
        ...     return np.array([from_i + col])
        >>> generate_nb((5, 3), False, choice_func_nb)
        [[ True False False]
        [False True False]
        [False False True]
        [False False False]
        [False False False]]
        ```
    """
    out = np.full(shape, False, dtype=np.bool_)
    for col in range(out.shape[1]):
        # Let the UDF choose signal positions for the entire column at once
        idxs = choice_func_nb(0, shape[0], col, *args)
        if len(idxs) == 0:
            continue
        if pick_first:
            # Keep only the first of the returned indices
            first_i = idxs[0]
            if first_i < 0 or first_i >= shape[0]:
                raise ValueError("First returned index is out of bounds")
            out[first_i, col] = True
        else:
            if np.any(idxs < 0) or np.any(idxs >= shape[0]):
                raise ValueError("Returned indices are out of bounds")
            out[idxs, col] = True
    return out
@njit
def generate_ex_nb(entries: tp.Array2d,
                   wait: int,
                   until_next: bool,
                   skip_until_exit: bool,
                   pick_first: bool,
                   exit_choice_func_nb: tp.ChoiceFunc, *args) -> tp.Array2d:
    """Pick exit signals using `exit_choice_func_nb` after each signal in `entries`.
    Args:
        entries (array): Boolean array with entry signals.
        wait (int): Number of ticks to wait before placing exits.
            !!! note
                Setting `wait` to 0 or False may result in two signals at one bar.
        until_next (int): Whether to place signals up to the next entry signal.
            !!! note
                Setting it to False makes it difficult to tell which exit belongs to which entry.
        skip_until_exit (bool): Whether to skip processing entry signals until the next exit.
            Has only effect when `until_next` is disabled.
            !!! note
                Setting it to True makes it difficult to tell which exit belongs to which entry.
        pick_first (bool): Whether to pick the first signal out of all returned by `exit_choice_func_nb`.
        exit_choice_func_nb (callable): Exit choice function.
            See `choice_func_nb` in `generate_nb`.
        *args (callable): Arguments passed to `exit_choice_func_nb`.
    """
    exits = np.full_like(entries, False)
    for col in range(entries.shape[1]):
        entry_idxs = np.flatnonzero(entries[:, col])
        # Index of the most recently placed exit in this column (-1 = none yet)
        last_exit_i = -1
        for i in range(entry_idxs.shape[0]):
            # Calculate the range to choose from
            if skip_until_exit and entry_idxs[i] <= last_exit_i:
                # This entry comes at or before the last exit -> ignore it
                continue
            from_i = entry_idxs[i] + wait
            if i < entry_idxs.shape[0] - 1 and until_next:
                to_i = entry_idxs[i + 1]
            else:
                to_i = entries.shape[0]
            if to_i > from_i:
                # Run the UDF
                idxs = exit_choice_func_nb(from_i, to_i, col, *args)
                if len(idxs) == 0:
                    continue
                if pick_first:
                    first_i = idxs[0]
                    if first_i < from_i or first_i >= to_i:
                        raise ValueError("First returned index is out of bounds")
                    exits[first_i, col] = True
                    last_exit_i = first_i
                else:
                    if np.any(idxs < from_i) or np.any(idxs >= to_i):
                        raise ValueError("Returned indices are out of bounds")
                    exits[idxs, col] = True
                    last_exit_i = idxs[-1]
    return exits
@njit
def generate_enex_nb(shape: tp.Shape,
                     entry_wait: int,
                     exit_wait: int,
                     entry_pick_first: bool,
                     exit_pick_first: bool,
                     entry_choice_func_nb: tp.ChoiceFunc,
                     entry_args: tp.Args,
                     exit_choice_func_nb: tp.ChoiceFunc,
                     exit_args: tp.Args) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Pick entry signals using `entry_choice_func_nb` and exit signals using
    `exit_choice_func_nb` one after another.
    Args:
        shape (array): Target shape.
        entry_wait (int): Number of ticks to wait before placing entries.
            !!! note
                Setting `entry_wait` to 0 or False assumes that both entry and exit can be processed
                within the same bar, and exit can be processed before entry.
        exit_wait (int): Number of ticks to wait before placing exits.
            !!! note
                Setting `exit_wait` to 0 or False assumes that both entry and exit can be processed
                within the same bar, and entry can be processed before exit.
        entry_pick_first (bool): Whether to pick the first entry out of all returned by `entry_choice_func_nb`.
        exit_pick_first (bool): Whether to pick the first exit out of all returned by `exit_choice_func_nb`.
            Setting it to False acts similarly to setting `skip_until_exit` to True in `generate_ex_nb`.
        entry_choice_func_nb (callable): Entry choice function.
            See `choice_func_nb` in `generate_nb`.
        entry_args (tuple): Arguments unpacked and passed to `entry_choice_func_nb`.
        exit_choice_func_nb (callable): Exit choice function.
            See `choice_func_nb` in `generate_nb`.
        exit_args (tuple): Arguments unpacked and passed to `exit_choice_func_nb`.
    """
    entries = np.full(shape, False)
    exits = np.full(shape, False)
    if entry_wait == 0 and exit_wait == 0:
        raise ValueError("entry_wait and exit_wait cannot be both 0")
    for col in range(shape[1]):
        # prev_prev_i/prev_i track the last two placed signals to detect stalls
        prev_prev_i = -2
        prev_i = -1
        i = 0
        while True:
            to_i = shape[0]
            # Cannot assign two functions to a var in numba
            # Even iterations place entries, odd iterations place exits
            if i % 2 == 0:
                if i == 0:
                    from_i = 0
                else:
                    from_i = prev_i + entry_wait
                if from_i >= to_i:
                    break
                idxs = entry_choice_func_nb(from_i, to_i, col, *entry_args)
                a = entries
                pick_first = entry_pick_first
            else:
                from_i = prev_i + exit_wait
                if from_i >= to_i:
                    break
                idxs = exit_choice_func_nb(from_i, to_i, col, *exit_args)
                a = exits
                pick_first = exit_pick_first
            if len(idxs) == 0:
                break
            first_i = idxs[0]
            if first_i == prev_i == prev_prev_i:
                raise ValueError("Infinite loop detected")
            if first_i < from_i:
                raise ValueError("First index is out of bounds")
            if pick_first:
                # Consider only the first signal
                if first_i >= to_i:
                    raise ValueError("First index is out of bounds")
                a[first_i, col] = True
                prev_prev_i = prev_i
                prev_i = first_i
                i += 1
            else:
                # Consider all signals
                last_i = idxs[-1]
                if last_i >= to_i:
                    raise ValueError("Last index is out of bounds")
                a[idxs, col] = True
                prev_prev_i = prev_i
                prev_i = last_i
                i += 1
    return entries, exits
# ############# Filtering ############# #
@njit(cache=True)
def clean_enex_1d_nb(entries: tp.Array1d,
                     exits: tp.Array1d,
                     entry_first: bool) -> tp.Tuple[tp.Array1d, tp.Array1d]:
    """Clean entry and exit arrays by picking the first signal out of each.
    Entry signal must be picked first. If both signals are present, selects none."""
    new_entries = np.full(entries.shape, False, dtype=np.bool_)
    new_exits = np.full(exits.shape, False, dtype=np.bool_)
    # state: -1 = nothing picked yet, 1 = last picked an entry, 0 = last picked an exit
    state = -1
    for i in range(entries.shape[0]):
        entry_here = entries[i]
        exit_here = exits[i]
        if entry_here and exit_here:
            # Conflicting signals at the same bar cancel each other
            continue
        if entry_here and state != 1:
            state = 1
            new_entries[i] = True
        elif exit_here and (state == 1 or (state == -1 and not entry_first)):
            state = 0
            new_exits[i] = True
    return new_entries, new_exits
@njit(cache=True)
def clean_enex_nb(entries: tp.Array2d,
                  exits: tp.Array2d,
                  entry_first: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """2-dim version of `clean_enex_1d_nb`."""
    new_entries = np.empty(entries.shape, dtype=np.bool_)
    new_exits = np.empty(exits.shape, dtype=np.bool_)
    # Clean every column independently
    for c in range(entries.shape[1]):
        col_entries, col_exits = clean_enex_1d_nb(entries[:, c], exits[:, c], entry_first)
        new_entries[:, c] = col_entries
        new_exits[:, c] = col_exits
    return new_entries, new_exits
# ############# Random ############# #
@njit(cache=True)
def rand_choice_nb(from_i: int, to_i: int, col: int, n: tp.MaybeArray[int]) -> tp.Array1d:
    """`choice_func_nb` to randomly pick `n` values from range `[from_i, to_i)`.
    `n` uses flexible indexing."""
    n_arr = np.asarray(n)
    n_col = flex_select_auto_nb(n_arr, 0, col, True)
    range_len = to_i - from_i
    # Cannot sample more indices than the range holds
    if n_col < range_len:
        size = n_col
    else:
        size = range_len
    return from_i + np.random.choice(range_len, size=size, replace=False)
@njit
def generate_rand_nb(shape: tp.Shape, n: tp.MaybeArray[int], seed: tp.Optional[int] = None) -> tp.Array2d:
    """Create a boolean matrix of `shape` and pick a number of signals randomly.
    Specify `seed` to make output deterministic.
    See `rand_choice_nb`."""
    if seed is not None:
        np.random.seed(seed)
    # Delegate to the generic generator with the random choice function
    return generate_nb(shape, False, rand_choice_nb, n)
@njit(cache=True)
def rand_by_prob_choice_nb(from_i: int,
                           to_i: int,
                           col: int,
                           prob: tp.MaybeArray[float],
                           pick_first: bool,
                           temp_idx_arr: tp.Array1d,
                           flex_2d: bool) -> tp.Array1d:
    """`choice_func_nb` to randomly pick values from range `[from_i, to_i)` with probability `prob`.
    `prob` uses flexible indexing."""
    prob_arr = np.asarray(prob)
    count = 0
    for i in range(from_i, to_i):
        threshold = flex_select_auto_nb(prob_arr, i, col, flex_2d)
        if np.random.uniform(0, 1) < threshold:  # [0, 1)
            temp_idx_arr[count] = i
            count += 1
            if pick_first:
                break
    return temp_idx_arr[:count]
@njit
def generate_rand_by_prob_nb(shape: tp.Shape,
                             prob: tp.MaybeArray[float],
                             pick_first: bool,
                             flex_2d: bool,
                             seed: tp.Optional[int] = None) -> tp.Array2d:
    """Create a boolean matrix of `shape` and pick signals randomly by probability `prob`.
    `prob` should be a 2-dim array of shape `shape`.
    Specify `seed` to make output deterministic.
    See `rand_by_prob_choice_nb`."""
    if seed is not None:
        np.random.seed(seed)
    # Reusable buffer for the indices chosen by the choice function
    idx_buffer = np.empty((shape[0],), dtype=np.int_)
    return generate_nb(shape, pick_first, rand_by_prob_choice_nb, prob, pick_first, idx_buffer, flex_2d)
# ############# Random exits ############# #
@njit
def generate_rand_ex_nb(entries: tp.Array2d,
                        wait: int,
                        until_next: bool,
                        skip_until_exit: bool,
                        seed: tp.Optional[int] = None) -> tp.Array2d:
    """Pick an exit after each entry in `entries`.
    Specify `seed` to make output deterministic."""
    if seed is not None:
        np.random.seed(seed)
    # Exactly one random exit per entry range
    return generate_ex_nb(entries, wait, until_next, skip_until_exit, True, rand_choice_nb, 1)
@njit
def generate_rand_ex_by_prob_nb(entries: tp.Array2d,
                                prob: tp.MaybeArray[float],
                                wait: int,
                                until_next: bool,
                                skip_until_exit: bool,
                                flex_2d: bool,
                                seed: tp.Optional[int] = None) -> tp.Array2d:
    """Pick an exit after each entry in `entries` by probability `prob`.
    `prob` should be a 2-dim array of shape `shape`.
    Specify `seed` to make output deterministic."""
    if seed is not None:
        np.random.seed(seed)
    # Reusable buffer for the indices chosen by the choice function
    idx_buffer = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_ex_nb(
        entries, wait, until_next, skip_until_exit, True,
        rand_by_prob_choice_nb, prob, True, idx_buffer, flex_2d
    )
@njit
def generate_rand_enex_nb(shape: tp.Shape,
                          n: tp.MaybeArray[int],
                          entry_wait: int,
                          exit_wait: int,
                          seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Pick a number of entries and the same number of exits one after another.
    Respects `entry_wait` and `exit_wait` constraints through a number of tricks.
    Tries to mimic a uniform distribution as much as possible.
    The idea is the following: with constraints, there is some fixed amount of total
    space required between first entry and last exit. Upscale this space in a way that
    distribution of entries and exit is similar to a uniform distribution. This means
    randomizing the position of first entry, last exit, and all signals between them.
    `n` uses flexible indexing.
    Specify `seed` to make output deterministic."""
    if seed is not None:
        np.random.seed(seed)
    entries = np.full(shape, False)
    exits = np.full(shape, False)
    if entry_wait == 0 and exit_wait == 0:
        raise ValueError("entry_wait and exit_wait cannot be both 0")
    ns = np.asarray(n)
    if entry_wait == 1 and exit_wait == 1:
        # Basic case
        both = generate_rand_nb(shape, ns * 2, seed=None)
        for col in range(both.shape[1]):
            both_idxs = np.flatnonzero(both[:, col])
            # Alternate: even positions become entries, odd positions become exits
            entries[both_idxs[0::2], col] = True
            exits[both_idxs[1::2], col] = True
    else:
        for col in range(shape[1]):
            _n = flex_select_auto_nb(ns, 0, col, True)
            if _n == 1:
                entry_idx = np.random.randint(0, shape[0] - exit_wait)
                entries[entry_idx, col] = True
            else:
                # Minimum range between two entries
                min_range = entry_wait + exit_wait
                # Minimum total range between first and last entry
                min_total_range = min_range * (_n - 1)
                if shape[0] < min_total_range + exit_wait + 1:
                    raise ValueError("Cannot take a larger sample than population")
                # We should decide how much space should be allocate before first and after last entry
                # Maximum space outside of min_total_range
                max_free_space = shape[0] - min_total_range - 1
                # If min_total_range is tiny compared to max_free_space, limit it
                # otherwise we would have huge space before first and after last entry
                # Limit it such as distribution of entries mimics uniform
                free_space = min(max_free_space, 3 * shape[0] // (_n + 1))
                # What about last exit? it requires exit_wait space
                free_space -= exit_wait
                # Now we need to distribute free space among three ranges:
                # 1) before first, 2) between first and last added to min_total_range, 3) after last
                # We do 2) such that min_total_range can freely expand to maximum
                # We allocate twice as much for 3) as for 1) because an exit is missing
                rand_floats = uniform_summing_to_one_nb(6)
                chosen_spaces = rescale_float_to_int_nb(rand_floats, (0, free_space), free_space)
                first_idx = chosen_spaces[0]
                last_idx = shape[0] - np.sum(chosen_spaces[-2:]) - exit_wait - 1
                # Selected range between first and last entry
                total_range = last_idx - first_idx
                # Maximum range between two entries within total_range
                max_range = total_range - (_n - 2) * min_range
                # Select random ranges within total_range
                rand_floats = uniform_summing_to_one_nb(_n - 1)
                chosen_ranges = rescale_float_to_int_nb(rand_floats, (min_range, max_range), total_range)
                # Translate them into entries
                entry_idxs = np.empty(_n, dtype=np.int_)
                entry_idxs[0] = first_idx
                entry_idxs[1:] = chosen_ranges
                entry_idxs = np.cumsum(entry_idxs)
                entries[entry_idxs, col] = True
    # Generate exits
    for col in range(shape[1]):
        entry_idxs = np.flatnonzero(entries[:, col])
        for j in range(len(entry_idxs)):
            entry_i = entry_idxs[j] + exit_wait
            if j < len(entry_idxs) - 1:
                exit_i = entry_idxs[j + 1] - entry_wait
            else:
                exit_i = entries.shape[0] - 1
            # Place the exit uniformly within [entry_i, exit_i]
            i = np.random.randint(exit_i - entry_i + 1)
            exits[entry_i + i, col] = True
    return entries, exits
def rand_enex_apply_nb(input_shape: tp.Shape,
                       n: tp.MaybeArray[int],
                       entry_wait: int,
                       exit_wait: int) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """`apply_func_nb` that calls `generate_rand_enex_nb`.
    Returns the `(entries, exits)` pair produced by `generate_rand_enex_nb`."""
    # NOTE(review): unlike its siblings, this wrapper is not @njit-decorated —
    # presumably it is invoked from non-compiled code; confirm before adding @njit.
    return generate_rand_enex_nb(input_shape, n, entry_wait, exit_wait)
@njit
def generate_rand_enex_by_prob_nb(shape: tp.Shape,
                                  entry_prob: tp.MaybeArray[float],
                                  exit_prob: tp.MaybeArray[float],
                                  entry_wait: int,
                                  exit_wait: int,
                                  entry_pick_first: bool,
                                  exit_pick_first: bool,
                                  flex_2d: bool,
                                  seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another.
    `entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`.
    Specify `seed` to make output deterministic."""
    if seed is not None:
        np.random.seed(seed)
    # Single buffer shared by both choice functions (they never run concurrently)
    idx_buffer = np.empty((shape[0],), dtype=np.int_)
    entry_args = (entry_prob, entry_pick_first, idx_buffer, flex_2d)
    exit_args = (exit_prob, exit_pick_first, idx_buffer, flex_2d)
    return generate_enex_nb(
        shape,
        entry_wait,
        exit_wait,
        entry_pick_first,
        exit_pick_first,
        rand_by_prob_choice_nb, entry_args,
        rand_by_prob_choice_nb, exit_args
    )
# ############# Stop exits ############# #
@njit(cache=True)
def first_choice_nb(from_i: int, to_i: int, col: int, a: tp.Array2d) -> tp.Array1d:
    """`choice_func_nb` that returns the index of the first signal in `a`."""
    out = np.empty((1,), dtype=np.int_)
    found = 0
    for i in range(from_i, to_i):
        if a[i, col]:
            out[0] = i
            found = 1
            break
    # Length-1 array when a signal was found, empty otherwise
    return out[:found]
@njit(cache=True)
def stop_choice_nb(from_i: int,
                   to_i: int,
                   col: int,
                   ts: tp.ArrayLike,
                   stop: tp.MaybeArray[float],
                   trailing: tp.MaybeArray[bool],
                   wait: int,
                   pick_first: bool,
                   temp_idx_arr: tp.Array1d,
                   flex_2d: bool) -> tp.Array1d:
    """`choice_func_nb` that returns the indices of the stop being hit.
    Args:
        from_i (int): Index to start generation from (inclusive).
        to_i (int): Index to run generation to (exclusive).
        col (int): Current column.
        ts (array of float): 2-dim time series array such as price.
        stop (float or array_like): Stop value for stop loss.
            Can be per frame, column, row, or element-wise. Set to `np.nan` to disable.
        trailing (bool or array_like): Whether to use trailing stop.
            Can be per frame, column, row, or element-wise. Set to False to disable.
        wait (int): Number of ticks to wait before placing exits.
            Setting False or 0 may result in two signals at one bar.
            !!! note
                If `wait` is greater than 0, trailing stop won't update at bars that come before `from_i`.
        pick_first (bool): Whether to stop as soon as the first exit signal is found.
        temp_idx_arr (array of int): Empty integer array used to temporarily store indices.
        flex_2d (bool): See `vectorbt.base.reshape_fns.flex_select_auto_nb`."""
    j = 0
    # Bar from which the stop is measured (wait bars before the range start)
    init_i = from_i - wait
    init_ts = flex_select_auto_nb(ts, init_i, col, flex_2d)
    init_stop = flex_select_auto_nb(np.asarray(stop), init_i, col, flex_2d)
    init_trailing = flex_select_auto_nb(np.asarray(trailing), init_i, col, flex_2d)
    # Running extremes used by the trailing stop
    max_high = min_low = init_ts
    for i in range(from_i, to_i):
        if not np.isnan(init_stop):
            if init_trailing:
                if init_stop >= 0:
                    # Trailing stop buy
                    curr_stop_price = min_low * (1 + abs(init_stop))
                else:
                    # Trailing stop sell
                    curr_stop_price = max_high * (1 - abs(init_stop))
            else:
                curr_stop_price = init_ts * (1 + init_stop)
        # Check if stop price is within bar
        curr_ts = flex_select_auto_nb(ts, i, col, flex_2d)
        if not np.isnan(init_stop):
            # Sign of the stop decides the comparison direction
            if init_stop >= 0:
                exit_signal = curr_ts >= curr_stop_price
            else:
                exit_signal = curr_ts <= curr_stop_price
            if exit_signal:
                temp_idx_arr[j] = i
                j += 1
                if pick_first:
                    return temp_idx_arr[:1]
        # Keep track of lowest low and highest high if trailing
        if init_trailing:
            if curr_ts < min_low:
                min_low = curr_ts
            elif curr_ts > max_high:
                max_high = curr_ts
    return temp_idx_arr[:j]
@njit
def generate_stop_ex_nb(entries: tp.Array2d,
                        ts: tp.ArrayLike,
                        stop: tp.MaybeArray[float],
                        trailing: tp.MaybeArray[bool],
                        wait: int,
                        until_next: bool,
                        skip_until_exit: bool,
                        pick_first: bool,
                        flex_2d: bool) -> tp.Array2d:
    """Generate using `generate_ex_nb` and `stop_choice_nb`.
    Usage:
        * Generate trailing stop loss and take profit signals for 10%.
        ```pycon
        >>> import numpy as np
        >>> from vectorbt.signals.nb import generate_stop_ex_nb
        >>> entries = np.asarray([False, True, False, False, False])[:, None]
        >>> ts = np.asarray([1, 2, 3, 2, 1])[:, None]
        >>> generate_stop_ex_nb(entries, ts, -0.1, True, 1, True, False, True, True)
        array([[False],
               [False],
               [False],
               [ True],
               [False]])
        >>> generate_stop_ex_nb(entries, ts, 0.1, False, 1, True, False, True, True)
        array([[False],
               [False],
               [ True],
               [False],
               [False]])
        ```
    """
    # Buffer reused by the choice function to collect exit indices
    temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_ex_nb(
        entries,
        wait,
        until_next,
        skip_until_exit,
        pick_first,
        stop_choice_nb,
        ts,
        stop,
        trailing,
        wait,
        pick_first,
        temp_idx_arr,
        flex_2d
    )
@njit
def generate_stop_enex_nb(entries: tp.Array2d,
                          ts: tp.Array,
                          stop: tp.MaybeArray[float],
                          trailing: tp.MaybeArray[bool],
                          entry_wait: int,
                          exit_wait: int,
                          pick_first: bool,
                          flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Generate one after another using `generate_enex_nb` and `stop_choice_nb`.
    Returns two arrays: new entries and exits.
    !!! note
        Behaves like `generate_stop_ex_nb` with `skip_until_exit=True`, except that
        every entry coming before the next exit gets removed."""
    # Buffer reused by the stop choice function to collect exit indices
    idx_buffer = np.empty((entries.shape[0],), dtype=np.int_)
    exit_args = (ts, stop, trailing, exit_wait, pick_first, idx_buffer, flex_2d)
    return generate_enex_nb(
        entries.shape,
        entry_wait,
        exit_wait,
        True,
        pick_first,
        first_choice_nb, (entries,),
        stop_choice_nb, exit_args
    )
@njit(cache=True)
def ohlc_stop_choice_nb(from_i: int,
                        to_i: int,
                        col: int,
                        open: tp.ArrayLike,
                        high: tp.ArrayLike,
                        low: tp.ArrayLike,
                        close: tp.ArrayLike,
                        stop_price_out: tp.Array2d,
                        stop_type_out: tp.Array2d,
                        sl_stop: tp.MaybeArray[float],
                        sl_trail: tp.MaybeArray[bool],
                        tp_stop: tp.MaybeArray[float],
                        reverse: tp.MaybeArray[bool],
                        is_open_safe: bool,
                        wait: int,
                        pick_first: bool,
                        temp_idx_arr: tp.Array1d,
                        flex_2d: bool) -> tp.Array1d:
    """`choice_func_nb` that returns the indices of the stop price being hit within OHLC.
    Compared to `stop_choice_nb`, takes into account the whole bar, can check for both
    (trailing) stop loss and take profit simultaneously, and tracks hit price and stop type.
    !!! note
        We don't have intra-candle data. If there was a huge price fluctuation in both directions,
        we can't determine whether SL was triggered before TP and vice versa. So some assumptions
        need to be made: 1) trailing stop can only be based on previous close/high, and
        2) we pessimistically assume that SL comes before TP.
    Args:
        from_i (int): Index to start generation from (inclusive).
        to_i (int): Index to run generation to (exclusive).
        col (int): Current column.
        open (array of float): Entry price such as open or previous close.
        high (array of float): High price.
        low (array of float): Low price.
        close (array of float): Close price.
        stop_price_out (array of float): Array where hit price of each exit will be stored.
        stop_type_out (array of int): Array where stop type of each exit will be stored.
            0 for stop loss, 1 for take profit.
        sl_stop (float or array_like): Percentage value for stop loss.
            Can be per frame, column, row, or element-wise. Set to `np.nan` to disable.
        sl_trail (bool or array_like): Whether `sl_stop` is trailing.
            Can be per frame, column, row, or element-wise. Set to False to disable.
        tp_stop (float or array_like): Percentage value for take profit.
            Can be per frame, column, row, or element-wise. Set to `np.nan` to disable.
        reverse (bool or array_like): Whether to do the opposite, i.e.: prices are followed downwards.
        is_open_safe (bool): Whether entry price comes right at or before open.
            If True and wait is 0, can use high/low at entry bar. Otherwise uses only close.
        wait (int): Number of ticks to wait before placing exits.
            Setting False or 0 may result in entry and exit signal at one bar.
            !!! note
                If `wait` is greater than 0, even with `is_open_safe` set to True,
                trailing stop won't update at bars that come before `from_i`.
        pick_first (bool): Whether to stop as soon as the first exit signal is found.
        temp_idx_arr (array of int): Empty integer array used to temporarily store indices.
        flex_2d (bool): See `vectorbt.base.reshape_fns.flex_select_auto_nb`.
    """
    # Bar from which the stops are measured (wait bars before the range start)
    init_i = from_i - wait
    init_open = flex_select_auto_nb(open, init_i, col, flex_2d)
    init_sl_stop = flex_select_auto_nb(np.asarray(sl_stop), init_i, col, flex_2d)
    if init_sl_stop < 0:
        raise ValueError("Stop value must be 0 or greater")
    init_sl_trail = flex_select_auto_nb(np.asarray(sl_trail), init_i, col, flex_2d)
    init_tp_stop = flex_select_auto_nb(np.asarray(tp_stop), init_i, col, flex_2d)
    if init_tp_stop < 0:
        raise ValueError("Stop value must be 0 or greater")
    init_reverse = flex_select_auto_nb(np.asarray(reverse), init_i, col, flex_2d)
    # Running extremes used by the trailing stop
    max_p = min_p = init_open
    j = 0
    for i in range(from_i, to_i):
        # Resolve current bar
        _open = flex_select_auto_nb(open, i, col, flex_2d)
        _high = flex_select_auto_nb(high, i, col, flex_2d)
        _low = flex_select_auto_nb(low, i, col, flex_2d)
        _close = flex_select_auto_nb(close, i, col, flex_2d)
        # Fill any missing OHLC components from the ones present
        if np.isnan(_open):
            _open = _close
        if np.isnan(_low):
            _low = min(_open, _close)
        if np.isnan(_high):
            _high = max(_open, _close)
        # Calculate stop price
        if not np.isnan(init_sl_stop):
            if init_sl_trail:
                if init_reverse:
                    curr_sl_stop_price = min_p * (1 + init_sl_stop)
                else:
                    curr_sl_stop_price = max_p * (1 - init_sl_stop)
            else:
                if init_reverse:
                    curr_sl_stop_price = init_open * (1 + init_sl_stop)
                else:
                    curr_sl_stop_price = init_open * (1 - init_sl_stop)
        if not np.isnan(init_tp_stop):
            if init_reverse:
                curr_tp_stop_price = init_open * (1 - init_tp_stop)
            else:
                curr_tp_stop_price = init_open * (1 + init_tp_stop)
        # Check if stop price is within bar
        if i > init_i or is_open_safe:
            # is_open_safe means open is either open or any other price before it
            # so it's safe to use high/low at entry bar
            curr_high = _high
            curr_low = _low
        else:
            # Otherwise, we can only use close price at entry bar
            curr_high = curr_low = _close
        exit_signal = False
        # SL is checked first (pessimistic assumption, see note above)
        if not np.isnan(init_sl_stop):
            if (not init_reverse and curr_low <= curr_sl_stop_price) or \
                    (init_reverse and curr_high >= curr_sl_stop_price):
                exit_signal = True
                stop_price_out[i, col] = curr_sl_stop_price
                if init_sl_trail:
                    stop_type_out[i, col] = StopType.TrailStop
                else:
                    stop_type_out[i, col] = StopType.StopLoss
        if not exit_signal and not np.isnan(init_tp_stop):
            if (not init_reverse and curr_high >= curr_tp_stop_price) or \
                    (init_reverse and curr_low <= curr_tp_stop_price):
                exit_signal = True
                stop_price_out[i, col] = curr_tp_stop_price
                stop_type_out[i, col] = StopType.TakeProfit
        if exit_signal:
            temp_idx_arr[j] = i
            j += 1
            if pick_first:
                return temp_idx_arr[:1]
        # Keep track of highest high if trailing
        if init_sl_trail:
            if curr_low < min_p:
                min_p = curr_low
            if curr_high > max_p:
                max_p = curr_high
    return temp_idx_arr[:j]
@njit
def generate_ohlc_stop_ex_nb(entries: tp.Array2d,
                             open: tp.ArrayLike,
                             high: tp.ArrayLike,
                             low: tp.ArrayLike,
                             close: tp.ArrayLike,
                             stop_price_out: tp.Array2d,
                             stop_type_out: tp.Array2d,
                             sl_stop: tp.MaybeArray[float],
                             sl_trail: tp.MaybeArray[bool],
                             tp_stop: tp.MaybeArray[float],
                             reverse: tp.MaybeArray[bool],
                             is_open_safe: bool,
                             wait: int,
                             until_next: bool,
                             skip_until_exit: bool,
                             pick_first: bool,
                             flex_2d: bool) -> tp.Array2d:
    """Generate using `generate_ex_nb` and `ohlc_stop_choice_nb`.
    Usage:
        * Generate trailing stop loss and take profit signals for 10%.
        Illustrates how exit signal can be generated within the same bar as entry.
        ```pycon
        >>> import numpy as np
        >>> from vectorbt.signals.nb import generate_ohlc_stop_ex_nb
        >>> entries = np.asarray([True, False, True, False, False])[:, None]
        >>> entry_price = np.asarray([10, 11, 12, 11, 10])[:, None]
        >>> high_price = entry_price + 1
        >>> low_price = entry_price - 1
        >>> close_price = entry_price
        >>> stop_price_out = np.full_like(entries, np.nan, dtype=np.float_)
        >>> stop_type_out = np.full_like(entries, -1, dtype=np.int_)
        >>> generate_ohlc_stop_ex_nb(
        ...     entries=entries,
        ...     open=entry_price,
        ...     high=high_price,
        ...     low=low_price,
        ...     close=close_price,
        ...     stop_price_out=stop_price_out,
        ...     stop_type_out=stop_type_out,
        ...     sl_stop=0.1,
        ...     sl_trail=True,
        ...     tp_stop=0.1,
        ...     reverse=False,
        ...     is_open_safe=True,
        ...     wait=1,
        ...     until_next=True,
        ...     skip_until_exit=False,
        ...     pick_first=True,
        ...     flex_2d=True
        ... )
        array([[ True],
               [False],
               [False],
               [ True],
               [False]])
        >>> stop_price_out
        array([[ 9. ],  << trailing SL from 10 (entry_price)
               [ nan],
               [ nan],
               [11.7],  << trailing SL from 13 (high_price)
               [ nan]])
        >>> stop_type_out
        array([[ 1],
               [-1],
               [-1],
               [ 1],
               [-1]])
        ```
    Note that if `is_open_safe` was False, the first exit would be executed at the second bar.
    This is because we don't know whether the entry price comes before the high and low price
    at the first bar, and so the trailing stop isn't triggered for the low price of 9.0.
    """
    # Buffer reused by the choice function to collect exit indices
    temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_ex_nb(
        entries,
        wait,
        until_next,
        skip_until_exit,
        pick_first,
        ohlc_stop_choice_nb,
        open,
        high,
        low,
        close,
        stop_price_out,
        stop_type_out,
        sl_stop,
        sl_trail,
        tp_stop,
        reverse,
        is_open_safe,
        wait,
        pick_first,
        temp_idx_arr,
        flex_2d
    )
@njit
def generate_ohlc_stop_enex_nb(entries: tp.Array2d,
                               open: tp.ArrayLike,
                               high: tp.ArrayLike,
                               low: tp.ArrayLike,
                               close: tp.ArrayLike,
                               stop_price_out: tp.Array2d,
                               stop_type_out: tp.Array2d,
                               sl_stop: tp.MaybeArray[float],
                               sl_trail: tp.MaybeArray[bool],
                               tp_stop: tp.MaybeArray[float],
                               reverse: tp.MaybeArray[bool],
                               is_open_safe: bool,
                               entry_wait: int,
                               exit_wait: int,
                               pick_first: bool,
                               flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Generate one after another using `generate_enex_nb` and `ohlc_stop_choice_nb`.
    Returns two arrays: new entries and exits.
    !!! note
        Behaves like `generate_ohlc_stop_ex_nb` with `skip_until_exit=True`, except that
        every entry coming before the next exit gets removed."""
    # Buffer reused by the stop choice function to collect exit indices
    idx_buffer = np.empty((entries.shape[0],), dtype=np.int_)
    exit_args = (
        open,
        high,
        low,
        close,
        stop_price_out,
        stop_type_out,
        sl_stop,
        sl_trail,
        tp_stop,
        reverse,
        is_open_safe,
        exit_wait,
        pick_first,
        idx_buffer,
        flex_2d
    )
    return generate_enex_nb(
        entries.shape,
        entry_wait,
        exit_wait,
        True,
        pick_first,
        first_choice_nb, (entries,),
        ohlc_stop_choice_nb, exit_args
    )
# ############# Map and reduce ranges ############# #
@njit(cache=True)
def between_ranges_nb(a: tp.Array2d) -> tp.RecordArray:
    """Create a record of type `vectorbt.generic.enums.range_dt` for each range between two signals in `a`."""
    # Upper bound on record count: one per element
    out = np.empty(a.shape[0] * a.shape[1], dtype=range_dt)
    count = 0
    for col in range(a.shape[1]):
        idxs = np.flatnonzero(a[:, col])
        # Each consecutive pair of signals forms one closed range
        for j in range(1, idxs.shape[0]):
            out[count]['id'] = count
            out[count]['col'] = col
            out[count]['start_idx'] = idxs[j - 1]
            out[count]['end_idx'] = idxs[j]
            out[count]['status'] = RangeStatus.Closed
            count += 1
    return out[:count]
@njit(cache=True)
def between_two_ranges_nb(a: tp.Array2d, b: tp.Array2d, from_other: bool = False) -> tp.RecordArray:
    """Create a record of type `vectorbt.generic.enums.range_dt` for each range between two signals in `a` and `b`.
    If `from_other` is False, returns ranges from each in `a` to the succeeding in `b`.
    Otherwise, returns ranges from each in `b` to the preceding in `a`.
    When `a` and `b` overlap (two signals at the same time), the distance between overlapping
    signals is still considered and `from_i` would match `to_i`."""
    # Upper bound on record count: one per element
    range_records = np.empty(a.shape[0] * a.shape[1], dtype=range_dt)
    ridx = 0
    for col in range(a.shape[1]):
        a_idxs = np.flatnonzero(a[:, col])
        if a_idxs.shape[0] > 0:
            b_idxs = np.flatnonzero(b[:, col])
            if b_idxs.shape[0] > 0:
                if from_other:
                    # For each signal in b, find the closest preceding signal in a
                    for j, to_i in enumerate(b_idxs):
                        valid_a_idxs = a_idxs[a_idxs <= to_i]
                        if len(valid_a_idxs) > 0:
                            from_i = valid_a_idxs[-1]  # preceding in a
                            range_records[ridx]['id'] = ridx
                            range_records[ridx]['col'] = col
                            range_records[ridx]['start_idx'] = from_i
                            range_records[ridx]['end_idx'] = to_i
                            range_records[ridx]['status'] = RangeStatus.Closed
                            ridx += 1
                else:
                    # For each signal in a, find the closest succeeding signal in b
                    for j, from_i in enumerate(a_idxs):
                        valid_b_idxs = b_idxs[b_idxs >= from_i]
                        if len(valid_b_idxs) > 0:
                            to_i = valid_b_idxs[0]  # succeeding in b
                            range_records[ridx]['id'] = ridx
                            range_records[ridx]['col'] = col
                            range_records[ridx]['start_idx'] = from_i
                            range_records[ridx]['end_idx'] = to_i
                            range_records[ridx]['status'] = RangeStatus.Closed
                            ridx += 1
    return range_records[:ridx]
@njit(cache=True)
def partition_ranges_nb(a: tp.Array2d) -> tp.RecordArray:
    """Create a record of type `vectorbt.generic.enums.range_dt` for each partition of signals in `a`."""
    # Upper bound on record count: one per element
    range_records = np.empty(a.shape[0] * a.shape[1], dtype=range_dt)
    ridx = 0
    for col in range(a.shape[1]):
        # Whether we are currently inside a run of consecutive True values
        is_partition = False
        from_i = -1
        for i in range(a.shape[0]):
            if a[i, col]:
                if not is_partition:
                    # A new partition begins here
                    from_i = i
                is_partition = True
            elif is_partition:
                # Partition ended at the previous row -> record it as closed
                to_i = i
                range_records[ridx]['id'] = ridx
                range_records[ridx]['col'] = col
                range_records[ridx]['start_idx'] = from_i
                range_records[ridx]['end_idx'] = to_i
                range_records[ridx]['status'] = RangeStatus.Closed
                ridx += 1
                is_partition = False
            if i == a.shape[0] - 1:
                if is_partition:
                    # Partition extends to the end of the column -> record it as open
                    to_i = a.shape[0] - 1
                    range_records[ridx]['id'] = ridx
                    range_records[ridx]['col'] = col
                    range_records[ridx]['start_idx'] = from_i
                    range_records[ridx]['end_idx'] = to_i
                    range_records[ridx]['status'] = RangeStatus.Open
                    ridx += 1
    return range_records[:ridx]
@njit(cache=True)
def between_partition_ranges_nb(a: tp.Array2d) -> tp.RecordArray:
    """Create a record of type `vectorbt.generic.enums.range_dt` for each range between two partitions in `a`."""
    # Upper bound on record count: one per element
    range_records = np.empty(a.shape[0] * a.shape[1], dtype=range_dt)
    ridx = 0
    for col in range(a.shape[1]):
        # Whether we are currently inside a run of consecutive True values
        is_partition = False
        # Index of the last True value seen so far (-1 = none yet)
        from_i = -1
        for i in range(a.shape[0]):
            if a[i, col]:
                if not is_partition and from_i != -1:
                    # First signal after a gap -> record the range from the end of
                    # the previous partition to the start of this one
                    to_i = i
                    range_records[ridx]['id'] = ridx
                    range_records[ridx]['col'] = col
                    range_records[ridx]['start_idx'] = from_i
                    range_records[ridx]['end_idx'] = to_i
                    range_records[ridx]['status'] = RangeStatus.Closed
                    ridx += 1
                is_partition = True
                from_i = i
            else:
                is_partition = False
    return range_records[:ridx]
# ############# Ranking ############# #
@njit
def rank_nb(a: tp.Array2d,
reset_by: tp.Optional[tp.Array1d],
after_false: bool,
rank_func_nb: tp.RankFunc, *args) -> tp.Array2d:
"""Rank each signal using `rank_func_nb`.
Applies `rank_func_nb` on each True value. Should accept index of the row,
index of the column, index of the last reset signal, index of the end of the previous partition,
index of the start of the current partition, and `*args`. Should return -1 for no rank, otherwise 0 or greater.
Setting `after_false` to True will disregard the first partition of True values
if there is no False value before them."""
out = np.full(a.shape, -1, dtype=np.int_)
for col in range(a.shape[1]):
reset_i = 0
prev_part_end_i = -1
part_start_i = -1
in_partition = False
false_seen = not after_false
for i in range(a.shape[0]):
if reset_by is not None:
if reset_by[i, col]:
reset_i = i
if a[i, col] and not (after_false and not false_seen):
if not in_partition:
part_start_i = i
in_partition = True
out[i, col] = rank_func_nb(i, col, reset_i, prev_part_end_i, part_start_i, *args)
elif not a[i, col]:
if in_partition:
prev_part_end_i = i - 1
in_partition = False
false_seen = True
return out
@njit(cache=True)
def sig_pos_rank_nb(i: int, col: int, reset_i: int, prev_part_end_i: int, part_start_i: int,
sig_pos_temp: tp.Array1d, allow_gaps: bool) -> int:
"""`rank_func_nb` that returns the rank of each signal by its position in the partition."""
if reset_i > prev_part_end_i and max(reset_i, part_start_i) == i:
sig_pos_temp[col] = -1
elif not allow_gaps and part_start_i == i:
sig_pos_temp[col] = -1
sig_pos_temp[col] += 1
return sig_pos_temp[col]
@njit(cache=True)
def part_pos_rank_nb(i: int, col: int, reset_i: int, prev_part_end_i: int, part_start_i: int,
part_pos_temp: tp.Array1d) -> int:
"""`rank_func_nb` that returns the rank of each partition by its position in the series."""
if reset_i > prev_part_end_i and max(reset_i, part_start_i) == i:
part_pos_temp[col] = 0
elif part_start_i == i:
part_pos_temp[col] += 1
return part_pos_temp[col]
# ############# Index ############# #
@njit(cache=True)
def nth_index_1d_nb(a: tp.Array1d, n: int) -> int:
"""Get the index of the n-th True value.
!!! note
`n` starts with 0 and can be negative."""
if n >= 0:
found = -1
for i in range(a.shape[0]):
if a[i]:
found += 1
if found == n:
return i
else:
found = 0
for i in range(a.shape[0] - 1, -1, -1):
if a[i]:
found -= 1
if found == n:
return i
return -1
@njit(cache=True)
def nth_index_nb(a: tp.Array2d, n: int) -> tp.Array1d:
"""2-dim version of `nth_index_1d_nb`."""
out = np.empty(a.shape[1], dtype=np.int_)
for col in range(a.shape[1]):
out[col] = nth_index_1d_nb(a[:, col], n)
return out
@njit(cache=True)
def norm_avg_index_1d_nb(a: tp.Array1d) -> float:
"""Get mean index normalized to (-1, 1)."""
mean_index = np.mean(np.flatnonzero(a))
return renormalize_nb(mean_index, (0, len(a) - 1), (-1, 1))
@njit(cache=True)
def norm_avg_index_nb(a: tp.Array2d) -> tp.Array1d:
"""2-dim version of `norm_avg_index_1d_nb`."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = norm_avg_index_1d_nb(a[:, col])
return out
| [
"numpy.full_like",
"numpy.random.choice",
"numpy.flatnonzero",
"numba.njit",
"numpy.asarray",
"numpy.any",
"vectorbt.utils.array_.uniform_summing_to_one_nb",
"numpy.sum",
"numpy.random.randint",
"numpy.empty",
"numpy.random.seed",
"numpy.isnan",
"numpy.random.uniform",
"vectorbt.utils.arra... | [((9752, 9768), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (9756, 9768), False, 'from numba import njit\n'), ((10639, 10655), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (10643, 10655), False, 'from numba import njit\n'), ((11195, 11211), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (11199, 11211), False, 'from numba import njit\n'), ((12009, 12025), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (12013, 12025), False, 'from numba import njit\n'), ((21347, 21363), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (21351, 21363), False, 'from numba import njit\n'), ((21699, 21715), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (21703, 21715), False, 'from numba import njit\n'), ((27304, 27320), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (27308, 27320), False, 'from numba import njit\n'), ((39656, 39672), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (39660, 39672), False, 'from numba import njit\n'), ((40491, 40507), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (40495, 40507), False, 'from numba import njit\n'), ((42641, 42657), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (42645, 42657), False, 'from numba import njit\n'), ((44022, 44038), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (44026, 44038), False, 'from numba import njit\n'), ((46536, 46552), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (46540, 46552), False, 'from numba import njit\n'), ((47052, 47068), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (47056, 47068), False, 'from numba import njit\n'), ((47549, 47565), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (47553, 47565), False, 'from numba import njit\n'), ((48105, 48121), 'numba.njit', 'njit', ([], {'cache': '(True)'}), 
'(cache=True)\n', (48109, 48121), False, 'from numba import njit\n'), ((48370, 48386), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (48374, 48386), False, 'from numba import njit\n'), ((48596, 48612), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (48600, 48612), False, 'from numba import njit\n'), ((2575, 2612), 'numpy.full', 'np.full', (['shape', '(False)'], {'dtype': 'np.bool_'}), '(shape, False, dtype=np.bool_)\n', (2582, 2612), True, 'import numpy as np\n'), ((4542, 4570), 'numpy.full_like', 'np.full_like', (['entries', '(False)'], {}), '(entries, False)\n', (4554, 4570), True, 'import numpy as np\n'), ((7750, 7771), 'numpy.full', 'np.full', (['shape', '(False)'], {}), '(shape, False)\n', (7757, 7771), True, 'import numpy as np\n'), ((7784, 7805), 'numpy.full', 'np.full', (['shape', '(False)'], {}), '(shape, False)\n', (7791, 7805), True, 'import numpy as np\n'), ((10108, 10153), 'numpy.full', 'np.full', (['entries.shape', '(False)'], {'dtype': 'np.bool_'}), '(entries.shape, False, dtype=np.bool_)\n', (10115, 10153), True, 'import numpy as np\n'), ((10170, 10213), 'numpy.full', 'np.full', (['exits.shape', '(False)'], {'dtype': 'np.bool_'}), '(exits.shape, False, dtype=np.bool_)\n', (10177, 10213), True, 'import numpy as np\n'), ((10871, 10910), 'numpy.empty', 'np.empty', (['entries.shape'], {'dtype': 'np.bool_'}), '(entries.shape, dtype=np.bool_)\n', (10879, 10910), True, 'import numpy as np\n'), ((10927, 10964), 'numpy.empty', 'np.empty', (['exits.shape'], {'dtype': 'np.bool_'}), '(exits.shape, dtype=np.bool_)\n', (10935, 10964), True, 'import numpy as np\n'), ((11429, 11442), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (11439, 11442), True, 'import numpy as np\n'), ((12503, 12519), 'numpy.asarray', 'np.asarray', (['prob'], {}), '(prob)\n', (12513, 12519), True, 'import numpy as np\n'), ((13366, 13402), 'numpy.empty', 'np.empty', (['(shape[0],)'], {'dtype': 'np.int_'}), '((shape[0],), dtype=np.int_)\n', 
(13374, 13402), True, 'import numpy as np\n'), ((14786, 14830), 'numpy.empty', 'np.empty', (['(entries.shape[0],)'], {'dtype': 'np.int_'}), '((entries.shape[0],), dtype=np.int_)\n', (14794, 14830), True, 'import numpy as np\n'), ((16023, 16044), 'numpy.full', 'np.full', (['shape', '(False)'], {}), '(shape, False)\n', (16030, 16044), True, 'import numpy as np\n'), ((16057, 16078), 'numpy.full', 'np.full', (['shape', '(False)'], {}), '(shape, False)\n', (16064, 16078), True, 'import numpy as np\n'), ((16201, 16214), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (16211, 16214), True, 'import numpy as np\n'), ((20951, 20987), 'numpy.empty', 'np.empty', (['(shape[0],)'], {'dtype': 'np.int_'}), '((shape[0],), dtype=np.int_)\n', (20959, 20987), True, 'import numpy as np\n'), ((21536, 21565), 'numpy.empty', 'np.empty', (['(1,)'], {'dtype': 'np.int_'}), '((1,), dtype=np.int_)\n', (21544, 21565), True, 'import numpy as np\n'), ((23294, 23339), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['ts', 'init_i', 'col', 'flex_2d'], {}), '(ts, init_i, col, flex_2d)\n', (23313, 23339), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((25952, 25996), 'numpy.empty', 'np.empty', (['(entries.shape[0],)'], {'dtype': 'np.int_'}), '((entries.shape[0],), dtype=np.int_)\n', (25960, 25996), True, 'import numpy as np\n'), ((26997, 27041), 'numpy.empty', 'np.empty', (['(entries.shape[0],)'], {'dtype': 'np.int_'}), '((entries.shape[0],), dtype=np.int_)\n', (27005, 27041), True, 'import numpy as np\n'), ((30782, 30829), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['open', 'init_i', 'col', 'flex_2d'], {}), '(open, init_i, col, flex_2d)\n', (30801, 30829), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((37309, 37353), 'numpy.empty', 'np.empty', (['(entries.shape[0],)'], {'dtype': 'np.int_'}), '((entries.shape[0],), dtype=np.int_)\n', (37317, 37353), True, 'import numpy as np\n'), ((39012, 
39056), 'numpy.empty', 'np.empty', (['(entries.shape[0],)'], {'dtype': 'np.int_'}), '((entries.shape[0],), dtype=np.int_)\n', (39020, 39056), True, 'import numpy as np\n'), ((39860, 39909), 'numpy.empty', 'np.empty', (['(a.shape[0] * a.shape[1])'], {'dtype': 'range_dt'}), '(a.shape[0] * a.shape[1], dtype=range_dt)\n', (39868, 39909), True, 'import numpy as np\n'), ((41069, 41118), 'numpy.empty', 'np.empty', (['(a.shape[0] * a.shape[1])'], {'dtype': 'range_dt'}), '(a.shape[0] * a.shape[1], dtype=range_dt)\n', (41077, 41118), True, 'import numpy as np\n'), ((42842, 42891), 'numpy.empty', 'np.empty', (['(a.shape[0] * a.shape[1])'], {'dtype': 'range_dt'}), '(a.shape[0] * a.shape[1], dtype=range_dt)\n', (42850, 42891), True, 'import numpy as np\n'), ((44239, 44288), 'numpy.empty', 'np.empty', (['(a.shape[0] * a.shape[1])'], {'dtype': 'range_dt'}), '(a.shape[0] * a.shape[1], dtype=range_dt)\n', (44247, 44288), True, 'import numpy as np\n'), ((45709, 45744), 'numpy.full', 'np.full', (['a.shape', '(-1)'], {'dtype': 'np.int_'}), '(a.shape, -1, dtype=np.int_)\n', (45716, 45744), True, 'import numpy as np\n'), ((48233, 48268), 'numpy.empty', 'np.empty', (['a.shape[1]'], {'dtype': 'np.int_'}), '(a.shape[1], dtype=np.int_)\n', (48241, 48268), True, 'import numpy as np\n'), ((48726, 48763), 'numpy.empty', 'np.empty', (['a.shape[1]'], {'dtype': 'np.float_'}), '(a.shape[1], dtype=np.float_)\n', (48734, 48763), True, 'import numpy as np\n'), ((4633, 4664), 'numpy.flatnonzero', 'np.flatnonzero', (['entries[:, col]'], {}), '(entries[:, col])\n', (4647, 4664), True, 'import numpy as np\n'), ((11473, 11510), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['ns', '(0)', 'col', '(True)'], {}), '(ns, 0, col, True)\n', (11492, 11510), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((11532, 11589), 'numpy.random.choice', 'np.random.choice', (['(to_i - from_i)'], {'size': 'size', 'replace': '(False)'}), '(to_i - from_i, size=size, 
replace=False)\n', (11548, 11589), True, 'import numpy as np\n'), ((11899, 11919), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11913, 11919), True, 'import numpy as np\n'), ((13326, 13346), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13340, 13346), True, 'import numpy as np\n'), ((13970, 13990), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13984, 13990), True, 'import numpy as np\n'), ((14746, 14766), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14760, 14766), True, 'import numpy as np\n'), ((15988, 16008), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16002, 16008), True, 'import numpy as np\n'), ((20911, 20931), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (20925, 20931), True, 'import numpy as np\n'), ((23376, 23392), 'numpy.asarray', 'np.asarray', (['stop'], {}), '(stop)\n', (23386, 23392), True, 'import numpy as np\n'), ((23456, 23476), 'numpy.asarray', 'np.asarray', (['trailing'], {}), '(trailing)\n', (23466, 23476), True, 'import numpy as np\n'), ((24052, 24092), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['ts', 'i', 'col', 'flex_2d'], {}), '(ts, i, col, flex_2d)\n', (24071, 24092), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((30869, 30888), 'numpy.asarray', 'np.asarray', (['sl_stop'], {}), '(sl_stop)\n', (30879, 30888), True, 'import numpy as np\n'), ((31037, 31057), 'numpy.asarray', 'np.asarray', (['sl_trail'], {}), '(sl_trail)\n', (31047, 31057), True, 'import numpy as np\n'), ((31120, 31139), 'numpy.asarray', 'np.asarray', (['tp_stop'], {}), '(tp_stop)\n', (31130, 31139), True, 'import numpy as np\n'), ((31287, 31306), 'numpy.asarray', 'np.asarray', (['reverse'], {}), '(reverse)\n', (31297, 31306), True, 'import numpy as np\n'), ((31451, 31493), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['open', 'i', 'col', 'flex_2d'], {}), '(open, 
i, col, flex_2d)\n', (31470, 31493), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((31510, 31552), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['high', 'i', 'col', 'flex_2d'], {}), '(high, i, col, flex_2d)\n', (31529, 31552), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((31568, 31609), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['low', 'i', 'col', 'flex_2d'], {}), '(low, i, col, flex_2d)\n', (31587, 31609), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((31627, 31670), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['close', 'i', 'col', 'flex_2d'], {}), '(close, i, col, flex_2d)\n', (31646, 31670), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((31682, 31697), 'numpy.isnan', 'np.isnan', (['_open'], {}), '(_open)\n', (31690, 31697), True, 'import numpy as np\n'), ((31737, 31751), 'numpy.isnan', 'np.isnan', (['_low'], {}), '(_low)\n', (31745, 31751), True, 'import numpy as np\n'), ((31802, 31817), 'numpy.isnan', 'np.isnan', (['_high'], {}), '(_high)\n', (31810, 31817), True, 'import numpy as np\n'), ((39975, 40000), 'numpy.flatnonzero', 'np.flatnonzero', (['a[:, col]'], {}), '(a[:, col])\n', (39989, 40000), True, 'import numpy as np\n'), ((41184, 41209), 'numpy.flatnonzero', 'np.flatnonzero', (['a[:, col]'], {}), '(a[:, col])\n', (41198, 41209), True, 'import numpy as np\n'), ((48510, 48527), 'numpy.flatnonzero', 'np.flatnonzero', (['a'], {}), '(a)\n', (48524, 48527), True, 'import numpy as np\n'), ((12575, 12598), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (12592, 12598), True, 'import numpy as np\n'), ((12601, 12644), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['probs', 'i', 'col', 'flex_2d'], {}), '(probs, i, col, flex_2d)\n', (12620, 12644), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), 
((16403, 16431), 'numpy.flatnonzero', 'np.flatnonzero', (['both[:, col]'], {}), '(both[:, col])\n', (16417, 16431), True, 'import numpy as np\n'), ((16591, 16628), 'vectorbt.base.reshape_fns.flex_select_auto_nb', 'flex_select_auto_nb', (['ns', '(0)', 'col', '(True)'], {}), '(ns, 0, col, True)\n', (16610, 16628), False, 'from vectorbt.base.reshape_fns import flex_select_auto_nb\n'), ((19304, 19335), 'numpy.flatnonzero', 'np.flatnonzero', (['entries[:, col]'], {}), '(entries[:, col])\n', (19318, 19335), True, 'import numpy as np\n'), ((23583, 23602), 'numpy.isnan', 'np.isnan', (['init_stop'], {}), '(init_stop)\n', (23591, 23602), True, 'import numpy as np\n'), ((24108, 24127), 'numpy.isnan', 'np.isnan', (['init_stop'], {}), '(init_stop)\n', (24116, 24127), True, 'import numpy as np\n'), ((31905, 31927), 'numpy.isnan', 'np.isnan', (['init_sl_stop'], {}), '(init_sl_stop)\n', (31913, 31927), True, 'import numpy as np\n'), ((32382, 32404), 'numpy.isnan', 'np.isnan', (['init_tp_stop'], {}), '(init_tp_stop)\n', (32390, 32404), True, 'import numpy as np\n'), ((33035, 33057), 'numpy.isnan', 'np.isnan', (['init_sl_stop'], {}), '(init_sl_stop)\n', (33043, 33057), True, 'import numpy as np\n'), ((41263, 41288), 'numpy.flatnonzero', 'np.flatnonzero', (['b[:, col]'], {}), '(b[:, col])\n', (41277, 41288), True, 'import numpy as np\n'), ((2997, 3013), 'numpy.any', 'np.any', (['(idxs < 0)'], {}), '(idxs < 0)\n', (3003, 3013), True, 'import numpy as np\n'), ((3017, 3041), 'numpy.any', 'np.any', (['(idxs >= shape[0])'], {}), '(idxs >= shape[0])\n', (3023, 3041), True, 'import numpy as np\n'), ((16681, 16723), 'numpy.random.randint', 'np.random.randint', (['(0)', '(shape[0] - exit_wait)'], {}), '(0, shape[0] - exit_wait)\n', (16698, 16723), True, 'import numpy as np\n'), ((18194, 18222), 'vectorbt.utils.array_.uniform_summing_to_one_nb', 'uniform_summing_to_one_nb', (['(6)'], {}), '(6)\n', (18219, 18222), False, 'from vectorbt.utils.array_ import uniform_summing_to_one_nb, 
rescale_float_to_int_nb, renormalize_nb\n'), ((18255, 18320), 'vectorbt.utils.array_.rescale_float_to_int_nb', 'rescale_float_to_int_nb', (['rand_floats', '(0, free_space)', 'free_space'], {}), '(rand_floats, (0, free_space), free_space)\n', (18278, 18320), False, 'from vectorbt.utils.array_ import uniform_summing_to_one_nb, rescale_float_to_int_nb, renormalize_nb\n'), ((18785, 18818), 'vectorbt.utils.array_.uniform_summing_to_one_nb', 'uniform_summing_to_one_nb', (['(_n - 1)'], {}), '(_n - 1)\n', (18810, 18818), False, 'from vectorbt.utils.array_ import uniform_summing_to_one_nb, rescale_float_to_int_nb, renormalize_nb\n'), ((18851, 18924), 'vectorbt.utils.array_.rescale_float_to_int_nb', 'rescale_float_to_int_nb', (['rand_floats', '(min_range, max_range)', 'total_range'], {}), '(rand_floats, (min_range, max_range), total_range)\n', (18874, 18924), False, 'from vectorbt.utils.array_ import uniform_summing_to_one_nb, rescale_float_to_int_nb, renormalize_nb\n'), ((19001, 19028), 'numpy.empty', 'np.empty', (['_n'], {'dtype': 'np.int_'}), '(_n, dtype=np.int_)\n', (19009, 19028), True, 'import numpy as np\n'), ((19147, 19168), 'numpy.cumsum', 'np.cumsum', (['entry_idxs'], {}), '(entry_idxs)\n', (19156, 19168), True, 'import numpy as np\n'), ((19629, 19668), 'numpy.random.randint', 'np.random.randint', (['(exit_i - entry_i + 1)'], {}), '(exit_i - entry_i + 1)\n', (19646, 19668), True, 'import numpy as np\n'), ((33516, 33538), 'numpy.isnan', 'np.isnan', (['init_tp_stop'], {}), '(init_tp_stop)\n', (33524, 33538), True, 'import numpy as np\n'), ((5612, 5633), 'numpy.any', 'np.any', (['(idxs < from_i)'], {}), '(idxs < from_i)\n', (5618, 5633), True, 'import numpy as np\n'), ((5637, 5657), 'numpy.any', 'np.any', (['(idxs >= to_i)'], {}), '(idxs >= to_i)\n', (5643, 5657), True, 'import numpy as np\n'), ((18404, 18430), 'numpy.sum', 'np.sum', (['chosen_spaces[-2:]'], {}), '(chosen_spaces[-2:])\n', (18410, 18430), True, 'import numpy as np\n')] |
import unittest
from constitutive.cpp import ModMisesEeq, Constraint, q_dim
import numpy as np
def cdf(f, x, delta):
f_cdf = np.empty_like(x)
for i in range(len(x.T)):
d = np.zeros_like(x)
d[i] = delta
f_cdf[i] = (f(x + d) - f(x - d)) / (2 * delta)
return f_cdf
class TestMises(unittest.TestCase):
def random_cdf(self, constraint, k=10.0, nu=0.2):
np.random.seed(6174)
norm = ModMisesEeq(k, nu, constraint)
def only_eeq(x):
return norm.evaluate(x)[0]
for i in range(100):
strain = np.random.random(q_dim(constraint))
eeq, deeq = norm.evaluate(strain)
deeq_cdf = cdf(only_eeq, strain, 1.0e-6)
self.assertLess(np.linalg.norm(deeq - deeq_cdf), 1.0e-6)
def test_uniaxial_strain(self):
for c in [
Constraint.UNIAXIAL_STRAIN,
Constraint.UNIAXIAL_STRESS,
Constraint.PLANE_STRESS,
Constraint.PLANE_STRAIN,
Constraint.FULL,
]:
self.random_cdf(c)
def test_zero(self):
norm = ModMisesEeq(10, 0.2, Constraint.PLANE_STRESS)
eeq, deeq = norm.evaluate([0, 0, 0])
self.assertLess(eeq, 1.0e-10)
self.assertFalse(np.any(np.isnan(deeq)))
def test_1D(self):
norm = ModMisesEeq(10, 0.2, Constraint.UNIAXIAL_STRESS)
eeq, _ = norm.evaluate([42])
self.assertAlmostEqual(eeq, 42.0)
eeq_compression, _ = norm.evaluate([-42.0])
self.assertAlmostEqual(eeq_compression, 42.0 / 10.0)
def test_2D(self):
norm = ModMisesEeq(10, 0.2, Constraint.PLANE_STRESS)
eeq, _ = norm.evaluate([42.0, -0.2 * 42.0, 0])
self.assertAlmostEqual(eeq, 42.0)
eeq_compression, _ = norm.evaluate([-42.0, 0.2 * 42, 0])
self.assertAlmostEqual(eeq_compression, 42.0 / 10.0)
def test_3D(self):
k, nu = 10.0, 0.2
norm = ModMisesEeq(k, nu, Constraint.FULL)
eeq, _ = norm.evaluate([42.0, -nu * 42.0, -nu * 42, 0, 0, 0])
self.assertAlmostEqual(eeq, 42.0)
eeq_compression, _ = norm.evaluate([nu * 42.0, nu * 42, -42, 0, 0, 0])
self.assertAlmostEqual(eeq_compression, 42.0 / k)
if __name__ == "__main__":
unittest.main()
| [
"constitutive.cpp.q_dim",
"constitutive.cpp.ModMisesEeq",
"numpy.empty_like",
"numpy.random.seed",
"numpy.isnan",
"numpy.linalg.norm",
"unittest.main",
"numpy.zeros_like"
] | [((131, 147), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (144, 147), True, 'import numpy as np\n'), ((2261, 2276), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2274, 2276), False, 'import unittest\n'), ((190, 206), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (203, 206), True, 'import numpy as np\n'), ((400, 420), 'numpy.random.seed', 'np.random.seed', (['(6174)'], {}), '(6174)\n', (414, 420), True, 'import numpy as np\n'), ((436, 466), 'constitutive.cpp.ModMisesEeq', 'ModMisesEeq', (['k', 'nu', 'constraint'], {}), '(k, nu, constraint)\n', (447, 466), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((1109, 1154), 'constitutive.cpp.ModMisesEeq', 'ModMisesEeq', (['(10)', '(0.2)', 'Constraint.PLANE_STRESS'], {}), '(10, 0.2, Constraint.PLANE_STRESS)\n', (1120, 1154), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((1326, 1374), 'constitutive.cpp.ModMisesEeq', 'ModMisesEeq', (['(10)', '(0.2)', 'Constraint.UNIAXIAL_STRESS'], {}), '(10, 0.2, Constraint.UNIAXIAL_STRESS)\n', (1337, 1374), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((1607, 1652), 'constitutive.cpp.ModMisesEeq', 'ModMisesEeq', (['(10)', '(0.2)', 'Constraint.PLANE_STRESS'], {}), '(10, 0.2, Constraint.PLANE_STRESS)\n', (1618, 1652), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((1942, 1977), 'constitutive.cpp.ModMisesEeq', 'ModMisesEeq', (['k', 'nu', 'Constraint.FULL'], {}), '(k, nu, Constraint.FULL)\n', (1953, 1977), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((600, 617), 'constitutive.cpp.q_dim', 'q_dim', (['constraint'], {}), '(constraint)\n', (605, 617), False, 'from constitutive.cpp import ModMisesEeq, Constraint, q_dim\n'), ((746, 777), 'numpy.linalg.norm', 'np.linalg.norm', (['(deeq - deeq_cdf)'], {}), '(deeq - deeq_cdf)\n', (760, 777), True, 'import numpy as np\n'), ((1270, 1284), 'numpy.isnan', 'np.isnan', (['deeq'], {}), 
'(deeq)\n', (1278, 1284), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from typing import TypeVar
BoundingBox = TypeVar('BoundingBox')
class BoundingBox:
def __init__(self, x: float, y: float, w: float, h: float) -> None:
self._x = x
self._y = y
self._w = w
self._h = h
self._center = np.array((x + w / 2, y + h / 2))
self._area = w * h
@property
def x(self) -> float:
return self._x
@property
def y(self) -> float:
return self._y
@property
def w(self) -> float:
return self._w
@property
def h(self) -> float:
return self._h
@property
def center(self) -> np.ndarray:
return self._center
def iou(self, other: BoundingBox) -> float:
dx = max(0, min(self.x + self.w, other.x + other.w) - max(self.x, other.x))
dy = max(0, min(self.y + self.h, other.y + other.h) - max(self.y, other.y))
return dx * dy / self._area
def distance(self, other: BoundingBox) -> float:
return np.linalg.norm(self.center - other.center)
def merge(self, other: BoundingBox) -> float:
x = min(self.x, other.x)
y = min(self.y, other.y)
w = max(self.x + self.w, other.x + other.w) - x
h = max(self.y + self.h, other.y + other.h) - y
return BoundingBox(x, y, w, h)
def draw(self, img: np.ndarray, color: np.ndarray, thickness: float) -> None:
pos = (int(self.x), int(self.y))
size = (int(self.x + self.w), int(self.y + self.h))
cv2.rectangle(img, pos, size, color, thickness) | [
"cv2.rectangle",
"numpy.array",
"numpy.linalg.norm",
"typing.TypeVar"
] | [((72, 94), 'typing.TypeVar', 'TypeVar', (['"""BoundingBox"""'], {}), "('BoundingBox')\n", (79, 94), False, 'from typing import TypeVar\n'), ((290, 322), 'numpy.array', 'np.array', (['(x + w / 2, y + h / 2)'], {}), '((x + w / 2, y + h / 2))\n', (298, 322), True, 'import numpy as np\n'), ((1019, 1061), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.center - other.center)'], {}), '(self.center - other.center)\n', (1033, 1061), True, 'import numpy as np\n'), ((1522, 1569), 'cv2.rectangle', 'cv2.rectangle', (['img', 'pos', 'size', 'color', 'thickness'], {}), '(img, pos, size, color, thickness)\n', (1535, 1569), False, 'import cv2\n')] |
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...types import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data. """
def load_uri_to_video_blob(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.blob`.
:param only_keyframes: only keep the keyframes in the video
:return: Document itself after processed
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.blob = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_blob_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.blob` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
:return: itself after processed
"""
if (
self.blob.ndim != 4
or self.blob.shape[-1] != 3
or self.blob.dtype != np.uint8
):
raise ValueError(
f'expects `.blob` with dtype=uint8 and ndim=4 and the last dimension is 3, '
f'but receiving {self.blob.shape} in {self.blob.dtype}'
)
video_blob = np.moveaxis(np.clip(self.blob, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.blob.shape[1]
stream.height = self.blob.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_blob:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
| [
"numpy.clip",
"numpy.asarray",
"av.VideoFrame.from_ndarray",
"av.open",
"numpy.stack"
] | [((532, 549), 'av.open', 'av.open', (['self.uri'], {}), '(self.uri)\n', (539, 549), False, 'import av\n'), ((902, 918), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (910, 918), True, 'import numpy as np\n'), ((1762, 1788), 'numpy.clip', 'np.clip', (['self.blob', '(0)', '(255)'], {}), '(self.blob, 0, 255)\n', (1769, 1788), True, 'import numpy as np\n'), ((1829, 1852), 'av.open', 'av.open', (['file'], {'mode': '"""w"""'}), "(file, mode='w')\n", (1836, 1852), False, 'import av\n'), ((2123, 2168), 'av.VideoFrame.from_ndarray', 'av.VideoFrame.from_ndarray', (['b'], {'format': '"""rgb24"""'}), "(b, format='rgb24')\n", (2149, 2168), False, 'import av\n'), ((852, 867), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (862, 867), True, 'import numpy as np\n')] |
import collections
from collections import defaultdict as dd
import sqlite3, sys
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
# Global plot style; alternatives left commented for quick switching.
plt.style.use('seaborn-whitegrid')
#plt.style.use('grayscale')
###plt.rcParams['image.cmap'] = 'gray'
# Module-level database connection and cursor; every plotting function
# below reads from the `clean` table through this shared cursor `c`.
dbfile = "docs/epigraph.db"
conn = sqlite3.connect(dbfile) # open the epigraph database
c = conn.cursor()
def ncolor (nationality):
    """Map a writer's nationality to a scatter-plot colour.

    US writers are red, British-Isles writers are blue, everyone else black.
    """
    us = ('American', 'US')
    british_isles = ('British', 'Irish', 'English', 'Scottish')
    if nationality in us:
        return 'red'
    return 'blue' if nationality in british_isles else 'black'
def nshape (nationality):
    """Map a writer's nationality to a scatter-plot marker.

    US writers get '*', British-Isles writers 'x', everyone else '.'.
    """
    us = ('American', 'US')
    british_isles = ('British', 'Irish', 'English', 'Scottish')
    if nationality in us:
        return '*'
    return 'x' if nationality in british_isles else '.'
def timediff (WFROM, WTO, efrom, eto, future=True, color=True):
    """Scatter-plot year of epigraph against year of the citing work.

    WFROM, WTO: inclusive range of work years to include.
    efrom, eto: inclusive range of epigraph years to include.
    future:     if False, drop epigraphs dated after the citing work
                (i.e. keep only wyear > eyear).
    color:      if True, colour points by nationality via ncolor();
                otherwise draw everything in black.

    Saves the figure under figs/ and closes it; returns None.

    Previously the SQL query and the scatter call were each duplicated
    almost verbatim across the future/color branches; both are now built
    conditionally instead.
    """
    query = """
    SELECT wyear, eyear, count (eyear), wnationality
    FROM clean
    WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)
    AND WYEAR >= ? and WYEAR <= ?
    AND eyear >= ? AND eyear <= ?
    """
    if not future:
        # exclude epigraphs attributed to sources later than the work
        query += "AND wyear > eyear\n"
    query += "GROUP BY wyear, eyear\nORDER BY wyear, eyear"
    c.execute(query, (WFROM, WTO, efrom, eto))
    years = c.fetchall()
    # total number of epigraphs (each row carries a count, not 1)
    epigraphtotal = sum(s for (x, y, s, n) in years)
    plt.xlim(WFROM, WTO)
    plt.ylim(efrom, eto)
    point_colors = [ncolor(n) for (x, y, s, n) in years] if color else 'black'
    plt.scatter([int(x) for (x, y, s, n) in years],
                [int(y) for (x, y, s, n) in years],
                s=[int(s) for (x, y, s, n) in years],  # marker size ~ count
                color=point_colors,
                marker='.')
    plt.xlabel('Year of Work')
    plt.ylabel('Year of Epigraph')
    plt.title(f'Year of Epigraph vs Year of Work ({epigraphtotal} epigraphs)')
    # NOTE(review): ':' in the filename is invalid on Windows filesystems
    plt.savefig(f"figs/eg-timediff-{WFROM}:{WTO}-{efrom}:{eto}-{future}-{color}.png")
    plt.close()
def generation(x, g):
    """Number of whole generations of length ``g`` spanned by ``x`` years.

    Uses int() truncation toward zero, so negative spans round toward 0.
    """
    span = x / g
    return int(span)
def rat(d, n):
    """Safe ratio d/n used for stacked-bar shares in forebears().

    Returns 0 when d == 0 (nothing in this bucket) or when n == 0
    (empty generation).  Bug fix: the n == 0 branch previously only
    printed a diagnostic and fell through, implicitly returning None,
    which would break the downstream list arithmetic in forebears().
    """
    if d == 0:
        return 0
    if n == 0:
        print('n=', d, n)  # keep the original diagnostic output
        return 0
    return d / n
def forebears (WFROM,WTO,efrom, eto, g=25):
    """Stacked-bar chart of how far back in time works cite.

    WFROM, WTO: inclusive range of work years to include.
    efrom, eto: inclusive range of epigraph years to include.
    g:          generation size in years (bucket width for both axes).

    For each work-year generation, stacks the share of citations whose
    epigraph lies 0, 1, 2, ... generations in the past.  Saves the
    figure under figs/ and closes it; returns None.
    """
    c.execute("""
    SELECT wyear, eyear, count (eyear), wnationality
    FROM clean
    WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)
    AND WYEAR >= ? and WYEAR <= ?
    AND eyear >= ? AND eyear <= ?
    GROUP BY wyear, eyear
    ORDER BY wyear, eyear""", (WFROM, WTO, efrom, eto))
    years = c.fetchall()
    epigraphtotal = sum (s for (x,y,s,n) in years)
    #plt.xlim(WFROM, WTO)
    #plt.ylim(100, -1500)
    #colors = list(mcolors.TABLEAU_COLORS.keys()) *20
    #print(colors)
    # gen[work_generation][epigraph_generation - work_generation] = pair count
    # NOTE(review): increments by 1 per distinct (wyear, eyear) pair rather
    # than by the count s, so shares are over year-pairs, not epigraphs,
    # while the title reports the epigraph total — confirm this is intended.
    gen =dd(lambda: dd(int))
    gentotal= dd(int)
    for (x,y,s,n) in years:
        gen[generation(x,g)][generation(y-x,g)] += 1
        gentotal[generation(x,g)] +=1
    # debug dump of the generation buckets
    for x in gen:
        for y in gen[x]:
            print(x, y, gen[x][y], gentotal[x])
    plt.figure(figsize=(10, 5))
    ax=plt.axes()
    #df.plot(colormap=gray)
    # one bar layer per backward distance d (0 .. -199 generations);
    # positive d (epigraphs dated after the work) is deliberately excluded
    cumtotal = [0]*len(gen)
    for d in range(0,-200, -1):
    #for d in range(min(gen.keys()),max(gen.keys()),-1):
        xv = list(gen.keys())
        # share of this work-generation's citations at distance d;
        # gen[x][d] autovivifies missing keys to 0 via the defaultdict
        yv = [rat(gen[x][d],gentotal[x]) for x in xv]
        plt.bar(xv, yv, bottom=cumtotal,
                tick_label=[x*g for x in xv])  # label ticks in years
        cumtotal = [x + y for x, y in zip(yv, cumtotal)]
        #colors.pop()
        #print(d, cumtotal)
    plt.xlabel('Year of Work (in generations)')
    plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')
    plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')
    plt.savefig(f"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png")
    plt.close()
    # plt.bar(gen.keys(), gen[x][1].values()),bottom=gen[x][0].values()))
    # plt.scatter([int(x) for (x,y,s,n) in years],
    #             [int((y-x)) for (x,y,s,n) in years],
    #             s=[int(s) for (x,y,s,n) in years],
    #             color=[ncolor(n) for (x,y,s,n) in years]);
    # plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')
    # plt.savefig(f"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}.png")
    # plt.close()
def timelen (WFROM, WTO):
    """Plot the average epigraph length (in words) per year of work.

    Uses the module-level cursor `c`; the word count is approximated in SQL
    as (number of spaces in the trimmed epigraph) + 1.  Saves a scatter of
    per-year averages plus a degree-1 least-squares trend line to
    figs/eg-year-length.png.
    """
    # for characters: avg(length(epigraph))
    c.execute("""
    SELECT wyear, avg(length(trim(epigraph)) - length(replace(trim(epigraph), ' ', '')) + 1)
    FROM clean
    WHERE wyear IS NOT Null
    AND WYEAR >= ? and WYEAR <= ?
    GROUP BY wyear
    ORDER BY wyear, wyear""", (WFROM, WTO))
    years = c.fetchall()
    c.execute ("""SELECT count(wyear)
    FROM clean
    WHERE wyear IS NOT Null
    AND WYEAR >= ? and WYEAR <= ?""", (WFROM, WTO))
    epigraphtotal, = c.fetchone()
    plt.xlim(WFROM, WTO)
    xd = [int(x) for (x,y) in years]
    yd = [int(y) for (x,y) in years]
    # Linear least-squares fit for the trend line.
    par = np.polyfit(xd, yd, 1, full=True)
    slope=par[0][0]
    intercept=par[0][1]
    yl = [slope*xx + intercept for xx in xd]
    plt.scatter(xd,
                yd,
                color='black');
    plt.plot(xd, yl)
    plt.xlabel('Year of Work')
    plt.ylabel('Epigraph Length')
    plt.title(f'Average Length of Epigraph (in words) vs \n Year of Work (for {epigraphtotal} epigraphs), from {WFROM} to {WTO}')
    plt.savefig("figs/eg-year-length.png")
    plt.close()
# Generate the epigraph-vs-work-year scatter plots for two year ranges and
# several future/color option combinations, then one forebear stacked-bar
# chart.  The commented-out lines are alternative parameterisations.
timediff(1650,2020,-500,2020)
timediff(1900,2020,1500,2020)
timediff(1650,2020,-500,2020, future=False)
timediff(1900,2020,1500,2020, future=False)
timediff(1900,2020,1500,2020, future=False, color=False)
# forebears(1650,2020,-500,2020)
forebears(1800,2020,-500,2020)
# forebears(1900,2020,-500,2020)
# forebears(1600,2020,-500,2020,g=50)
# forebears(1800,2020,-500,2020,g=50)
# forebears(1900,2020,-500,2020,g=50)
# timelen(1650, 2020)
# timelen(1650, 2020)
| [
"matplotlib.pyplot.title",
"sqlite3.connect",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"collecti... | [((168, 202), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (181, 202), True, 'import matplotlib.pyplot as plt\n'), ((306, 329), 'sqlite3.connect', 'sqlite3.connect', (['dbfile'], {}), '(dbfile)\n', (321, 329), False, 'import sqlite3, sys\n'), ((1679, 1699), 'matplotlib.pyplot.xlim', 'plt.xlim', (['WFROM', 'WTO'], {}), '(WFROM, WTO)\n', (1687, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1704, 1724), 'matplotlib.pyplot.ylim', 'plt.ylim', (['efrom', 'eto'], {}), '(efrom, eto)\n', (1712, 1724), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2869), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year of Work"""'], {}), "('Year of Work')\n", (2853, 2869), True, 'import matplotlib.pyplot as plt\n'), ((2874, 2904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Year of Epigraph"""'], {}), "('Year of Epigraph')\n", (2884, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2909, 2983), 'matplotlib.pyplot.title', 'plt.title', (['f"""Year of Epigraph vs Year of Work ({epigraphtotal} epigraphs)"""'], {}), "(f'Year of Epigraph vs Year of Work ({epigraphtotal} epigraphs)')\n", (2918, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3074), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figs/eg-timediff-{WFROM}:{WTO}-{efrom}:{eto}-{future}-{color}.png"""'], {}), "(\n f'figs/eg-timediff-{WFROM}:{WTO}-{efrom}:{eto}-{future}-{color}.png')\n", (2999, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3085), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3083, 3085), True, 'import matplotlib.pyplot as plt\n'), ((3984, 3991), 'collections.defaultdict', 'dd', (['int'], {}), '(int)\n', (3986, 3991), True, 'from collections import defaultdict as dd\n'), ((4222, 4249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4232, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4267), 'matplotlib.pyplot.axes', 
'plt.axes', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year of Work (in generations)"""'], {}), "('Year of Work (in generations)')\n", (4726, 4759), True, 'import matplotlib.pyplot as plt\n'), ((4764, 4834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Share of Distance to forebear (in {g} year generations)"""'], {}), "(f'Share of Distance to forebear (in {g} year generations)')\n", (4774, 4834), True, 'import matplotlib.pyplot as plt\n'), ((4839, 4910), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distance back vs Year of Work ({epigraphtotal} epigraphs)"""'], {}), "(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n", (4848, 4910), True, 'import matplotlib.pyplot as plt\n'), ((4915, 4983), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png"""'], {}), "(f'figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png')\n", (4926, 4983), True, 'import matplotlib.pyplot as plt\n'), ((4988, 4999), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4997, 4999), True, 'import matplotlib.pyplot as plt\n'), ((6016, 6036), 'matplotlib.pyplot.xlim', 'plt.xlim', (['WFROM', 'WTO'], {}), '(WFROM, WTO)\n', (6024, 6036), True, 'import matplotlib.pyplot as plt\n'), ((6123, 6155), 'numpy.polyfit', 'np.polyfit', (['xd', 'yd', '(1)'], {'full': '(True)'}), '(xd, yd, 1, full=True)\n', (6133, 6155), True, 'import numpy as np\n'), ((6255, 6289), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xd', 'yd'], {'color': '"""black"""'}), "(xd, yd, color='black')\n", (6266, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6327, 6343), 'matplotlib.pyplot.plot', 'plt.plot', (['xd', 'yl'], {}), '(xd, yl)\n', (6335, 6343), True, 'import matplotlib.pyplot as plt\n'), ((6348, 6374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year of Work"""'], {}), "('Year of Work')\n", (6358, 6374), True, 'import matplotlib.pyplot as plt\n'), 
((6379, 6408), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Epigraph Length"""'], {}), "('Epigraph Length')\n", (6389, 6408), True, 'import matplotlib.pyplot as plt\n'), ((6413, 6551), 'matplotlib.pyplot.title', 'plt.title', (['f"""Average Length of Epigraph (in words) vs \n Year of Work (for {epigraphtotal} epigraphs), from {WFROM} to {WTO}"""'], {}), '(\n f"""Average Length of Epigraph (in words) vs \n Year of Work (for {epigraphtotal} epigraphs), from {WFROM} to {WTO}"""\n )\n', (6422, 6551), True, 'import matplotlib.pyplot as plt\n'), ((6543, 6581), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figs/eg-year-length.png"""'], {}), "('figs/eg-year-length.png')\n", (6554, 6581), True, 'import matplotlib.pyplot as plt\n'), ((6586, 6597), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6595, 6597), True, 'import matplotlib.pyplot as plt\n'), ((4526, 4592), 'matplotlib.pyplot.bar', 'plt.bar', (['xv', 'yv'], {'bottom': 'cumtotal', 'tick_label': '[(x * g) for x in xv]'}), '(xv, yv, bottom=cumtotal, tick_label=[(x * g) for x in xv])\n', (4533, 4592), True, 'import matplotlib.pyplot as plt\n'), ((3961, 3968), 'collections.defaultdict', 'dd', (['int'], {}), '(int)\n', (3963, 3968), True, 'from collections import defaultdict as dd\n')] |
from collections import deque
import enum
import time
import os
import numpy as np
from skspatial.objects import Line
from skspatial.objects import Points
class MotionLineDetector:
    # Fits a 3D line to a sliding window of recent points and reports its
    # direction once the window is full and the points show enough motion.
    MAX_POINT = 60  # sliding-window length (number of buffered points)
    def __init__(self):
        # Most recent points (numpy vectors, given np.linalg.norm(pt2 - pt1)
        # below).
        self.past_points = deque([])
        # Timestamp of the most recent get_next_line() call.
        self.last_time = None
    def get_next_line(self, point):
        """Append ``point`` and, once MAX_POINT samples are buffered and
        moving fast enough, return the unit direction of the best-fit line
        (None otherwise)."""
        self.past_points.append(point)
        if len(self.past_points) > self.MAX_POINT:
            self.past_points.popleft()
        self.last_time = time.time()
        if len(self.past_points) == self.MAX_POINT:
            # Largest step between consecutive buffered points.
            max_movement = 0
            for pt2, pt1 in zip(list(self.past_points)[1:], list(self.past_points)[:-1]):
                movement = np.linalg.norm(pt2 - pt1)
                if movement > max_movement:
                    max_movement = movement
            # NOTE(review): self.last_time was reset a few statements above,
            # so the elapsed time here is tiny -- the ratio is effectively
            # huge unless max_movement is ~0, and a zero delta would raise
            # ZeroDivisionError.  The intent was presumably the time since
            # the *previous* call; confirm before changing.
            if (max_movement / (time.time() - self.last_time)) < 0.1:
                return None
            points = Points(list(self.past_points))
            line_fit = Line.best_fit(points)
            direction = np.array(line_fit.direction)
            # I defined this side will be the positive direction.
            if direction[0] < 0:
                direction *= -1
            # Normalise to a unit vector.
            direction = direction / np.linalg.norm(direction)
            return direction
        else:
            return None
from pydub import AudioSegment
from pydub.playback import play
import threading
import glob
# Load mp3s.
# Load every mp3 under sounds/ once at import time.
songs = [AudioSegment.from_mp3(sound_path) for sound_path in glob.glob("sounds/*.mp3")]
def play_ex():
    """Play one randomly chosen clip from the module-level ``songs`` list."""
    chosen = np.random.randint(0, len(songs))
    play(songs[chosen])
class OnahoStateEstimator:
    # Tracks a 1D position (frame-to-frame motion projected onto a line) and
    # plays a random sound when the position swings back near the recent
    # minimum.
    MAX_FRAME = 30 * 2  # history window length in frames
    WAIT_TIME = 30  # NOTE(review): unused -- the cooldown below hard-codes 30
    def __init__(self):
        self.previous_center = None  # center seen on the previous frame
        self.current_position = 0  # cumulative projected displacement
        self.recent_positions = deque([])  # last MAX_FRAME positions
        self.remaining_wait = 0  # frames left before another sound may play
    def add_current_state(self, line, center):
        """Accumulate the displacement of ``center`` projected on ``line``
        and trigger a sound when the motion pattern matches."""
        if self.previous_center is not None:
            # Signed displacement along the motion line.
            move_distance = np.dot(line, center - self.previous_center)
            self.current_position += move_distance
        self.previous_center = center
        if len(self.recent_positions) == self.MAX_FRAME:
            self.recent_positions.popleft()
        self.recent_positions.append(self.current_position)
        min_pos = min(self.recent_positions)
        max_pos = max(self.recent_positions)
        # Half of the recent positional range.
        rate = (max_pos - min_pos) * 0.5
        print(max_pos - min_pos)
        if (max_pos - min_pos) > 0.05:
            # Fire when the current position is within half a range of the
            # recent minimum and the cooldown has expired.
            if min_pos > self.current_position - rate and self.remaining_wait <= 0:
                t = threading.Thread(target=play_ex)
                t.start()
                self.remaining_wait = 30
        self.remaining_wait -= 1
from dataclasses import dataclass
@dataclass
class VelocityBasedInsertionEstimatorOption:
    # Tuning knobs for VelocityBasedInsertionEstimator.  Field names keep
    # the original "threashold" spelling; the estimator reads them by name.
    forward_backward_velocity_threashold: float = 0.35  # |v| above this => inserting/outgoing
    no_motion_velocity_threashold: float = 0.20  # |v| below this => no motion
    sound_wait_time: int = 5  # cooldown (in update calls) between sounds
class VelocityBasedInsertionEstimator:
    """Estimate insertion state from the velocity of a tracked center point.

    Each call to :meth:`add_current_state` projects the frame-to-frame
    displacement of ``center`` onto ``line``, converts it to a velocity and
    classifies the motion as inserting / outgoing / no-motion.  When an
    inserting phase comes to rest, a random mp3 from ``sound_dir`` is played
    on a background thread, subject to a cooldown.
    """

    class OnahoState(enum.Enum):
        INSERTING = "inserting"
        OUTGOING = "outgoing"
        NO_MOTION = "no_motion"

    def __init__(self, sound_dir, option=None):
        # Bug fix: the original default argument was a mutable dataclass
        # instance, shared by every estimator constructed without an
        # explicit option.  Use the None-sentinel idiom instead; callers
        # passing their own option are unaffected.
        self.previous_center = None   # center from the previous frame
        self.previous_time = None     # wall-clock time of the previous frame
        self.remaining_wait = 0       # cooldown counter for sound playback
        self.state = self.OnahoState.NO_MOTION
        self.option = option if option is not None \
            else VelocityBasedInsertionEstimatorOption()
        self.songs = [
            AudioSegment.from_mp3(sound_path)
            for sound_path in glob.glob(os.path.join(sound_dir, "*.mp3"))]
        # For Debug.
        self.velocities = []
        self.timestamps = []

    def play_ex(self):
        """Play one randomly chosen loaded clip (blocking; run in a thread)."""
        song_index = np.random.randint(0, len(self.songs))
        play(self.songs[song_index])

    def add_current_state(self, line, center):
        """Update the state machine with the current frame.

        Parameters
        ----------
        line : array-like
            Direction of the motion axis (dotted with the displacement).
        center : array-like
            Current tracked center position.
        """
        # Just skip at the first frame.
        if self.previous_center is not None and self.previous_time is not None:
            delta_t = time.time() - self.previous_time
            move_distance = np.dot(line, center - self.previous_center)
            velocity = move_distance / delta_t
            if velocity > self.option.forward_backward_velocity_threashold:
                self.state = self.OnahoState.INSERTING
            elif velocity < -self.option.forward_backward_velocity_threashold:
                self.state = self.OnahoState.OUTGOING
            else:
                if abs(velocity) < self.option.no_motion_velocity_threashold:
                    # Coming to rest right after an inserting phase triggers
                    # a sound, at most once per cooldown window.
                    if self.state == self.OnahoState.INSERTING:
                        if self.remaining_wait <= 0:
                            t = threading.Thread(target=self.play_ex)
                            t.start()
                            self.remaining_wait = self.option.sound_wait_time
                    print(self.state)
                    self.state = self.OnahoState.NO_MOTION
            self.velocities.append(velocity)
            self.timestamps.append(time.time())
        self.previous_center = center
        self.previous_time = time.time()
        self.remaining_wait -= 1
"collections.deque",
"pydub.playback.play",
"pydub.AudioSegment.from_mp3",
"os.path.join",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"threading.Thread",
"time.time",
"skspatial.objects.Line.best_fit",
"glob.glob"
] | [((1455, 1488), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['sound_path'], {}), '(sound_path)\n', (1476, 1488), False, 'from pydub import AudioSegment\n'), ((1604, 1627), 'pydub.playback.play', 'play', (['songs[song_index]'], {}), '(songs[song_index])\n', (1608, 1627), False, 'from pydub.playback import play\n'), ((255, 264), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (260, 264), False, 'from collections import deque\n'), ((488, 499), 'time.time', 'time.time', ([], {}), '()\n', (497, 499), False, 'import time\n'), ((1507, 1532), 'glob.glob', 'glob.glob', (['"""sounds/*.mp3"""'], {}), "('sounds/*.mp3')\n", (1516, 1532), False, 'import glob\n'), ((1824, 1833), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (1829, 1833), False, 'from collections import deque\n'), ((3714, 3742), 'pydub.playback.play', 'play', (['self.songs[song_index]'], {}), '(self.songs[song_index])\n', (3718, 3742), False, 'from pydub.playback import play\n'), ((5008, 5019), 'time.time', 'time.time', ([], {}), '()\n', (5017, 5019), False, 'import time\n'), ((989, 1010), 'skspatial.objects.Line.best_fit', 'Line.best_fit', (['points'], {}), '(points)\n', (1002, 1010), False, 'from skspatial.objects import Line\n'), ((1035, 1063), 'numpy.array', 'np.array', (['line_fit.direction'], {}), '(line_fit.direction)\n', (1043, 1063), True, 'import numpy as np\n'), ((1987, 2030), 'numpy.dot', 'np.dot', (['line', '(center - self.previous_center)'], {}), '(line, center - self.previous_center)\n', (1993, 2030), True, 'import numpy as np\n'), ((3433, 3466), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['sound_path'], {}), '(sound_path)\n', (3454, 3466), False, 'from pydub import AudioSegment\n'), ((3995, 4038), 'numpy.dot', 'np.dot', (['line', '(center - self.previous_center)'], {}), '(line, center - self.previous_center)\n', (4001, 4038), True, 'import numpy as np\n'), ((699, 724), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt2 - pt1)'], {}), '(pt2 - pt1)\n', (713, 
724), True, 'import numpy as np\n'), ((1233, 1258), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (1247, 1258), True, 'import numpy as np\n'), ((2603, 2635), 'threading.Thread', 'threading.Thread', ([], {'target': 'play_ex'}), '(target=play_ex)\n', (2619, 2635), False, 'import threading\n'), ((3933, 3944), 'time.time', 'time.time', ([], {}), '()\n', (3942, 3944), False, 'import time\n'), ((4927, 4938), 'time.time', 'time.time', ([], {}), '()\n', (4936, 4938), False, 'import time\n'), ((3508, 3540), 'os.path.join', 'os.path.join', (['sound_dir', '"""*.mp3"""'], {}), "(sound_dir, '*.mp3')\n", (3520, 3540), False, 'import os\n'), ((846, 857), 'time.time', 'time.time', ([], {}), '()\n', (855, 857), False, 'import time\n'), ((4595, 4632), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.play_ex'}), '(target=self.play_ex)\n', (4611, 4632), False, 'import threading\n')] |
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import numpy as np
def fakeBootStrapper(n):
    """Placeholder for a real median bootstrap.

    Returns a canned ``(median, confidence_interval)`` tuple: one fixed
    result for ``n == 1`` and another for every other value.
    """
    if n == 1:
        return 0.1, (-0.25, 0.25)
    return 0.2, (-0.35, 0.50)
# Four random "treatments": two standard-normal and two with slightly
# increasing spread.
np.random.seed(2)
inc = 0.1
e1 = np.random.normal(0, 1, size=(500,))
e2 = np.random.normal(0, 1, size=(500,))
e3 = np.random.normal(0, 1 + inc, size=(500,))
e4 = np.random.normal(0, 1 + 2*inc, size=(500,))
treatments = [e1,e2,e3,e4]
# Externally supplied medians / confidence intervals for the last two boxes;
# None entries let boxplot compute its own via bootstrapping.
med1, CI1 = fakeBootStrapper(1)
med2, CI2 = fakeBootStrapper(2)
medians = [None, None, med1, med2]
conf_intervals = [None, None, CI1, CI2]
fig = plt.figure()
ax = fig.add_subplot(111)
pos = np.array(range(len(treatments)))+1
# Notched boxplot; bootstrap=5000 resamples where no CI was supplied.
bp = ax.boxplot(treatments, sym='k+', positions=pos,
                notch=1, bootstrap=5000,
                usermedians=medians,
                conf_intervals=conf_intervals)
ax.set_xlabel('treatment')
ax.set_ylabel('response')
plt.setp(bp['whiskers'], color='k', linestyle='-' )
plt.setp(bp['fliers'], markersize=3.0)
plt.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.show"
] | [((472, 489), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (486, 489), True, 'import numpy as np\n'), ((505, 540), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(500,)'}), '(0, 1, size=(500,))\n', (521, 540), True, 'import numpy as np\n'), ((546, 581), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(500,)'}), '(0, 1, size=(500,))\n', (562, 581), True, 'import numpy as np\n'), ((587, 628), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1 + inc)'], {'size': '(500,)'}), '(0, 1 + inc, size=(500,))\n', (603, 628), True, 'import numpy as np\n'), ((634, 679), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1 + 2 * inc)'], {'size': '(500,)'}), '(0, 1 + 2 * inc, size=(500,))\n', (650, 679), True, 'import numpy as np\n'), ((852, 864), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (862, 864), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1214), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'color': '"""k"""', 'linestyle': '"""-"""'}), "(bp['whiskers'], color='k', linestyle='-')\n", (1172, 1214), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1255), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'markersize': '(3.0)'}), "(bp['fliers'], markersize=3.0)\n", (1225, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1264, 1266), True, 'import matplotlib.pyplot as plt\n')] |
"""Common utils."""
import functools
import flax.linen as nn
import jax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# PyTorch-style kernel initializer: uniform over +/- 1/sqrt(fan_in)
# (variance_scaling with scale 1/3, 'fan_in', 'uniform' yields that limit).
pytorch_kernel_init = functools.partial(initializers.variance_scaling,
                                        1. / 3., 'fan_in', 'uniform')
def uniform_initializer(minval, maxval, dtype=jnp.float32):
    """Build an initializer sampling uniformly from ``[minval, maxval)``."""
    def _init(key, shape, dtype=dtype):
        return jax.random.uniform(key, shape, dtype, minval=minval, maxval=maxval)
    return _init
def dense(inputs, output_dim, dtype, kernel_init=None):
    """Apply a Dense layer with PyTorch-style default initialisation.

    The bias is drawn uniformly from +/- 1/sqrt(fan_in); the kernel falls
    back to ``pytorch_kernel_init`` when no initializer is given.
    """
    limit = 1. / np.sqrt(inputs.shape[-1])
    if kernel_init is None:
        kernel_init = pytorch_kernel_init(dtype=dtype)
    layer = nn.Dense(
        output_dim,
        kernel_init=kernel_init,
        bias_init=uniform_initializer(-limit, limit, dtype),
        dtype=dtype)
    return layer(inputs)
def create_output(output_model, params, aux_loss=False, layout_model_pamp=None):
    """Creates the output dict.

    Without ``aux_loss`` the result is simply ``output_model(params)``.
    With ``aux_loss``, ``layout_model_pamp`` (partially applied with the
    train flag) is vmapped over ``params['multimodal_outputs']``; the last
    layer's predictions become the top-level entries and every earlier
    layer's predictions are collected under ``'aux_outputs'``.
    """
    multimodal_outputs = params['multimodal_outputs']
    if not aux_loss:
        return dict(output_model(params))
    # Currently only layout has intermediate losses.
    mapped_layout = jax.vmap(
        functools.partial(layout_model_pamp, train=params['train']))
    pred_dict = mapped_layout(multimodal_outputs)
    # Last layer supplies the main predictions ...
    output = {key: values[-1] for key, values in pred_dict.items()}
    # ... each earlier layer contributes an auxiliary logits dict.
    num_layers = multimodal_outputs.shape[0]
    output['aux_outputs'] = [
        {key: pred_dict[key][layer] for key in pred_dict}
        for layer in range(num_layers - 1)
    ]
    return output
| [
"numpy.sqrt",
"jax.vmap",
"functools.partial",
"jax.random.uniform"
] | [((170, 255), 'functools.partial', 'functools.partial', (['initializers.variance_scaling', '(1.0 / 3.0)', '"""fan_in"""', '"""uniform"""'], {}), "(initializers.variance_scaling, 1.0 / 3.0, 'fan_in', 'uniform'\n )\n", (187, 255), False, 'import functools\n'), ((1178, 1237), 'functools.partial', 'functools.partial', (['layout_model_pamp'], {'train': "params['train']"}), "(layout_model_pamp, train=params['train'])\n", (1195, 1237), False, 'import functools\n'), ((399, 466), 'jax.random.uniform', 'jax.random.uniform', (['key', 'shape', 'dtype'], {'minval': 'minval', 'maxval': 'maxval'}), '(key, shape, dtype, minval=minval, maxval=maxval)\n', (417, 466), False, 'import jax\n'), ((559, 584), 'numpy.sqrt', 'np.sqrt', (['inputs.shape[-1]'], {}), '(inputs.shape[-1])\n', (566, 584), True, 'import numpy as np\n'), ((1259, 1294), 'jax.vmap', 'jax.vmap', (['layout_model_pamp_partial'], {}), '(layout_model_pamp_partial)\n', (1267, 1294), False, 'import jax\n')] |
from GitMarco.tf import utils, metrics, basic
import numpy as np
from GitMarco.tf.losses import chamfer_distance, euclidian_dist_loss
from GitMarco.tf.optimization import OptiLoss, GradientOptimizer
from GitMarco.tf.pointnet import Pointnet, PointnetAe
from GitMarco.tf.utils import limit_memory, random_dataset
import pandas as pd
from GitMarco.tf.basic import basic_dense_model
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
def test_limit_memory():
    """Smoke test: limit_memory() runs without raising."""
    limit_memory()
def test_random_dataset():
    """Smoke test: a random dataset can be generated with default arguments."""
    utils.random_dataset()
def test_r_squared():
    """Smoke test: r_squared accepts random targets and predictions."""
    targets = np.random.rand(100)
    preds = np.random.rand(100)
    metrics.r_squared(targets, preds)
def test_basic_dense_model():
    """Smoke test: a small dense model can be built and summarised."""
    net = basic.basic_dense_model(
        input_shape=(10,), output_shape=1, optimizer='adadelta')
    net.summary()
def test_chamfer_loss():
    """Smoke test: chamfer distance between two random point clouds."""
    cloud_a = utils.random_dataset(shape=(32, 1024, 3))
    cloud_b = utils.random_dataset(shape=(32, 1024, 3))
    chamfer_distance(cloud_a, cloud_b)
def test_pointnet():
    """Build, compile and smoke-evaluate a Pointnet model on random data.

    The original version also generated ``test_data`` and ``field_test``
    tensors that were never used; they are dropped here.
    """
    data = random_dataset(shape=(32, 4096, 3))
    field = random_dataset(shape=(32, 4096, 2))
    model = Pointnet(n_points=4096).create_model()
    model.summary()
    model.compile(loss='mse', optimizer='adam')
    model.evaluate(data, field)
def test_pointnet_ae():
    """Build a Pointnet autoencoder and smoke-evaluate it on random data.

    The model is trained against three targets: the geometry itself
    (chamfer loss), two global scalars ('mse' each) and a per-point local
    field ('mse').
    """
    data = random_dataset(shape=(32, 400, 3))
    global_v = random_dataset(shape=(32, 1))
    global_v_2 = random_dataset(shape=(32, 1))
    local_v = random_dataset(shape=(32, 400, 3))
    # NOTE: 'dfferent_out_for_globals' keeps the library's own (misspelt)
    # keyword name.
    model = PointnetAe(n_geometry_points=data.shape[1],
                       n_global_variables=2,
                       n_local_variables=3,
                       type_decoder='cnn',
                       n_cnn_dec_layer=4,
                       dfferent_out_for_globals=True,
                       cnn_dec_filters=[64, 32, 32, 16]
                       )
    model = model.create_model()
    model.summary()
    model.compile(loss=[chamfer_distance, ['mse', 'mse'], 'mse'], optimizer='adam')
    # Force evaluation on CPU.
    with tf.device('CPU:0'):
        model.evaluate(data, [data, [global_v, global_v_2], local_v])
def test_euclidean_distance():
    """Smoke test: corrected euclidean distance loss on random clouds."""
    cloud_a = utils.random_dataset(shape=(32, 1024, 3))
    cloud_b = utils.random_dataset(shape=(32, 1024, 3))
    euclidian_dist_loss(cloud_a, cloud_b, correction=True)
class Loss(OptiLoss):
    """Optimisation loss: the first scalar output of the wrapped model."""

    def __init__(self, params=None):
        super().__init__(params)

    def __call__(self, sample):
        prediction = self.model(sample)
        return prediction[0][0]
def test_gradient_optimizer():
    """End-to-end smoke test of GradientOptimizer on a tiny dense model."""
    with tf.device('CPU:0'):
        # Random 32x4 frame: two features (x1, x2) and two labels (y1, y2).
        df = pd.DataFrame(random_dataset(shape=(32, 4)).numpy())
        df.columns = ['x1', 'x2', 'y1', 'y2']
        model = basic_dense_model(input_shape=(2,), output_shape=2)
        model.compile(optimizer='Adam')
        model.fit(random_dataset(shape=(32, 2)), random_dataset(shape=(32, 2)), epochs=1)
        optimizer = GradientOptimizer(
            model,
            df,
            StandardScaler(),
            Loss(),
            n_features=2,
            n_labels=2,
            iterations=10
        )
        optimizer.run()
        optimizer.history()
        optimizer.get_best_sample()
        optimizer.get_results()
        optimizer.compare_bounds()
        # Reset and run again with more iterations.
        optimizer.reset()
        optimizer.iterations = 100
        optimizer.run()
        optimizer.history()
        optimizer.get_best_sample()
        optimizer.get_results()
| [
"tensorflow.device",
"GitMarco.tf.utils.limit_memory",
"numpy.random.rand",
"GitMarco.tf.pointnet.Pointnet",
"GitMarco.tf.pointnet.PointnetAe",
"GitMarco.tf.losses.euclidian_dist_loss",
"sklearn.preprocessing.StandardScaler",
"GitMarco.tf.losses.chamfer_distance",
"GitMarco.tf.utils.random_dataset",... | [((484, 498), 'GitMarco.tf.utils.limit_memory', 'limit_memory', ([], {}), '()\n', (496, 498), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((532, 554), 'GitMarco.tf.utils.random_dataset', 'utils.random_dataset', ([], {}), '()\n', (552, 554), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((587, 606), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (601, 606), True, 'import numpy as np\n'), ((625, 644), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (639, 644), True, 'import numpy as np\n'), ((649, 682), 'GitMarco.tf.metrics.r_squared', 'metrics.r_squared', (['y', 'predictions'], {}), '(y, predictions)\n', (666, 682), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((727, 812), 'GitMarco.tf.basic.basic_dense_model', 'basic.basic_dense_model', ([], {'input_shape': '(10,)', 'output_shape': '(1)', 'optimizer': '"""adadelta"""'}), "(input_shape=(10,), output_shape=1, optimizer='adadelta'\n )\n", (750, 812), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((935, 976), 'GitMarco.tf.utils.random_dataset', 'utils.random_dataset', ([], {'shape': '(32, 1024, 3)'}), '(shape=(32, 1024, 3))\n', (955, 976), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((985, 1026), 'GitMarco.tf.utils.random_dataset', 'utils.random_dataset', ([], {'shape': '(32, 1024, 3)'}), '(shape=(32, 1024, 3))\n', (1005, 1026), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((1031, 1053), 'GitMarco.tf.losses.chamfer_distance', 'chamfer_distance', (['x', 'y'], {}), '(x, y)\n', (1047, 1053), False, 'from GitMarco.tf.losses import chamfer_distance, euclidian_dist_loss\n'), ((1088, 1123), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 4096, 3)'}), '(shape=(32, 4096, 3))\n', (1102, 1123), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1140, 1175), 'GitMarco.tf.utils.random_dataset', 
'random_dataset', ([], {'shape': '(32, 4096, 3)'}), '(shape=(32, 4096, 3))\n', (1154, 1175), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1188, 1223), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 4096, 2)'}), '(shape=(32, 4096, 2))\n', (1202, 1223), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1241, 1276), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 4096, 2)'}), '(shape=(32, 4096, 2))\n', (1255, 1276), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1290, 1313), 'GitMarco.tf.pointnet.Pointnet', 'Pointnet', ([], {'n_points': '(4096)'}), '(n_points=4096)\n', (1298, 1313), False, 'from GitMarco.tf.pointnet import Pointnet, PointnetAe\n'), ((1513, 1547), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 400, 3)'}), '(shape=(32, 400, 3))\n', (1527, 1547), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1563, 1592), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 1)'}), '(shape=(32, 1))\n', (1577, 1592), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1610, 1639), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 1)'}), '(shape=(32, 1))\n', (1624, 1639), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1654, 1688), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 400, 3)'}), '(shape=(32, 400, 3))\n', (1668, 1688), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((1702, 1900), 'GitMarco.tf.pointnet.PointnetAe', 'PointnetAe', ([], {'n_geometry_points': 'data.shape[1]', 'n_global_variables': '(2)', 'n_local_variables': '(3)', 'type_decoder': '"""cnn"""', 'n_cnn_dec_layer': '(4)', 'dfferent_out_for_globals': '(True)', 'cnn_dec_filters': '[64, 32, 32, 16]'}), "(n_geometry_points=data.shape[1], n_global_variables=2,\n 
n_local_variables=3, type_decoder='cnn', n_cnn_dec_layer=4,\n dfferent_out_for_globals=True, cnn_dec_filters=[64, 32, 32, 16])\n", (1712, 1900), False, 'from GitMarco.tf.pointnet import Pointnet, PointnetAe\n'), ((2333, 2374), 'GitMarco.tf.utils.random_dataset', 'utils.random_dataset', ([], {'shape': '(32, 1024, 3)'}), '(shape=(32, 1024, 3))\n', (2353, 2374), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((2383, 2424), 'GitMarco.tf.utils.random_dataset', 'utils.random_dataset', ([], {'shape': '(32, 1024, 3)'}), '(shape=(32, 1024, 3))\n', (2403, 2424), False, 'from GitMarco.tf import utils, metrics, basic\n'), ((2429, 2471), 'GitMarco.tf.losses.euclidian_dist_loss', 'euclidian_dist_loss', (['x', 'y'], {'correction': '(True)'}), '(x, y, correction=True)\n', (2448, 2471), False, 'from GitMarco.tf.losses import chamfer_distance, euclidian_dist_loss\n'), ((2202, 2220), 'tensorflow.device', 'tf.device', (['"""CPU:0"""'], {}), "('CPU:0')\n", (2211, 2220), True, 'import tensorflow as tf\n'), ((2691, 2709), 'tensorflow.device', 'tf.device', (['"""CPU:0"""'], {}), "('CPU:0')\n", (2700, 2709), True, 'import tensorflow as tf\n'), ((2838, 2889), 'GitMarco.tf.basic.basic_dense_model', 'basic_dense_model', ([], {'input_shape': '(2,)', 'output_shape': '(2)'}), '(input_shape=(2,), output_shape=2)\n', (2855, 2889), False, 'from GitMarco.tf.basic import basic_dense_model\n'), ((2948, 2977), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 2)'}), '(shape=(32, 2))\n', (2962, 2977), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((2979, 3008), 'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 2)'}), '(shape=(32, 2))\n', (2993, 3008), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n'), ((3107, 3123), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3121, 3123), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2737, 2766), 
'GitMarco.tf.utils.random_dataset', 'random_dataset', ([], {'shape': '(32, 4)'}), '(shape=(32, 4))\n', (2751, 2766), False, 'from GitMarco.tf.utils import limit_memory, random_dataset\n')] |
import numpy as np
import tqdm
import yaml
from loky import get_reusable_executor
import ngmix
import galsim
import copy
import sys
from metadetect.detect import MEDSifier
from ngmix.gaussmom import GaussMom
from pizza_cutter.slice_utils.symmetrize import symmetrize_bmask
from pizza_cutter.slice_utils.interpolate import interpolate_image_and_noise
CONFIG = yaml.safe_load("""\
metacal:
psf: fitgauss
types: [noshear, 1p, 1m, 2p, 2m]
use_noise_image: True
psf:
lm_pars:
maxfev: 2000
ftol: 1.0e-05
xtol: 1.0e-05
model: gauss
# we try many times because if this fails we get no psf info
# for the entire patch
ntry: 10
sx:
weight:
fwhm: 1.2 # arcsec
meds:
box_padding: 2
box_type: iso_radius
max_box_size: 53
min_box_size: 33
rad_fac: 2
rad_min: 4
# check for an edge hit
bmask_flags: 1610612736 # 2**29 || 2**30
""")
def symmetrize_bmask_nfold(*, bmask, nfolds):
    """Impose N-fold rotational symmetry on a bit mask.

    Parameters
    ----------
    bmask : array-like
        The input bit mask; it is not modified.
    nfolds : int
        The desired number of folds of rotational symmetry.

    Returns
    -------
    sym_bmask : array-like
        A new mask: the OR of the input with its rotated copies.
    """
    sym = bmask.copy()
    if nfolds == 1:
        # Special case kept from the original: OR with a single 90 degree
        # rotation.
        sym |= np.rot90(sym)
        return sym
    for k in range(1, nfolds):
        rotated = bmask.copy()
        symmetrize_bmask(
            bmask=rotated,
            angle=k * 360 / nfolds,
        )
        sym |= rotated
    return sym
def cut_nones(presults, mresults):
    """Drop every position where either list holds None.

    Parameters
    ----------
    presults : list
        One the list of things.
    mresults : list
        The other list of things.

    Returns
    -------
    pcut : list
        The cut list.
    mcut : list
        The cut list.
    """
    kept = [
        (p, m)
        for p, m in zip(presults, mresults)
        if p is not None and m is not None
    ]
    prr_keep = [p for p, _ in kept]
    mrr_keep = [m for _, m in kept]
    return prr_keep, mrr_keep
def _run_boostrap(x1, y1, x2, y2, wgts, verbose):
rng = np.random.RandomState(seed=100)
mvals = []
cvals = []
if verbose:
itrl = tqdm.trange(500, leave=False, desc='running bootstrap', ncols=79)
else:
itrl = range(500)
for _ in itrl:
ind = rng.choice(len(y1), replace=True, size=len(y1))
_wgts = wgts[ind].copy()
_wgts /= np.sum(_wgts)
mvals.append(np.mean(y1[ind] * _wgts) / np.mean(x1[ind] * _wgts) - 1)
cvals.append(np.mean(y2[ind] * _wgts) / np.mean(x2[ind] * _wgts))
return (
np.mean(y1 * wgts) / np.mean(x1 * wgts) - 1, np.std(mvals),
np.mean(y2 * wgts) / np.mean(x2 * wgts), np.std(cvals))
def _run_jackknife(x1, y1, x2, y2, wgts, jackknife, verbose):
n_per = x1.shape[0] // jackknife
n = n_per * jackknife
x1j = np.zeros(jackknife)
y1j = np.zeros(jackknife)
x2j = np.zeros(jackknife)
y2j = np.zeros(jackknife)
wgtsj = np.zeros(jackknife)
loc = 0
if verbose:
itrl = tqdm.trange(
jackknife, desc='running jackknife sums', leave=False, ncols=79
)
else:
itrl = range(jackknife)
for i in itrl:
wgtsj[i] = np.sum(wgts[loc:loc+n_per])
x1j[i] = np.sum(x1[loc:loc+n_per] * wgts[loc:loc+n_per]) / wgtsj[i]
y1j[i] = np.sum(y1[loc:loc+n_per] * wgts[loc:loc+n_per]) / wgtsj[i]
x2j[i] = np.sum(x2[loc:loc+n_per] * wgts[loc:loc+n_per]) / wgtsj[i]
y2j[i] = np.sum(y2[loc:loc+n_per] * wgts[loc:loc+n_per]) / wgtsj[i]
loc += n_per
mbar = np.mean(y1 * wgts) / np.mean(x1 * wgts) - 1
cbar = np.mean(y2 * wgts) / np.mean(x2 * wgts)
mvals = np.zeros(jackknife)
cvals = np.zeros(jackknife)
if verbose:
itrl = tqdm.trange(
jackknife, desc='running jackknife estimates', leave=False, ncols=79
)
else:
itrl = range(jackknife)
for i in itrl:
_wgts = np.delete(wgtsj, i)
mvals[i] = (
np.sum(np.delete(y1j, i) * _wgts) / np.sum(np.delete(x1j, i) * _wgts)
- 1
)
cvals[i] = (
np.sum(np.delete(y2j, i) * _wgts) / np.sum(np.delete(x2j, i) * _wgts)
)
return (
mbar,
np.sqrt((n - n_per) / n * np.sum((mvals-mbar)**2)),
cbar,
np.sqrt((n - n_per) / n * np.sum((cvals-cbar)**2)),
)
def estimate_m_and_c(
    presults,
    mresults,
    g_true,
    swap12=False,
    step=0.01,
    weights=None,
    jackknife=None,
    verbose=False,
):
    """Estimate m and c from paired lensing simulations.

    Parameters
    ----------
    presults : list of iterables or np.ndarray
        A list of iterables, each with g1p, g1m, g1, g2p, g2m, g2
        from running metadetect with a `g1` shear in the 1-component and
        0 true shear in the 2-component. If an array, it should have the named
        columns.
    mresults : list of iterables or np.ndarray
        A list of iterables, each with g1p, g1m, g1, g2p, g2m, g2
        from running metadetect with a -`g1` shear in the 1-component and
        0 true shear in the 2-component. If an array, it should have the named
        columns.
    g_true : float
        The true value of the shear on the 1-axis in the simulation. The other
        axis is assumed to have a true value of zero.
    swap12 : bool, optional
        If True, swap the roles of the 1- and 2-axes in the computation.
    step : float, optional
        The step used in metadetect for estimating the response. Default is
        0.01.
    weights : list of weights, optional
        Weights to apply to each sample. Will be normalized if not already.
    jackknife : int, optional
        The number of jackknife sections to use for error estimation. Default of
        None will do no jackknife and default to bootstrap error bars.
    verbose : bool, optional
        If True, print progress. Default is False.

    Returns
    -------
    m : float
        Estimate of the multiplicative bias.
    merr : float
        Estimate of the 1-sigma standard error in `m`.
    c : float
        Estimate of the additive bias.
    cerr : float
        Estimate of the 1-sigma standard error in `c`.
    """
    if isinstance(presults, list) or isinstance(mresults, list):
        # list inputs: drop paired entries where either side is None
        prr_keep, mrr_keep = cut_nones(presults, mresults)

        def _get_stuff(rr):
            # unpack the (g1p, g1m, g1, g2p, g2m, g2) columns and form the
            # finite-difference shear responses
            _a = np.vstack(rr)
            g1p = _a[:, 0]
            g1m = _a[:, 1]
            g1 = _a[:, 2]
            g2p = _a[:, 3]
            g2m = _a[:, 4]
            g2 = _a[:, 5]
            if swap12:
                g1p, g1m, g1, g2p, g2m, g2 = g2p, g2m, g2, g1p, g1m, g1
            return (
                g1, (g1p - g1m) / 2 / step * g_true,
                g2, (g2p - g2m) / 2 / step)

        g1p, R11p, g2p, R22p = _get_stuff(prr_keep)
        g1m, R11m, g2m, R22m = _get_stuff(mrr_keep)
    else:
        # structured-array inputs: read the named columns directly
        if swap12:
            g1p = presults["g2"]
            R11p = (presults["g2p"] - presults["g2m"]) / 2 / step * g_true
            g2p = presults["g1"]
            R22p = (presults["g1p"] - presults["g1m"]) / 2 / step
            g1m = mresults["g2"]
            R11m = (mresults["g2p"] - mresults["g2m"]) / 2 / step * g_true
            g2m = mresults["g1"]
            R22m = (mresults["g1p"] - mresults["g1m"]) / 2 / step
        else:
            g1p = presults["g1"]
            R11p = (presults["g1p"] - presults["g1m"]) / 2 / step * g_true
            g2p = presults["g2"]
            R22p = (presults["g2p"] - presults["g2m"]) / 2 / step
            g1m = mresults["g1"]
            R11m = (mresults["g1p"] - mresults["g1m"]) / 2 / step * g_true
            g2m = mresults["g2"]
            R22m = (mresults["g2p"] - mresults["g2m"]) / 2 / step
    # normalize the weights (uniform if none given)
    if weights is not None:
        wgts = np.array(weights).astype(np.float64)
    else:
        wgts = np.ones(len(g1p)).astype(np.float64)
    wgts /= np.sum(wgts)
    # keep only entries where every measured quantity is finite
    msk = (
        np.isfinite(g1p) &
        np.isfinite(R11p) &
        np.isfinite(g1m) &
        np.isfinite(R11m) &
        np.isfinite(g2p) &
        np.isfinite(R22p) &
        np.isfinite(g2m) &
        np.isfinite(R22m))
    g1p = g1p[msk]
    R11p = R11p[msk]
    g1m = g1m[msk]
    R11m = R11m[msk]
    g2p = g2p[msk]
    R22p = R22p[msk]
    g2m = g2m[msk]
    R22m = R22m[msk]
    wgts = wgts[msk]
    # combine the +/- shear sims: noise cancels in the 1-component
    # difference and in the 2-component sum
    x1 = (R11p + R11m)/2
    y1 = (g1p - g1m) / 2
    x2 = (R22p + R22m) / 2
    y2 = (g2p + g2m) / 2
    if jackknife:
        return _run_jackknife(x1, y1, x2, y2, wgts, jackknife, verbose)
    else:
        return _run_boostrap(x1, y1, x2, y2, wgts, verbose)
def make_obs(
    *,
    n_grid=6,
    dim=235,
    buff=20,
    scale=0.2,
    psf_fwhm=0.9,
    hlr=0.5,
    nse=1e-7,
    star_dxdy=117,
    star_rad=1,
    n_stars=5,
    seed=10,
    shear=(0.02, 0.0),
    mcal_shear=(0.0, 0.0)
):
    """Simulate a grid of sheared exponential galaxies with star masks.

    Draws an ``n_grid x n_grid`` grid of galaxies (dithered within their grid
    cells), convolves with a Gaussian PSF, adds Gaussian noise, punches
    circular star holes into the image/bit mask, and packages everything as
    an ngmix MultiBandObsList with a single band/epoch.

    Parameters
    ----------
    n_grid : int
        Galaxies per side of the grid (n_grid**2 total).
    dim, buff : int
        Image size and border padding in pixels; total size is dim + 2*buff.
    scale : float
        Pixel scale (presumably arcsec/pixel — matches the 0.2 used elsewhere
        in this file).
    psf_fwhm, hlr : float
        PSF FWHM and galaxy half-light radius (same units as scale).
    nse : float
        Gaussian noise sigma per pixel.
    star_dxdy : float
        Max |offset| of star centers from the image center, in pixels.
    star_rad : float
        log10 of the minimum star-mask radius in pixels.
    n_stars : int
        Number of star masks.
    seed : int
        RNG seed.
    shear, mcal_shear : (float, float)
        True shear and extra metacal shear applied to every galaxy.

    Returns
    -------
    mbobs : ngmix.MultiBandObsList
        One band / one epoch; ``obs.mfrac`` is the masked-pixel map and
        ``mbobs.meta["sdata"]`` holds (x, y, radius) per star mask.
    """
    rng = np.random.RandomState(seed=seed)
    n_gals = n_grid**2
    tot_dim = dim + 2*buff
    tot_cen = (tot_dim-1)/2
    # grid-cell centers in world units, relative to the image center
    gloc = (np.arange(n_grid) + 0.5) * (dim / n_grid) - dim/2
    gloc *= scale
    dx, dy = np.meshgrid(gloc, gloc)
    # dither each galaxy by up to half a pixel within its cell
    dx = dx.ravel() + rng.uniform(low=-0.5, high=0.5, size=n_gals) * scale
    dy = dy.ravel() + rng.uniform(low=-0.5, high=0.5, size=n_gals) * scale
    # size scaling per galaxy; the "* 0 + 1" makes this effectively all ones
    ds = np.arange(n_gals) / (n_gals-1) * 0 + 1
    gals = galsim.Sum([
        galsim.Exponential(
            half_light_radius=hlr * _ds
        ).shift(
            _dx, _dy
        ).shear(
            g1=shear[0], g2=shear[1]
        ).shear(
            g1=mcal_shear[0], g2=mcal_shear[1]
        )
        for _ds, _dx, _dy in zip(ds, dx, dy)
    ])
    psf = galsim.Gaussian(fwhm=psf_fwhm)
    objs = galsim.Convolve([gals, psf])
    im = objs.drawImage(nx=tot_dim, ny=tot_dim, scale=scale).array
    im += rng.normal(size=im.shape, scale=nse)
    # independent noise realization for metacal
    nim = rng.normal(size=im.shape, scale=nse)
    psf_dim = 53
    psf_cen = (psf_dim-1)/2
    psf_im = psf.drawImage(nx=psf_dim, ny=psf_dim, scale=scale).array
    # make bmask
    bmask = np.zeros_like(im, dtype=np.int32)
    x, y = np.meshgrid(np.arange(tot_dim), np.arange(tot_dim))
    sdata = []
    for _ in range(n_stars):
        # star radius drawn log-uniformly in [star_rad, star_rad + 0.2] dex
        sr2 = np.power(10.0, rng.uniform(low=star_rad, high=star_rad+0.2))**2
        sx = rng.uniform(low=-star_dxdy, high=star_dxdy) + tot_cen
        sy = rng.uniform(low=-star_dxdy, high=star_dxdy) + tot_cen
        dr2 = (x - sx)**2 + (y - sy)**2
        msk = dr2 < sr2
        # flag and zero out the masked disk
        bmask[msk] |= 2**0
        im[msk] = 0
        sdata.append((sx, sy, np.sqrt(sr2)))
    psf_obs = ngmix.Observation(
        image=psf_im,
        weight=np.ones_like(psf_im) / nse**2,
        jacobian=ngmix.DiagonalJacobian(scale=scale, row=psf_cen, col=psf_cen)
    )
    # inverse-variance weight, zeroed where masked
    wgt = np.ones_like(im) / nse**2
    msk = bmask != 0
    wgt[msk] = 0.0
    mfrac = np.zeros_like(im)
    mfrac[msk] = 1.0
    obs = ngmix.Observation(
        image=im,
        noise=nim,
        weight=wgt,
        bmask=bmask,
        ormask=bmask,
        jacobian=ngmix.DiagonalJacobian(scale=scale, row=tot_cen, col=tot_cen),
        psf=psf_obs
    )
    obs.mfrac = mfrac
    mbobs = ngmix.MultiBandObsList()
    obsl = ngmix.ObsList()
    obsl.append(obs)
    mbobs.append(obsl)
    mbobs.meta["sdata"] = sdata
    return mbobs
def meas_mbmeds(mbobs, *, mask_width, sym, maskflags=1, meds_config=None):
    """Detect objects with MEDSifier and measure Gaussian weighted moments.

    Parameters
    ----------
    mbobs : ngmix.MultiBandObsList
        The observations to process.
    mask_width : int
        Half-width of the central box tested against `maskflags`; objects
        whose central pixels are masked are skipped entirely.
    sym : bool, int or list
        Mask symmetrization mode: a list of angles symmetrizes at each
        angle; an int in {2, 4, 8} symmetrizes at the corresponding fold
        angles; any other int ORs in repeated 90-degree rotations; falsy
        disables symmetrization.
    maskflags : int, optional
        Bit flags in the bmask that count as masked.
    meds_config : dict, optional
        MEDS configuration; defaults to a copy of ``CONFIG["meds"]``.

    Returns
    -------
    cat : np.ndarray
        Structured array with one row per detected (and unmasked) object;
        failed measurements carry flags != 0 and -9999 sentinel values.
    mbobs : ngmix.MultiBandObsList
        The input observations.
    """
    # measure the PSF size once; used for the T_ratio column below
    mom = GaussMom(fwhm=1.2)
    res = mom.go(obs=mbobs[0][0].psf)
    psf_T = res['T']
    if meds_config is None:
        meds_config = copy.deepcopy(CONFIG["meds"])
    mfier = MEDSifier(
        mbobs,
        sx_config=None,
        meds_config=meds_config,
        maskflags=maskflags
    )
    mbmeds = mfier.get_multiband_meds()
    d = []
    dt = [
        ("flags", "i4"),
        ("g1", "f8"),
        ("g2", "f8"),
        ("s2n", "f8"),
        ("x", "f8"),
        ("y", "f8"),
        ('T_ratio', 'f8'),
    ]
    mw = mask_width
    for i, _mbobs in enumerate(mbmeds.get_mbobs_list()):
        if len(_mbobs) > 0 and len(_mbobs[0]) > 0:
            obs = _mbobs[0][0]
            cen = int((obs.bmask.shape[0]-1)/2)
            # skip objects with masked pixels in the central (2*mw+1)^2 box
            if np.any((obs.bmask[cen-mw:cen+mw+1, cen-mw:cen+mw+1] & maskflags) != 0):
                continue
            if sym:
                bmask = obs.bmask.copy()
                if isinstance(sym, list):
                    # symmetrize at each requested angle
                    for angle in sym:
                        bmask_rot = obs.bmask.copy()
                        symmetrize_bmask(
                            bmask=bmask_rot,
                            angle=angle,
                        )
                        bmask |= bmask_rot
                elif isinstance(sym, int):
                    # NOTE: bool is a subclass of int, so sym=True lands here
                    if sym in [2, 4, 8]:
                        angles = np.arange(sym)[1:] * 360/sym
                        for angle in angles:
                            bmask_rot = obs.bmask.copy()
                            symmetrize_bmask(
                                bmask=bmask_rot,
                                angle=angle,
                            )
                            bmask |= bmask_rot
                    else:
                        # other ints: OR in repeated 90-degree rotations
                        bmask_rot = obs.bmask.copy()
                        for _ in range(sym):
                            bmask_rot = np.rot90(bmask_rot)
                            bmask |= bmask_rot
                # zero the weight wherever the symmetrized mask is set
                msk = (bmask & maskflags) != 0
                wgt = obs.weight.copy()
                wgt[msk] = 0
                obs.bmask = bmask
                obs.weight = wgt
            # (removed an unreachable `if False:` matplotlib/pdb debug block)
            mom = GaussMom(fwhm=1.2)
            res = mom.go(obs=obs)
            if res["flags"] == 0:
                d.append((
                    res["flags"],
                    res["e"][0], res["e"][1],
                    res["s2n"],
                    mfier.cat["x"][i], mfier.cat["y"][i],
                    res['T'] / psf_T
                ))
            else:
                d.append((
                    res["flags"],
                    -9999, -9999,
                    -9999,
                    mfier.cat["x"][i], mfier.cat["y"][i],
                    -9999,
                ))
    return np.array(d, dtype=dt), mbobs
def _cut_cat(d):
return d[
(d["flags"] == 0)
& (d["s2n"] > 1e4)
& (d["T_ratio"] > 1.2)
& (np.abs(d["g1"]) < 1.0)
& (np.abs(d["g2"]) < 1.0)
& ((d["g1"]**2 + d["g2"]**2) < 1.0)
]
def _run_one_shear(*, shear, mask_width, sym, **kwargs):
    """Run one sheared sim plus its four metacal counter-sims.

    Returns (g1p, g1m, g1, g2p, g2m, g2) mean shapes after quality cuts,
    or None if any of the five catalogs is empty after cutting.
    """
    step = 0.01
    # noshear, 1p, 1m, 2p, 2m -- order matters for the unpack below
    mcal_shears = [(0, 0), (step, 0), (-step, 0), (0, step), (0, -step)]
    cats = []
    for ms in mcal_shears:
        cat, _ = meas_mbmeds(
            make_obs(shear=shear, mcal_shear=ms, **kwargs),
            mask_width=mask_width, sym=sym,
        )
        cats.append(_cut_cat(cat))
    if any(len(c) == 0 for c in cats):
        return None
    _d, _d1p, _d1m, _d2p, _d2m = cats
    g1 = np.mean(_d["g1"])
    g1p = np.mean(_d1p["g1"])
    g1m = np.mean(_d1m["g1"])
    g2 = np.mean(_d["g2"])
    g2p = np.mean(_d2p["g2"])
    g2m = np.mean(_d2m["g2"])
    return g1p, g1m, g1, g2p, g2m, g2
def _meas_m(*, mask_width, sym, **kwargs):
    """Run the four paired shear sims needed for one m/c measurement.

    Returns (pres, mres, spres, smres) -- the +/-0.02 shears on each axis --
    or (None, None, None, None) if any of them fails.
    """
    shears = [(0.02, 0), (-0.02, 0), (0, 0.02), (0, -0.02)]
    results = []
    for shear in shears:
        res = _run_one_shear(
            shear=shear,
            mask_width=mask_width,
            sym=sym,
            **kwargs,
        )
        if res is None:
            return None, None, None, None
        results.append(res)
    return tuple(results)
def meas_m(*, mask_width, sym, n_stars, n_jobs, seed, n_print=500):
    """Run `n_jobs` paired sims and estimate the m/c biases.

    Parameters
    ----------
    mask_width, sym :
        Passed through to `_meas_m` / `meas_mbmeds`.
    n_stars : int
        Number of star masks per image.
    n_jobs : int
        Number of independent sims; 1 runs in-process, more uses a
        reusable process pool.
    seed : int
        Seed used to draw the per-job seeds.
    n_print : int, optional
        Print interim estimates every `n_print` completed jobs.

    Returns
    -------
    dict
        m1/m2, c1/c2, their 1-sigma errors, and the raw per-job result
        lists (pres/mres/spres/smres).
    """
    seeds = np.random.RandomState(seed=seed).randint(size=n_jobs, low=1, high=2**28)
    # collected per-job results; estimate_m_and_c drops None entries
    pres = []
    mres = []
    spres = []
    smres = []
    if n_jobs == 1:
        # bug fix: the single-job result used to be discarded, leaving
        # pres/mres/spres/smres undefined and crashing below with NameError
        res = _meas_m(n_stars=n_stars, seed=seed, mask_width=mask_width, sym=sym)
        pres.append(res[0])
        mres.append(res[1])
        spres.append(res[2])
        smres.append(res[3])
    else:
        exe = get_reusable_executor()
        futs = [
            exe.submit(_meas_m, n_stars=n_stars, seed=s, mask_width=mask_width, sym=sym)
            for s in seeds
        ]
        n_done = 0
        with tqdm.tqdm(
            futs, total=len(futs), ncols=79, file=sys.stdout,
            desc="running sims",
        ) as itrl:
            for fut in itrl:
                n_done += 1
                try:
                    res = fut.result()
                    pres.append(res[0])
                    mres.append(res[1])
                    spres.append(res[2])
                    smres.append(res[3])
                except Exception as e:
                    print(e)
                if n_done % n_print == 0:
                    # interim progress report
                    # NOTE(review): the c labels say 10^(-5) but the values
                    # are scaled by 1e-3 -- confirm the intended units
                    m, merr, c, cerr = estimate_m_and_c(
                        pres,
                        mres,
                        0.02,
                        jackknife=200 if n_done > 1000 else None,
                    )
                    mstr = "m1 +/- merr: %0.6f +/- %0.6f [10^(-3), 3sigma]" % (
                        m/1e-3, 3*merr/1e-3)
                    itrl.write(mstr, file=sys.stdout)
                    cstr = "c2 +/- cerr: %0.6f +/- %0.6f [10^(-5), 3sigma]" % (
                        c/1e-3, 3*cerr/1e-3)
                    itrl.write(cstr, file=sys.stdout)
                    m, merr, c, cerr = estimate_m_and_c(
                        spres,
                        smres,
                        0.02,
                        jackknife=200 if n_done > 1000 else None,
                        swap12=True,
                    )
                    mstr = "m2 +/- merr: %0.6f +/- %0.6f [10^(-3), 3sigma]" % (
                        m/1e-3, 3*merr/1e-3)
                    itrl.write(mstr, file=sys.stdout)
                    cstr = "c1 +/- cerr: %0.6f +/- %0.6f [10^(-5), 3sigma]" % (
                        c/1e-3, 3*cerr/1e-3)
                    itrl.write(cstr, file=sys.stdout)
                    sys.stdout.flush()
    m1, m1err, c2, c2err = estimate_m_and_c(
        pres,
        mres,
        0.02,
        jackknife=200 if n_jobs > 1000 else None,
    )
    m2, m2err, c1, c1err = estimate_m_and_c(
        spres,
        smres,
        0.02,
        jackknife=200 if n_jobs > 1000 else None,
        swap12=True,
    )
    return dict(
        m1=m1,
        m1err=m1err,
        c2=c2,
        c2err=c2err,
        pres=pres,
        mres=mres,
        m2=m2,
        m2err=m2err,
        c1=c1,
        c1err=c1err,
        spres=spres,
        smres=smres,
    )
def format_mc_res(res, space=None):
    """Format an m/c result dict into a multi-line report string.

    `space` indents continuation lines by that many spaces when > 0.

    NOTE(review): the c lines are labeled 10^(-5) but the values are divided
    by 1e-3, matching the original implementation -- confirm intended units.
    """
    lines = []
    lines.append("m1 +/- merr: %0.6f +/- %0.6f [10^(-3), 3sigma]" % (
        res["m1"]/1e-3, 3*res["m1err"]/1e-3
    ))
    lines.append("m2 +/- merr: %0.6f +/- %0.6f [10^(-3), 3sigma]" % (
        res["m2"]/1e-3, 3*res["m2err"]/1e-3
    ))
    lines.append("c1 +/- cerr: %0.6f +/- %0.6f [10^(-5), 3sigma]" % (
        res["c1"]/1e-3, 3*res["c1err"]/1e-3
    ))
    lines.append("c2 +/- cerr: %0.6f +/- %0.6f [10^(-5), 3sigma]" % (
        res["c2"]/1e-3, 3*res["c2err"]/1e-3
    ))
    sep = "\n" if space is None or space <= 0 else "\n" + (" " * space)
    return sep.join(lines)
def meas_one_im(*, g1, g2, seed, n_stars=0, sym_nfold=None, interp=False):
    """Simulate one sheared exponential + Gaussian PSF image, apply star
    masks, and measure its Gaussian weighted moments.

    Parameters
    ----------
    g1, g2 : float
        Shear applied to the galaxy.
    seed : int
        RNG seed.
    n_stars : int, optional
        Number of circular star masks to punch into the image.
    sym_nfold : int, optional
        If given, symmetrize the bit mask to this many rotational folds.
    interp : bool, optional
        If True, interpolate over masked pixels before measuring.

    Returns
    -------
    e1, e2 : float
        Measured moment-based ellipticity components.
    obs : ngmix.Observation
        The measured observation.
    mfrac : float
        Gaussian-weighted masked fraction of the image.
    """
    rng = np.random.RandomState(seed=seed)
    obj = galsim.Exponential(half_light_radius=0.5).shear(g1=g1, g2=g2)
    psf = galsim.Gaussian(fwhm=0.9).withFlux(1e6)
    obj = galsim.Convolve([obj, psf])
    dim = 53
    cen = (dim-1)//2
    offset = rng.uniform(low=-0.5, high=0.5, size=2)
    im = obj.drawImage(nx=dim, ny=dim, scale=0.2, offset=offset).array
    # bug fix: removed a redundant double assignment (jac = jac = ...)
    jac = ngmix.DiagonalJacobian(
        scale=0.2,
        row=cen+offset[1],
        col=cen+offset[0],
    )
    psf_im = psf.drawImage(nx=dim, ny=dim, scale=0.2).array
    psf_jac = ngmix.DiagonalJacobian(scale=0.2, row=cen, col=cen)
    psf_obs = ngmix.Observation(
        image=psf_im,
        weight=np.ones_like(psf_im),
        jacobian=psf_jac,
    )
    wgt = np.ones_like(im)
    bmask = np.zeros_like(im, dtype=np.int32)
    # punch circular star masks into the bit mask
    # (an unreachable `if True/else` half-plane masking variant was removed)
    for _ in range(n_stars):
        srad = np.power(10, rng.uniform(low=1, high=3))
        ang = rng.uniform(low=0, high=2.0*np.pi)
        lrad = rng.uniform(low=(srad - (dim/2-3)), high=srad-(dim/2-10)) + dim/2
        xc = lrad * np.cos(ang) + dim/2
        yc = lrad * np.sin(ang) + dim/2
        srad2 = srad * srad
        x, y = np.meshgrid(np.arange(dim), np.arange(dim))
        msk = ((x-xc)**2 + (y-yc)**2) < srad2
        bmask[msk] = 1
    if sym_nfold is not None:
        bmask = symmetrize_bmask_nfold(bmask=bmask, nfolds=sym_nfold)
    msk = bmask != 0
    wgt[msk] = 0
    im[msk] = np.nan
    if interp:
        nse = rng.normal(size=im.shape)
        # bug fix: the interpolated image used to be assigned to an unused
        # variable, so interp=True had no effect on the measurement
        im, _ = interpolate_image_and_noise(
            image=im,
            noises=[nse],
            weight=wgt,
            bmask=bmask,
            bad_flags=1,
            rng=rng,
            fill_isolated_with_noise=True
        )
    obs = ngmix.Observation(
        image=im,
        weight=wgt,
        jacobian=jac,
        psf=psf_obs,
        bmask=bmask,
    )
    mom = GaussMom(fwhm=1.2)
    res = mom.go(obs=obs)
    # Gaussian-weighted masked fraction: weight the 0/1 masked-pixel image
    # with a Gaussian of the measurement FWHM
    gauss_wgt = ngmix.GMixModel(
        [0, 0, 0, 0, ngmix.moments.fwhm_to_T(1.2), 1],
        'gauss',
    )
    cobs = obs.copy()
    cobs.image = 1.0 - wgt
    cobs.weight = np.ones_like(wgt)
    stats = gauss_wgt.get_weighted_sums(
        cobs,
        1.2 * 2,
    )
    mfrac = stats["sums"][5] / stats["wsum"]
    return res["e"][0], res["e"][1], obs, mfrac
def meas_response_one_im(seed, n_stars=0, sym_nfold=None, interp=False, swap12=False):
    """Measure the shear response curve of a single simulated image.

    Scans shear from 0 to 0.06 on one axis, finite-differences the measured
    ellipticity to get the response R(shear), and prepends a central-difference
    estimate at zero shear.

    Returns a dict with the midpoint shears, R, the zero-shear masked
    fraction, the raw e1/e2 curves, and the last observation.
    """
    shear = np.linspace(0, 0.06, 50)
    e1 = []
    e2 = []
    for s in shear:
        g1v, g2v = (0, s) if swap12 else (s, 0)
        _e1, _e2, _obs, _mfrac = meas_one_im(
            g1=g1v, g2=g2v, seed=seed, sym_nfold=sym_nfold, interp=interp,
            n_stars=n_stars,
        )
        e1.append(_e1)
        e2.append(_e2)
    e1 = np.array(e1)
    e2 = np.array(e2)
    # forward differences of the sheared component
    comp = e2 if swap12 else e1
    R = (comp[1:] - comp[:-1]) / (shear[1:] - shear[:-1])
    sp = (shear[1:] + shear[:-1]) / 2
    ds = sp[1] - sp[0]
    if swap12:
        g1v, g2v, ind = 0, ds, 1
    else:
        g1v, g2v, ind = ds, 0, 0
    # central difference at zero shear
    ep = meas_one_im(
        g1=g1v, g2=g2v, seed=seed, sym_nfold=sym_nfold, interp=interp,
        n_stars=n_stars,
    )[ind]
    em = meas_one_im(
        g1=-g1v, g2=-g2v, seed=seed, sym_nfold=sym_nfold, interp=interp,
        n_stars=n_stars,
    )[ind]
    R0 = (ep - em)/2/ds
    sp = np.concatenate([np.array([0]), sp])
    R = np.concatenate([np.array([R0]), R])
    mfrac = meas_one_im(
        g1=0, g2=0, seed=seed, sym_nfold=sym_nfold, interp=interp,
        n_stars=n_stars,
    )[-1]
    return dict(
        shear=sp,
        R=R,
        mfrac=mfrac,
        e1=e1,
        e2=e2,
        obs=_obs,
    )
| [
"metadetect.detect.MEDSifier",
"pizza_cutter.slice_utils.symmetrize.symmetrize_bmask",
"numpy.sqrt",
"numpy.array",
"numpy.isfinite",
"numpy.rot90",
"copy.deepcopy",
"numpy.sin",
"numpy.random.RandomState",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"galsim.Convolve",
"numpy... | [((361, 927), 'yaml.safe_load', 'yaml.safe_load', (['""" metacal:\n psf: fitgauss\n types: [noshear, 1p, 1m, 2p, 2m]\n use_noise_image: True\n\n psf:\n lm_pars:\n maxfev: 2000\n ftol: 1.0e-05\n xtol: 1.0e-05\n model: gauss\n\n # we try many times because if this fails we get no psf info\n # for the entire patch\n ntry: 10\n\n sx:\n\n weight:\n fwhm: 1.2 # arcsec\n\n meds:\n box_padding: 2\n box_type: iso_radius\n max_box_size: 53\n min_box_size: 33\n rad_fac: 2\n rad_min: 4\n\n # check for an edge hit\n bmask_flags: 1610612736 # 2**29 || 2**30\n\n"""'], {}), '(\n """ metacal:\n psf: fitgauss\n types: [noshear, 1p, 1m, 2p, 2m]\n use_noise_image: True\n\n psf:\n lm_pars:\n maxfev: 2000\n ftol: 1.0e-05\n xtol: 1.0e-05\n model: gauss\n\n # we try many times because if this fails we get no psf info\n # for the entire patch\n ntry: 10\n\n sx:\n\n weight:\n fwhm: 1.2 # arcsec\n\n meds:\n box_padding: 2\n box_type: iso_radius\n max_box_size: 53\n min_box_size: 33\n rad_fac: 2\n rad_min: 4\n\n # check for an edge hit\n bmask_flags: 1610612736 # 2**29 || 2**30\n\n"""\n )\n', (375, 927), False, 'import yaml\n'), ((2360, 2391), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(100)'}), '(seed=100)\n', (2381, 2391), True, 'import numpy as np\n'), ((3135, 3154), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3143, 3154), True, 'import numpy as np\n'), ((3165, 3184), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3173, 3184), True, 'import numpy as np\n'), ((3195, 3214), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3203, 3214), True, 'import numpy as np\n'), ((3225, 3244), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3233, 3244), True, 'import numpy as np\n'), ((3257, 3276), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3265, 3276), True, 'import numpy as np\n'), ((3973, 3992), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (3981, 
3992), True, 'import numpy as np\n'), ((4005, 4024), 'numpy.zeros', 'np.zeros', (['jackknife'], {}), '(jackknife)\n', (4013, 4024), True, 'import numpy as np\n'), ((8204, 8216), 'numpy.sum', 'np.sum', (['wgts'], {}), '(wgts)\n', (8210, 8216), True, 'import numpy as np\n'), ((9143, 9175), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (9164, 9175), True, 'import numpy as np\n'), ((9347, 9370), 'numpy.meshgrid', 'np.meshgrid', (['gloc', 'gloc'], {}), '(gloc, gloc)\n', (9358, 9370), True, 'import numpy as np\n'), ((9889, 9919), 'galsim.Gaussian', 'galsim.Gaussian', ([], {'fwhm': 'psf_fwhm'}), '(fwhm=psf_fwhm)\n', (9904, 9919), False, 'import galsim\n'), ((9931, 9959), 'galsim.Convolve', 'galsim.Convolve', (['[gals, psf]'], {}), '([gals, psf])\n', (9946, 9959), False, 'import galsim\n'), ((10268, 10301), 'numpy.zeros_like', 'np.zeros_like', (['im'], {'dtype': 'np.int32'}), '(im, dtype=np.int32)\n', (10281, 10301), True, 'import numpy as np\n'), ((11052, 11069), 'numpy.zeros_like', 'np.zeros_like', (['im'], {}), '(im)\n', (11065, 11069), True, 'import numpy as np\n'), ((11360, 11384), 'ngmix.MultiBandObsList', 'ngmix.MultiBandObsList', ([], {}), '()\n', (11382, 11384), False, 'import ngmix\n'), ((11396, 11411), 'ngmix.ObsList', 'ngmix.ObsList', ([], {}), '()\n', (11409, 11411), False, 'import ngmix\n'), ((11607, 11625), 'ngmix.gaussmom.GaussMom', 'GaussMom', ([], {'fwhm': '(1.2)'}), '(fwhm=1.2)\n', (11615, 11625), False, 'from ngmix.gaussmom import GaussMom\n'), ((11778, 11856), 'metadetect.detect.MEDSifier', 'MEDSifier', (['mbobs'], {'sx_config': 'None', 'meds_config': 'meds_config', 'maskflags': 'maskflags'}), '(mbobs, sx_config=None, meds_config=meds_config, maskflags=maskflags)\n', (11787, 11856), False, 'from metadetect.detect import MEDSifier\n'), ((20781, 20813), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (20802, 20813), True, 'import numpy as np\n'), ((20947, 
20974), 'galsim.Convolve', 'galsim.Convolve', (['[obj, psf]'], {}), '([obj, psf])\n', (20962, 20974), False, 'import galsim\n'), ((21149, 21224), 'ngmix.DiagonalJacobian', 'ngmix.DiagonalJacobian', ([], {'scale': '(0.2)', 'row': '(cen + offset[1])', 'col': '(cen + offset[0])'}), '(scale=0.2, row=cen + offset[1], col=cen + offset[0])\n', (21171, 21224), False, 'import ngmix\n'), ((21326, 21377), 'ngmix.DiagonalJacobian', 'ngmix.DiagonalJacobian', ([], {'scale': '(0.2)', 'row': 'cen', 'col': 'cen'}), '(scale=0.2, row=cen, col=cen)\n', (21348, 21377), False, 'import ngmix\n'), ((21512, 21528), 'numpy.ones_like', 'np.ones_like', (['im'], {}), '(im)\n', (21524, 21528), True, 'import numpy as np\n'), ((21542, 21575), 'numpy.zeros_like', 'np.zeros_like', (['im'], {'dtype': 'np.int32'}), '(im, dtype=np.int32)\n', (21555, 21575), True, 'import numpy as np\n'), ((22963, 23042), 'ngmix.Observation', 'ngmix.Observation', ([], {'image': 'im', 'weight': 'wgt', 'jacobian': 'jac', 'psf': 'psf_obs', 'bmask': 'bmask'}), '(image=im, weight=wgt, jacobian=jac, psf=psf_obs, bmask=bmask)\n', (22980, 23042), False, 'import ngmix\n'), ((23100, 23118), 'ngmix.gaussmom.GaussMom', 'GaussMom', ([], {'fwhm': '(1.2)'}), '(fwhm=1.2)\n', (23108, 23118), False, 'from ngmix.gaussmom import GaussMom\n'), ((23324, 23341), 'numpy.ones_like', 'np.ones_like', (['wgt'], {}), '(wgt)\n', (23336, 23341), True, 'import numpy as np\n'), ((23638, 23662), 'numpy.linspace', 'np.linspace', (['(0)', '(0.06)', '(50)'], {}), '(0, 0.06, 50)\n', (23649, 23662), True, 'import numpy as np\n'), ((24011, 24023), 'numpy.array', 'np.array', (['e1'], {}), '(e1)\n', (24019, 24023), True, 'import numpy as np\n'), ((24033, 24045), 'numpy.array', 'np.array', (['e2'], {}), '(e2)\n', (24041, 24045), True, 'import numpy as np\n'), ((1349, 1368), 'numpy.rot90', 'np.rot90', (['sym_bmask'], {}), '(sym_bmask)\n', (1357, 1368), True, 'import numpy as np\n'), ((2453, 2518), 'tqdm.trange', 'tqdm.trange', (['(500)'], {'leave': '(False)', 
'desc': '"""running bootstrap"""', 'ncols': '(79)'}), "(500, leave=False, desc='running bootstrap', ncols=79)\n", (2464, 2518), False, 'import tqdm\n'), ((2686, 2699), 'numpy.sum', 'np.sum', (['_wgts'], {}), '(_wgts)\n', (2692, 2699), True, 'import numpy as np\n'), ((2919, 2932), 'numpy.std', 'np.std', (['mvals'], {}), '(mvals)\n', (2925, 2932), True, 'import numpy as np\n'), ((2983, 2996), 'numpy.std', 'np.std', (['cvals'], {}), '(cvals)\n', (2989, 2996), True, 'import numpy as np\n'), ((3321, 3397), 'tqdm.trange', 'tqdm.trange', (['jackknife'], {'desc': '"""running jackknife sums"""', 'leave': '(False)', 'ncols': '(79)'}), "(jackknife, desc='running jackknife sums', leave=False, ncols=79)\n", (3332, 3397), False, 'import tqdm\n'), ((3500, 3529), 'numpy.sum', 'np.sum', (['wgts[loc:loc + n_per]'], {}), '(wgts[loc:loc + n_per])\n', (3506, 3529), True, 'import numpy as np\n'), ((3921, 3939), 'numpy.mean', 'np.mean', (['(y2 * wgts)'], {}), '(y2 * wgts)\n', (3928, 3939), True, 'import numpy as np\n'), ((3942, 3960), 'numpy.mean', 'np.mean', (['(x2 * wgts)'], {}), '(x2 * wgts)\n', (3949, 3960), True, 'import numpy as np\n'), ((4056, 4141), 'tqdm.trange', 'tqdm.trange', (['jackknife'], {'desc': '"""running jackknife estimates"""', 'leave': '(False)', 'ncols': '(79)'}), "(jackknife, desc='running jackknife estimates', leave=False,\n ncols=79)\n", (4067, 4141), False, 'import tqdm\n'), ((4237, 4256), 'numpy.delete', 'np.delete', (['wgtsj', 'i'], {}), '(wgtsj, i)\n', (4246, 4256), True, 'import numpy as np\n'), ((8430, 8447), 'numpy.isfinite', 'np.isfinite', (['R22m'], {}), '(R22m)\n', (8441, 8447), True, 'import numpy as np\n'), ((10325, 10343), 'numpy.arange', 'np.arange', (['tot_dim'], {}), '(tot_dim)\n', (10334, 10343), True, 'import numpy as np\n'), ((10345, 10363), 'numpy.arange', 'np.arange', (['tot_dim'], {}), '(tot_dim)\n', (10354, 10363), True, 'import numpy as np\n'), ((10974, 10990), 'numpy.ones_like', 'np.ones_like', (['im'], {}), '(im)\n', (10986, 10990), 
True, 'import numpy as np\n'), ((11736, 11765), 'copy.deepcopy', 'copy.deepcopy', (["CONFIG['meds']"], {}), "(CONFIG['meds'])\n", (11749, 11765), False, 'import copy\n'), ((14551, 14572), 'numpy.array', 'np.array', (['d'], {'dtype': 'dt'}), '(d, dtype=dt)\n', (14559, 14572), True, 'import numpy as np\n'), ((15859, 15876), 'numpy.mean', 'np.mean', (["_d['g1']"], {}), "(_d['g1'])\n", (15866, 15876), True, 'import numpy as np\n'), ((15891, 15910), 'numpy.mean', 'np.mean', (["_d1p['g1']"], {}), "(_d1p['g1'])\n", (15898, 15910), True, 'import numpy as np\n'), ((15925, 15944), 'numpy.mean', 'np.mean', (["_d1m['g1']"], {}), "(_d1m['g1'])\n", (15932, 15944), True, 'import numpy as np\n'), ((15958, 15975), 'numpy.mean', 'np.mean', (["_d['g2']"], {}), "(_d['g2'])\n", (15965, 15975), True, 'import numpy as np\n'), ((15990, 16009), 'numpy.mean', 'np.mean', (["_d2p['g2']"], {}), "(_d2p['g2'])\n", (15997, 16009), True, 'import numpy as np\n'), ((16024, 16043), 'numpy.mean', 'np.mean', (["_d2m['g2']"], {}), "(_d2m['g2'])\n", (16031, 16043), True, 'import numpy as np\n'), ((17215, 17238), 'loky.get_reusable_executor', 'get_reusable_executor', ([], {}), '()\n', (17236, 17238), False, 'from loky import get_reusable_executor\n'), ((22134, 22154), 'numpy.zeros_like', 'np.zeros_like', (['bmask'], {}), '(bmask)\n', (22147, 22154), True, 'import numpy as np\n'), ((22728, 22861), 'pizza_cutter.slice_utils.interpolate.interpolate_image_and_noise', 'interpolate_image_and_noise', ([], {'image': 'im', 'noises': '[nse]', 'weight': 'wgt', 'bmask': 'bmask', 'bad_flags': '(1)', 'rng': 'rng', 'fill_isolated_with_noise': '(True)'}), '(image=im, noises=[nse], weight=wgt, bmask=bmask,\n bad_flags=1, rng=rng, fill_isolated_with_noise=True)\n', (22755, 22861), False, 'from pizza_cutter.slice_utils.interpolate import interpolate_image_and_noise\n'), ((1509, 1555), 'pizza_cutter.slice_utils.symmetrize.symmetrize_bmask', 'symmetrize_bmask', ([], {'bmask': 'bmask_rot', 'angle': 'angle'}), 
'(bmask=bmask_rot, angle=angle)\n', (1525, 1555), False, 'from pizza_cutter.slice_utils.symmetrize import symmetrize_bmask\n'), ((2942, 2960), 'numpy.mean', 'np.mean', (['(y2 * wgts)'], {}), '(y2 * wgts)\n', (2949, 2960), True, 'import numpy as np\n'), ((2963, 2981), 'numpy.mean', 'np.mean', (['(x2 * wgts)'], {}), '(x2 * wgts)\n', (2970, 2981), True, 'import numpy as np\n'), ((3545, 3596), 'numpy.sum', 'np.sum', (['(x1[loc:loc + n_per] * wgts[loc:loc + n_per])'], {}), '(x1[loc:loc + n_per] * wgts[loc:loc + n_per])\n', (3551, 3596), True, 'import numpy as np\n'), ((3621, 3672), 'numpy.sum', 'np.sum', (['(y1[loc:loc + n_per] * wgts[loc:loc + n_per])'], {}), '(y1[loc:loc + n_per] * wgts[loc:loc + n_per])\n', (3627, 3672), True, 'import numpy as np\n'), ((3697, 3748), 'numpy.sum', 'np.sum', (['(x2[loc:loc + n_per] * wgts[loc:loc + n_per])'], {}), '(x2[loc:loc + n_per] * wgts[loc:loc + n_per])\n', (3703, 3748), True, 'import numpy as np\n'), ((3773, 3824), 'numpy.sum', 'np.sum', (['(y2[loc:loc + n_per] * wgts[loc:loc + n_per])'], {}), '(y2[loc:loc + n_per] * wgts[loc:loc + n_per])\n', (3779, 3824), True, 'import numpy as np\n'), ((3866, 3884), 'numpy.mean', 'np.mean', (['(y1 * wgts)'], {}), '(y1 * wgts)\n', (3873, 3884), True, 'import numpy as np\n'), ((3887, 3905), 'numpy.mean', 'np.mean', (['(x1 * wgts)'], {}), '(x1 * wgts)\n', (3894, 3905), True, 'import numpy as np\n'), ((6682, 6695), 'numpy.vstack', 'np.vstack', (['rr'], {}), '(rr)\n', (6691, 6695), True, 'import numpy as np\n'), ((8403, 8419), 'numpy.isfinite', 'np.isfinite', (['g2m'], {}), '(g2m)\n', (8414, 8419), True, 'import numpy as np\n'), ((10896, 10957), 'ngmix.DiagonalJacobian', 'ngmix.DiagonalJacobian', ([], {'scale': 'scale', 'row': 'psf_cen', 'col': 'psf_cen'}), '(scale=scale, row=psf_cen, col=psf_cen)\n', (10918, 10957), False, 'import ngmix\n'), ((11237, 11298), 'ngmix.DiagonalJacobian', 'ngmix.DiagonalJacobian', ([], {'scale': 'scale', 'row': 'tot_cen', 'col': 'tot_cen'}), '(scale=scale, 
row=tot_cen, col=tot_cen)\n', (11259, 11298), False, 'import ngmix\n'), ((12346, 12431), 'numpy.any', 'np.any', (['(obs.bmask[cen - mw:cen + mw + 1, cen - mw:cen + mw + 1] & maskflags != 0)'], {}), '(obs.bmask[cen - mw:cen + mw + 1, cen - mw:cen + mw + 1] & maskflags != 0\n )\n', (12352, 12431), True, 'import numpy as np\n'), ((13956, 13974), 'ngmix.gaussmom.GaussMom', 'GaussMom', ([], {'fwhm': '(1.2)'}), '(fwhm=1.2)\n', (13964, 13974), False, 'from ngmix.gaussmom import GaussMom\n'), ((17021, 17053), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (17042, 17053), True, 'import numpy as np\n'), ((20825, 20866), 'galsim.Exponential', 'galsim.Exponential', ([], {'half_light_radius': '(0.5)'}), '(half_light_radius=0.5)\n', (20843, 20866), False, 'import galsim\n'), ((20897, 20922), 'galsim.Gaussian', 'galsim.Gaussian', ([], {'fwhm': '(0.9)'}), '(fwhm=0.9)\n', (20912, 20922), False, 'import galsim\n'), ((21448, 21468), 'numpy.ones_like', 'np.ones_like', (['psf_im'], {}), '(psf_im)\n', (21460, 21468), True, 'import numpy as np\n'), ((23200, 23228), 'ngmix.moments.fwhm_to_T', 'ngmix.moments.fwhm_to_T', (['(1.2)'], {}), '(1.2)\n', (23223, 23228), False, 'import ngmix\n'), ((24696, 24709), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (24704, 24709), True, 'import numpy as np\n'), ((24740, 24754), 'numpy.array', 'np.array', (['[R0]'], {}), '([R0])\n', (24748, 24754), True, 'import numpy as np\n'), ((2799, 2823), 'numpy.mean', 'np.mean', (['(y2[ind] * _wgts)'], {}), '(y2[ind] * _wgts)\n', (2806, 2823), True, 'import numpy as np\n'), ((2826, 2850), 'numpy.mean', 'np.mean', (['(x2[ind] * _wgts)'], {}), '(x2[ind] * _wgts)\n', (2833, 2850), True, 'import numpy as np\n'), ((2874, 2892), 'numpy.mean', 'np.mean', (['(y1 * wgts)'], {}), '(y1 * wgts)\n', (2881, 2892), True, 'import numpy as np\n'), ((2895, 2913), 'numpy.mean', 'np.mean', (['(x1 * wgts)'], {}), '(x1 * wgts)\n', (2902, 2913), True, 'import numpy as np\n'), ((4561, 
4588), 'numpy.sum', 'np.sum', (['((mvals - mbar) ** 2)'], {}), '((mvals - mbar) ** 2)\n', (4567, 4588), True, 'import numpy as np\n'), ((4635, 4662), 'numpy.sum', 'np.sum', (['((cvals - cbar) ** 2)'], {}), '((cvals - cbar) ** 2)\n', (4641, 4662), True, 'import numpy as np\n'), ((8093, 8110), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (8101, 8110), True, 'import numpy as np\n'), ((8375, 8392), 'numpy.isfinite', 'np.isfinite', (['R22p'], {}), '(R22p)\n', (8386, 8392), True, 'import numpy as np\n'), ((9266, 9283), 'numpy.arange', 'np.arange', (['n_grid'], {}), '(n_grid)\n', (9275, 9283), True, 'import numpy as np\n'), ((9530, 9547), 'numpy.arange', 'np.arange', (['n_gals'], {}), '(n_gals)\n', (9539, 9547), True, 'import numpy as np\n'), ((10762, 10774), 'numpy.sqrt', 'np.sqrt', (['sr2'], {}), '(sr2)\n', (10769, 10774), True, 'import numpy as np\n'), ((10848, 10868), 'numpy.ones_like', 'np.ones_like', (['psf_im'], {}), '(psf_im)\n', (10860, 10868), True, 'import numpy as np\n'), ((21972, 21986), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (21981, 21986), True, 'import numpy as np\n'), ((21988, 22002), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (21997, 22002), True, 'import numpy as np\n'), ((1396, 1413), 'numpy.arange', 'np.arange', (['nfolds'], {}), '(nfolds)\n', (1405, 1413), True, 'import numpy as np\n'), ((2721, 2745), 'numpy.mean', 'np.mean', (['(y1[ind] * _wgts)'], {}), '(y1[ind] * _wgts)\n', (2728, 2745), True, 'import numpy as np\n'), ((2748, 2772), 'numpy.mean', 'np.mean', (['(x1[ind] * _wgts)'], {}), '(x1[ind] * _wgts)\n', (2755, 2772), True, 'import numpy as np\n'), ((4426, 4443), 'numpy.delete', 'np.delete', (['y2j', 'i'], {}), '(y2j, i)\n', (4435, 4443), True, 'import numpy as np\n'), ((4462, 4479), 'numpy.delete', 'np.delete', (['x2j', 'i'], {}), '(x2j, i)\n', (4471, 4479), True, 'import numpy as np\n'), ((8348, 8364), 'numpy.isfinite', 'np.isfinite', (['g2p'], {}), '(g2p)\n', (8359, 8364), True, 'import numpy as 
np\n'), ((13814, 13826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13824, 13826), True, 'import matplotlib.pyplot as plt\n'), ((13847, 13869), 'matplotlib.pyplot.imshow', 'plt.imshow', (['obs.weight'], {}), '(obs.weight)\n', (13857, 13869), True, 'import matplotlib.pyplot as plt\n'), ((13921, 13936), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13934, 13936), False, 'import pdb\n'), ((14742, 14757), 'numpy.abs', 'np.abs', (["d['g2']"], {}), "(d['g2'])\n", (14748, 14757), True, 'import numpy as np\n'), ((19251, 19269), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (19267, 19269), False, 'import sys\n'), ((21845, 21856), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (21851, 21856), True, 'import numpy as np\n'), ((21889, 21900), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (21895, 21900), True, 'import numpy as np\n'), ((4297, 4314), 'numpy.delete', 'np.delete', (['y1j', 'i'], {}), '(y1j, i)\n', (4306, 4314), True, 'import numpy as np\n'), ((4333, 4350), 'numpy.delete', 'np.delete', (['x1j', 'i'], {}), '(x1j, i)\n', (4342, 4350), True, 'import numpy as np\n'), ((8320, 8337), 'numpy.isfinite', 'np.isfinite', (['R11m'], {}), '(R11m)\n', (8331, 8337), True, 'import numpy as np\n'), ((12662, 12708), 'pizza_cutter.slice_utils.symmetrize.symmetrize_bmask', 'symmetrize_bmask', ([], {'bmask': 'bmask_rot', 'angle': 'angle'}), '(bmask=bmask_rot, angle=angle)\n', (12678, 12708), False, 'from pizza_cutter.slice_utils.symmetrize import symmetrize_bmask\n'), ((14708, 14723), 'numpy.abs', 'np.abs', (["d['g1']"], {}), "(d['g1'])\n", (14714, 14723), True, 'import numpy as np\n'), ((8293, 8309), 'numpy.isfinite', 'np.isfinite', (['g1m'], {}), '(g1m)\n', (8304, 8309), True, 'import numpy as np\n'), ((8238, 8254), 'numpy.isfinite', 'np.isfinite', (['g1p'], {}), '(g1p)\n', (8249, 8254), True, 'import numpy as np\n'), ((8265, 8282), 'numpy.isfinite', 'np.isfinite', (['R11p'], {}), '(R11p)\n', (8276, 8282), True, 'import numpy as np\n'), 
((13111, 13157), 'pizza_cutter.slice_utils.symmetrize.symmetrize_bmask', 'symmetrize_bmask', ([], {'bmask': 'bmask_rot', 'angle': 'angle'}), '(bmask=bmask_rot, angle=angle)\n', (13127, 13157), False, 'from pizza_cutter.slice_utils.symmetrize import symmetrize_bmask\n'), ((13464, 13483), 'numpy.rot90', 'np.rot90', (['bmask_rot'], {}), '(bmask_rot)\n', (13472, 13483), True, 'import numpy as np\n'), ((9601, 9648), 'galsim.Exponential', 'galsim.Exponential', ([], {'half_light_radius': '(hlr * _ds)'}), '(half_light_radius=hlr * _ds)\n', (9619, 9648), False, 'import galsim\n'), ((12952, 12966), 'numpy.arange', 'np.arange', (['sym'], {}), '(sym)\n', (12961, 12966), True, 'import numpy as np\n')] |
"""
two-stage Lasso
"""
from blackbox_selectinf.usecase.Lasso import TwoStageLasso
from blackbox_selectinf.learning.learning import (learn_select_prob, get_weight, get_CI)
import numpy as np
import argparse
import pickle
from regreg.smooth.glm import glm
from selectinf.algorithms import lasso
from scipy.stats import norm
import matplotlib.pyplot as plt
import torch
from selectinf.distributions.discrete_family import discrete_family
from sklearn.linear_model import Lasso, LinearRegression, LogisticRegression
# Command-line configuration for the simulation study.
parser = argparse.ArgumentParser(description='two stage lasso')
# Data-generating model: 'linear' (Gaussian response) or 'binary' (logistic).
parser.add_argument('--data_type', type=str, default='linear')
# Index of the first replication; replications idx .. idx+nrep-1 are run.
parser.add_argument('--idx', type=int, default=0)
# Lasso regularization parameter.
parser.add_argument('--lbd', type=float, default=30)
# If set, remove the D_0 component from the training basis (independence trick).
parser.add_argument('--indep', action='store_true', default=False)
# Sample sizes: n/m = stage-1/stage-2 data, n_b/m_b = bootstrap resample sizes.
parser.add_argument('--n', type=int, default=1000)
parser.add_argument('--p', type=int, default=10)
parser.add_argument('--m', type=int, default=500)
parser.add_argument('--n_b', type=int, default=1000)
parser.add_argument('--m_b', type=int, default=500)
parser.add_argument('--nrep', type=int, default=1)
parser.add_argument('--savemodel', action='store_true', default=False)
parser.add_argument('--modelname', type=str, default='model_')
# Network-training hyperparameters.
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--ntrain', type=int, default=1000)
parser.add_argument('--logname', type=str, default='log_')
parser.add_argument('--loadmodel', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
# Training is declared successful once accuracy stays above `thre` for
# `consec_epochs` consecutive epochs (see learn_select_prob's flag).
parser.add_argument('--thre', type=float, default=0.99)
parser.add_argument('--consec_epochs', type=int, default=2)
# If set, the first quarter of the true coefficients are nonzero.
parser.add_argument('--nonnull', action='store_true', default=False)
args = parser.parse_args()
def main():
    """Run the two-stage-Lasso simulation study configured by module-level ``args``.

    For each replication j (seeded with j for reproducibility) this:
      1. draws stage-1 data (X_1, Y_1) of size n and stage-2 data (X_2, Y_2) of size m,
      2. fits the selection stage via ``TwoStageLasso`` and records the selected set E,
      3. trains a neural net to approximate the selection probability
         (``learn_select_prob``), adding training data until the fit succeeds,
      4. builds confidence intervals for the selected coefficients by several
         methods (black-box NN, Lee et al. polyhedral, stage-2-only, naive),
      5. dumps one pickle of results per replication.
    """
    p = args.p
    n = args.n
    m = args.m
    n_b = args.n_b
    m_b = args.m_b
    ntrain = args.ntrain
    # True coefficient vector: all-zero under the null; with --nonnull the
    # first quarter of coordinates carry signal of size 5/sqrt(n).
    beta = np.zeros(p)
    if args.nonnull:
        beta[:int(p / 4)] = 5 / np.sqrt(n)
    data_type = args.data_type
    lbd = args.lbd
    # One result dict per replication.
    logs = [dict() for x in range(args.nrep)]
    for j in range(args.idx, args.idx + args.nrep):
        print("Starting simulation", j)
        logs[j - args.idx]['seed'] = j
        np.random.seed(j)
        # --- simulate stage-1 and stage-2 samples -----------------------
        if data_type == 'linear':
            X_1 = np.random.randn(n, p)
            Y_1 = X_1 @ beta + np.random.randn(n)
            X_2 = np.random.randn(m, p)
            Y_2 = X_2 @ beta + np.random.randn(m)
        elif data_type == 'binary':
            X_1 = np.random.randn(n, p)
            prob = 1 / (1 + np.exp(- X_1 @ beta))
            Y_1 = np.random.binomial(1, prob, n)
            X_2 = np.random.randn(m, p)
            prob_2 = 1 / (1 + np.exp(- X_2 @ beta))
            Y_2 = np.random.binomial(1, prob_2, m)
        else:
            raise AssertionError("invalid data_type")
        X = np.concatenate([X_1, X_2])
        Y = np.concatenate([Y_1, Y_2])
        # Selection stage: Lasso on the stage-1 sample.
        lassoClass = TwoStageLasso(X_1, Y_1, X_2, Y_2, lbd, data_type)
        num_select = lassoClass.num_select
        # True coefficients restricted to the selected support E.
        beta_E = beta[lassoClass.E]
        E = lassoClass.E
        print("select: ", np.sum(E))
        print(E)
        # --- generate training data for the selection-probability net ---
        training_data = lassoClass.gen_train_data(ntrain=ntrain, n_b=n_b, m_b=m_b, remove_D0=args.indep, target_pos=-1)
        Z_train = training_data['Z_train']
        W_train = training_data['W_train']
        # Subsample the negative (non-selected) class to at most 950 examples
        # to limit class imbalance in the training set.
        neg_ind = np.arange(0, len(W_train))[W_train == 0]
        pos_ind = np.arange(0, len(W_train))[W_train == 1]
        neg_ind = np.random.choice(neg_ind, min(len(neg_ind), 950), replace=False)
        W_train = np.append(W_train[pos_ind], W_train[neg_ind])
        Z_train = np.concatenate([Z_train[pos_ind, :], Z_train[neg_ind, :]])
        gamma = training_data['gamma']
        Z_data_np = lassoClass.basis(X, Y)
        Z_data = torch.tensor(Z_data_np, dtype=torch.float)
        if args.indep:
            # Remove the D_0 component so Z_data matches the training basis.
            gamma_D0 = training_data['gamma_D0']
            Z_data = Z_data - gamma_D0 @ lassoClass.D_0
        logs[j - args.idx]['ones'] = np.mean(W_train)
        print("positive data:", np.mean(W_train))
        # train
        # NOTE: range(1) means a single training attempt; the else-branch that
        # generates more data is effectively dead unless the range is widened.
        net = None
        for ii in range(1):
            print("recursion", ii)
            net, flag, pr_data = learn_select_prob(Z_train, W_train, Z_data=Z_data, net=net, thre=args.thre,
                                                   consec_epochs=args.consec_epochs, num_epochs=args.epochs,
                                                   batch_size=args.batch_size, verbose=args.verbose, print_every=100)
            if flag == 1:
                print("Succeeded learning!")
                break
            else:  # generate more data
                pass
                print("generate more data")
                training_data = lassoClass.gen_train_data(ntrain=ntrain, n_b=n_b, m_b=m_b, remove_D0=args.indep)
                Z_train_new = training_data['Z_train']
                W_train_new = training_data['W_train']
                Z_train = np.concatenate([Z_train, Z_train_new])
                W_train = np.concatenate([W_train, W_train_new])
                print("positive fraction {}".format(np.mean(W_train)))
        logs[j - args.idx]['pr_data'] = pr_data
        logs[j - args.idx]['flag'] = flag
        # Observed test statistic (estimated coefficients on E) and the part
        # of Z_data orthogonal to it.
        theta_data = lassoClass.test_statistic(X, Y)
        print("beta_hat", theta_data)
        N_0 = Z_data - gamma @ theta_data
        target_var = np.diag(lassoClass.Sigma1)
        target_sd = np.sqrt(target_var)
        # Grid of +-10 sd around the observed statistic, 101 points per coordinate.
        gamma_list = np.linspace(-10 * target_sd, 10 * target_sd, 101)
        # --- black-box (NN) confidence intervals ------------------------
        interval_nn = np.zeros([num_select, 2])
        logs[j - args.idx]['covered_nn'] = []
        weight_val = np.zeros([num_select, 101])
        for k in range(num_select):
            target_theta_k = theta_data[k] + gamma_list[:, k]
            # Move the whole vector along the regression direction of
            # coordinate k (plug-in for the nuisance coordinates).
            Gamma_k = lassoClass.Sigma1[:, k] / lassoClass.Sigma1[k, k]
            target_theta = theta_data + np.outer(gamma_list[:, k], Gamma_k)
            weight_val[k, :] = get_weight(net, target_theta, N_0, gamma)
            interval = get_CI(target_theta_k, weight_val[k, :], target_var[k], theta_data[k])
            interval_nn[k, :] = interval
            if interval_nn[k, 0] <= beta_E[k] <= interval_nn[k, 1]:
                logs[j - args.idx]['covered_nn'].append(1)
            else:
                logs[j - args.idx]['covered_nn'].append(0)
        logs[j - args.idx]['interval_nn'] = interval_nn
        logs[j - args.idx]['width_nn'] = interval_nn[:, 1] - interval_nn[:, 0]
        ##################################################
        # check learning (disabled diagnostic block: bootstrap p-values under
        # re-selection; flip `if False` to enable)
        if False:
            count = 0
            nb = 50
            pval = [[] for x in range(num_select)]
            for ell in range(int(nb / np.mean(W_train))):
                idx_1 = np.random.choice(n + m, n_b, replace=True)
                X_1_b = X[idx_1, :]
                Y_1_b = Y[idx_1]
                idx_2 = np.random.choice(n + m, m_b, replace=True)
                X_2_b = X[idx_2, :]
                Y_2_b = Y[idx_2]
                X_b = np.concatenate([X_1_b, X_2_b])
                Y_b = np.concatenate([Y_1_b, Y_2_b])
                # Keep only bootstrap samples that reproduce the observed selection.
                if not np.all(lassoClass.select(X_1_b, Y_1_b) == lassoClass.sign):
                    continue
                else:
                    count += 1
                d_M = lassoClass.test_statistic(X_b, Y_b)
                observed_target = d_M
                for k in range(num_select):
                    target_theta_k = d_M[k] + gamma_list[:, k]
                    # after correction
                    Gamma_k = lassoClass.Sigma1[:, k] / lassoClass.Sigma1[k, k]
                    target_theta_0 = d_M + np.outer(gamma_list[:, k], Gamma_k)
                    # before
                    # target_theta_0 = np.tile(d_M, [101, 1])
                    # target_theta_0[:, k] = target_theta_k
                    weight_val_0 = get_weight(net, target_theta_0, N_0, gamma)
                    weight_val_2 = weight_val_0 * norm.pdf((target_theta_0[:, k] - observed_target[k]) / target_sd[k])
                    exp_family = discrete_family(target_theta_0.reshape(-1), weight_val_2.reshape(-1))
                    hypothesis = theta_data[k]
                    pivot = exp_family.cdf((hypothesis - observed_target[k]) / target_var[k], x=observed_target[k])
                    pivot = 2 * min(pivot, 1 - pivot)
                    pval[k].append(pivot)
                if count == nb:
                    break
            pval = np.array(pval)
            logs[j - args.idx]['pval'] = pval
            logs[j - args.idx]['false_rej'] = np.sum(pval <= 0.05, 1) / count
            print(pval)
            print("reject:", np.sum(pval <= 0.05, 1) / count)
        ##################################################
        # lee et al (polyhedral selective-inference intervals, stage-1 only)
        if data_type == 'linear':
            g = glm.gaussian(X_1, Y_1)
        else:
            g = glm.logistic(X_1, Y_1)
        model = lasso.lasso(g, lbd)
        model.fit()
        summ = model.summary(compute_intervals=True)
        interval_lee = np.zeros([num_select, 2])
        interval_lee[:, 0] = summ.lower_confidence
        interval_lee[:, 1] = summ.upper_confidence
        print(interval_lee)
        logs[j - args.idx]['interval_lee'] = interval_lee
        logs[j - args.idx]['covered_lee'] = []
        for k in range(num_select):
            if interval_lee[k, 0] <= beta_E[k] <= interval_lee[k, 1]:
                logs[j - args.idx]['covered_lee'].append(1)
            else:
                logs[j - args.idx]['covered_lee'].append(0)
        logs[j - args.idx]['width_lee'] = interval_lee[:, 1] - interval_lee[:, 0]
        ##################################################
        # interval true (disabled: intervals from the exact truncation limits,
        # plus a comparison plot of NN vs. true selection weights)
        if False:
            U = np.array(summ['upper_trunc'])
            L = np.array(summ['lower_trunc'])
            interval_true = np.zeros([num_select, 2])
            weight_val_true = np.zeros([num_select, 101])
            logs[j - args.idx]['covered_true'] = []
            fig, ax = plt.subplots(ncols=num_select, figsize=(4 * num_select, 5))
            for k in range(num_select):
                target_val = theta_data[k] + gamma_list[:, k]
                sigma_k = np.sqrt(m / n * target_var[k]) # what scaling?
                weight_val_true[k, :] = norm.cdf((U[k] - target_val) / sigma_k) - norm.cdf(
                    (L[k] - target_val) / sigma_k)
                interval_true[k, :] = get_CI(target_val, weight_val_true[k, :], target_var[k], theta_data[k])
                if interval_true[k, 0] <= beta_E[k] <= interval_true[k, 1]:
                    logs[j - args.idx]['covered_true'].append(1)
                else:
                    logs[j - args.idx]['covered_true'].append(0)
                # plot
                if num_select == 1:
                    plt.plot(target_val, weight_val[k, :], label='nn')
                    plt.plot(target_val, weight_val_true[k, :], label='truth', ls='--')
                    plt.legend()
                else:
                    ax[k].plot(target_val, weight_val[k, :], label='nn')
                    ax[k].plot(target_val, weight_val_true[k, :], label='truth', ls='--')
                    ax[k].legend()
            plt.savefig('{}_n_{}_p_{}_nb_{}_lbd_{}_{}.png'.format(args.logname, n, p, n_b, args.lbd, j))
            logs[j - args.idx]['interval_true'] = interval_true
            logs[j - args.idx]['width_true'] = interval_true[:, 1] - interval_true[:, 0]
        ##################################################
        # stage 2 interval (classical Wald interval using only the stage-2
        # sample, which is unaffected by selection)
        logs[j - args.idx]['covered_2'] = []
        interval_2 = np.zeros([num_select, 2])
        if data_type == 'binary':
            pr_hat = 1 / (1 + np.exp(-X_2[:, E] @ lassoClass.beta_ls_2)).reshape(-1)
            W = np.diag(pr_hat * (1 - pr_hat))
        else:
            W = np.identity(m)
        var_2 = np.diag(np.linalg.inv(X_2[:, E].T @ W @ X_2[:, E]))
        for k in range(num_select):
            interval_2[k, :] = tuple(
                (norm.ppf(0.025) * np.sqrt(var_2[k]), -norm.ppf(0.025) * np.sqrt(var_2[k])) + lassoClass.beta_ls_2[k])
            if interval_2[k, 0] <= beta_E[k] <= interval_2[k, 1]:
                logs[j - args.idx]['covered_2'].append(1)
            else:
                logs[j - args.idx]['covered_2'].append(0)
        logs[j - args.idx]['interval_2'] = interval_2
        logs[j - args.idx]['width_2'] = interval_2[:, 1] - interval_2[:, 0]
        ##################################################
        # naive interval (ignores selection entirely; generally under-covers)
        logs[j - args.idx]['covered_naive'] = []
        interval_naive = np.zeros([num_select, 2])
        for k in range(num_select):
            interval_naive[k, :] = tuple(
                (norm.ppf(0.025) * target_sd[k], -norm.ppf(0.025) * target_sd[k]) + lassoClass.beta_ls[k])
            if interval_naive[k, 0] <= beta_E[k] <= interval_naive[k, 1]:
                logs[j - args.idx]['covered_naive'].append(1)
            else:
                logs[j - args.idx]['covered_naive'].append(0)
        logs[j - args.idx]['interval_naive'] = interval_naive
        logs[j - args.idx]['width_naive'] = interval_naive[:, 1] - interval_naive[:, 0]
        # Bookkeeping and per-replication result dump.
        logs[j - args.idx]['beta_true'] = beta
        logs[j - args.idx]['E'] = E
        logs[j - args.idx]['beta_E'] = beta_E
        logs[j - args.idx]['beta_hat'] = theta_data
        path = open('{}_n_{}_p_{}_nb_{}_lbd_{}_{}.pickle'.format(args.logname, n, p, n_b, args.lbd, j), 'wb')
        pickle.dump(logs[j - args.idx], path)
        path.close()
    print(logs)
# Script entry point: run the full simulation when executed directly.
if __name__ == "__main__":
    main()
| [
"blackbox_selectinf.learning.learning.get_CI",
"numpy.sqrt",
"numpy.array",
"selectinf.algorithms.lasso.lasso",
"scipy.stats.norm.cdf",
"blackbox_selectinf.learning.learning.learn_select_prob",
"numpy.random.binomial",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.exp... | [((524, 578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""two stage lasso"""'}), "(description='two stage lasso')\n", (547, 578), False, 'import argparse\n'), ((1967, 1978), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (1975, 1978), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.random.seed', 'np.random.seed', (['j'], {}), '(j)\n', (2293, 2296), True, 'import numpy as np\n'), ((2909, 2935), 'numpy.concatenate', 'np.concatenate', (['[X_1, X_2]'], {}), '([X_1, X_2])\n', (2923, 2935), True, 'import numpy as np\n'), ((2948, 2974), 'numpy.concatenate', 'np.concatenate', (['[Y_1, Y_2]'], {}), '([Y_1, Y_2])\n', (2962, 2974), True, 'import numpy as np\n'), ((2996, 3045), 'blackbox_selectinf.usecase.Lasso.TwoStageLasso', 'TwoStageLasso', (['X_1', 'Y_1', 'X_2', 'Y_2', 'lbd', 'data_type'], {}), '(X_1, Y_1, X_2, Y_2, lbd, data_type)\n', (3009, 3045), False, 'from blackbox_selectinf.usecase.Lasso import TwoStageLasso\n'), ((3630, 3675), 'numpy.append', 'np.append', (['W_train[pos_ind]', 'W_train[neg_ind]'], {}), '(W_train[pos_ind], W_train[neg_ind])\n', (3639, 3675), True, 'import numpy as np\n'), ((3694, 3752), 'numpy.concatenate', 'np.concatenate', (['[Z_train[pos_ind, :], Z_train[neg_ind, :]]'], {}), '([Z_train[pos_ind, :], Z_train[neg_ind, :]])\n', (3708, 3752), True, 'import numpy as np\n'), ((3853, 3895), 'torch.tensor', 'torch.tensor', (['Z_data_np'], {'dtype': 'torch.float'}), '(Z_data_np, dtype=torch.float)\n', (3865, 3895), False, 'import torch\n'), ((4062, 4078), 'numpy.mean', 'np.mean', (['W_train'], {}), '(W_train)\n', (4069, 4078), True, 'import numpy as np\n'), ((5432, 5458), 'numpy.diag', 'np.diag', (['lassoClass.Sigma1'], {}), '(lassoClass.Sigma1)\n', (5439, 5458), True, 'import numpy as np\n'), ((5479, 5498), 'numpy.sqrt', 'np.sqrt', (['target_var'], {}), '(target_var)\n', (5486, 5498), True, 'import numpy as np\n'), ((5520, 5569), 'numpy.linspace', 'np.linspace', (['(-10 * target_sd)', '(10 * 
target_sd)', '(101)'], {}), '(-10 * target_sd, 10 * target_sd, 101)\n', (5531, 5569), True, 'import numpy as np\n'), ((5592, 5617), 'numpy.zeros', 'np.zeros', (['[num_select, 2]'], {}), '([num_select, 2])\n', (5600, 5617), True, 'import numpy as np\n'), ((5685, 5712), 'numpy.zeros', 'np.zeros', (['[num_select, 101]'], {}), '([num_select, 101])\n', (5693, 5712), True, 'import numpy as np\n'), ((9015, 9034), 'selectinf.algorithms.lasso.lasso', 'lasso.lasso', (['g', 'lbd'], {}), '(g, lbd)\n', (9026, 9034), False, 'from selectinf.algorithms import lasso\n'), ((9131, 9156), 'numpy.zeros', 'np.zeros', (['[num_select, 2]'], {}), '([num_select, 2])\n', (9139, 9156), True, 'import numpy as np\n'), ((11696, 11721), 'numpy.zeros', 'np.zeros', (['[num_select, 2]'], {}), '([num_select, 2])\n', (11704, 11721), True, 'import numpy as np\n'), ((12684, 12709), 'numpy.zeros', 'np.zeros', (['[num_select, 2]'], {}), '([num_select, 2])\n', (12692, 12709), True, 'import numpy as np\n'), ((13564, 13601), 'pickle.dump', 'pickle.dump', (['logs[j - args.idx]', 'path'], {}), '(logs[j - args.idx], path)\n', (13575, 13601), False, 'import pickle\n'), ((2032, 2042), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2039, 2042), True, 'import numpy as np\n'), ((2349, 2370), 'numpy.random.randn', 'np.random.randn', (['n', 'p'], {}), '(n, p)\n', (2364, 2370), True, 'import numpy as np\n'), ((2439, 2460), 'numpy.random.randn', 'np.random.randn', (['m', 'p'], {}), '(m, p)\n', (2454, 2460), True, 'import numpy as np\n'), ((3176, 3185), 'numpy.sum', 'np.sum', (['E'], {}), '(E)\n', (3182, 3185), True, 'import numpy as np\n'), ((4111, 4127), 'numpy.mean', 'np.mean', (['W_train'], {}), '(W_train)\n', (4118, 4127), True, 'import numpy as np\n'), ((4262, 4471), 'blackbox_selectinf.learning.learning.learn_select_prob', 'learn_select_prob', (['Z_train', 'W_train'], {'Z_data': 'Z_data', 'net': 'net', 'thre': 'args.thre', 'consec_epochs': 'args.consec_epochs', 'num_epochs': 'args.epochs', 'batch_size': 
'args.batch_size', 'verbose': 'args.verbose', 'print_every': '(100)'}), '(Z_train, W_train, Z_data=Z_data, net=net, thre=args.thre,\n consec_epochs=args.consec_epochs, num_epochs=args.epochs, batch_size=\n args.batch_size, verbose=args.verbose, print_every=100)\n', (4279, 4471), False, 'from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI\n'), ((5990, 6031), 'blackbox_selectinf.learning.learning.get_weight', 'get_weight', (['net', 'target_theta', 'N_0', 'gamma'], {}), '(net, target_theta, N_0, gamma)\n', (6000, 6031), False, 'from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI\n'), ((6055, 6125), 'blackbox_selectinf.learning.learning.get_CI', 'get_CI', (['target_theta_k', 'weight_val[k, :]', 'target_var[k]', 'theta_data[k]'], {}), '(target_theta_k, weight_val[k, :], target_var[k], theta_data[k])\n', (6061, 6125), False, 'from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI\n'), ((8568, 8582), 'numpy.array', 'np.array', (['pval'], {}), '(pval)\n', (8576, 8582), True, 'import numpy as np\n'), ((8923, 8945), 'regreg.smooth.glm.glm.gaussian', 'glm.gaussian', (['X_1', 'Y_1'], {}), '(X_1, Y_1)\n', (8935, 8945), False, 'from regreg.smooth.glm import glm\n'), ((8976, 8998), 'regreg.smooth.glm.glm.logistic', 'glm.logistic', (['X_1', 'Y_1'], {}), '(X_1, Y_1)\n', (8988, 8998), False, 'from regreg.smooth.glm import glm\n'), ((9835, 9864), 'numpy.array', 'np.array', (["summ['upper_trunc']"], {}), "(summ['upper_trunc'])\n", (9843, 9864), True, 'import numpy as np\n'), ((9881, 9910), 'numpy.array', 'np.array', (["summ['lower_trunc']"], {}), "(summ['lower_trunc'])\n", (9889, 9910), True, 'import numpy as np\n'), ((9939, 9964), 'numpy.zeros', 'np.zeros', (['[num_select, 2]'], {}), '([num_select, 2])\n', (9947, 9964), True, 'import numpy as np\n'), ((9995, 10022), 'numpy.zeros', 'np.zeros', (['[num_select, 101]'], {}), '([num_select, 101])\n', (10003, 10022), True, 'import numpy as 
np\n'), ((10097, 10156), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'num_select', 'figsize': '(4 * num_select, 5)'}), '(ncols=num_select, figsize=(4 * num_select, 5))\n', (10109, 10156), True, 'import matplotlib.pyplot as plt\n'), ((11857, 11887), 'numpy.diag', 'np.diag', (['(pr_hat * (1 - pr_hat))'], {}), '(pr_hat * (1 - pr_hat))\n', (11864, 11887), True, 'import numpy as np\n'), ((11918, 11932), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (11929, 11932), True, 'import numpy as np\n'), ((11957, 11999), 'numpy.linalg.inv', 'np.linalg.inv', (['(X_2[:, E].T @ W @ X_2[:, E])'], {}), '(X_2[:, E].T @ W @ X_2[:, E])\n', (11970, 11999), True, 'import numpy as np\n'), ((2402, 2420), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (2417, 2420), True, 'import numpy as np\n'), ((2492, 2510), 'numpy.random.randn', 'np.random.randn', (['m'], {}), '(m)\n', (2507, 2510), True, 'import numpy as np\n'), ((2565, 2586), 'numpy.random.randn', 'np.random.randn', (['n', 'p'], {}), '(n, p)\n', (2580, 2586), True, 'import numpy as np\n'), ((2655, 2685), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'prob', 'n'], {}), '(1, prob, n)\n', (2673, 2685), True, 'import numpy as np\n'), ((2704, 2725), 'numpy.random.randn', 'np.random.randn', (['m', 'p'], {}), '(m, p)\n', (2719, 2725), True, 'import numpy as np\n'), ((2796, 2828), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'prob_2', 'm'], {}), '(1, prob_2, m)\n', (2814, 2828), True, 'import numpy as np\n'), ((5012, 5050), 'numpy.concatenate', 'np.concatenate', (['[Z_train, Z_train_new]'], {}), '([Z_train, Z_train_new])\n', (5026, 5050), True, 'import numpy as np\n'), ((5077, 5115), 'numpy.concatenate', 'np.concatenate', (['[W_train, W_train_new]'], {}), '([W_train, W_train_new])\n', (5091, 5115), True, 'import numpy as np\n'), ((5923, 5958), 'numpy.outer', 'np.outer', (['gamma_list[:, k]', 'Gamma_k'], {}), '(gamma_list[:, k], Gamma_k)\n', (5931, 5958), True, 'import numpy as 
np\n'), ((6784, 6826), 'numpy.random.choice', 'np.random.choice', (['(n + m)', 'n_b'], {'replace': '(True)'}), '(n + m, n_b, replace=True)\n', (6800, 6826), True, 'import numpy as np\n'), ((6920, 6962), 'numpy.random.choice', 'np.random.choice', (['(n + m)', 'm_b'], {'replace': '(True)'}), '(n + m, m_b, replace=True)\n', (6936, 6962), True, 'import numpy as np\n'), ((7054, 7084), 'numpy.concatenate', 'np.concatenate', (['[X_1_b, X_2_b]'], {}), '([X_1_b, X_2_b])\n', (7068, 7084), True, 'import numpy as np\n'), ((7107, 7137), 'numpy.concatenate', 'np.concatenate', (['[Y_1_b, Y_2_b]'], {}), '([Y_1_b, Y_2_b])\n', (7121, 7137), True, 'import numpy as np\n'), ((8675, 8698), 'numpy.sum', 'np.sum', (['(pval <= 0.05)', '(1)'], {}), '(pval <= 0.05, 1)\n', (8681, 8698), True, 'import numpy as np\n'), ((10285, 10315), 'numpy.sqrt', 'np.sqrt', (['(m / n * target_var[k])'], {}), '(m / n * target_var[k])\n', (10292, 10315), True, 'import numpy as np\n'), ((10514, 10585), 'blackbox_selectinf.learning.learning.get_CI', 'get_CI', (['target_val', 'weight_val_true[k, :]', 'target_var[k]', 'theta_data[k]'], {}), '(target_val, weight_val_true[k, :], target_var[k], theta_data[k])\n', (10520, 10585), False, 'from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI\n'), ((8760, 8783), 'numpy.sum', 'np.sum', (['(pval <= 0.05)', '(1)'], {}), '(pval <= 0.05, 1)\n', (8766, 8783), True, 'import numpy as np\n'), ((10373, 10412), 'scipy.stats.norm.cdf', 'norm.cdf', (['((U[k] - target_val) / sigma_k)'], {}), '((U[k] - target_val) / sigma_k)\n', (10381, 10412), False, 'from scipy.stats import norm\n'), ((10415, 10454), 'scipy.stats.norm.cdf', 'norm.cdf', (['((L[k] - target_val) / sigma_k)'], {}), '((L[k] - target_val) / sigma_k)\n', (10423, 10454), False, 'from scipy.stats import norm\n'), ((10893, 10943), 'matplotlib.pyplot.plot', 'plt.plot', (['target_val', 'weight_val[k, :]'], {'label': '"""nn"""'}), "(target_val, weight_val[k, :], label='nn')\n", (10901, 10943), 
True, 'import matplotlib.pyplot as plt\n'), ((10964, 11031), 'matplotlib.pyplot.plot', 'plt.plot', (['target_val', 'weight_val_true[k, :]'], {'label': '"""truth"""', 'ls': '"""--"""'}), "(target_val, weight_val_true[k, :], label='truth', ls='--')\n", (10972, 11031), True, 'import matplotlib.pyplot as plt\n'), ((11052, 11064), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11062, 11064), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2634), 'numpy.exp', 'np.exp', (['(-X_1 @ beta)'], {}), '(-X_1 @ beta)\n', (2621, 2634), True, 'import numpy as np\n'), ((2756, 2775), 'numpy.exp', 'np.exp', (['(-X_2 @ beta)'], {}), '(-X_2 @ beta)\n', (2762, 2775), True, 'import numpy as np\n'), ((5168, 5184), 'numpy.mean', 'np.mean', (['W_train'], {}), '(W_train)\n', (5175, 5184), True, 'import numpy as np\n'), ((6740, 6756), 'numpy.mean', 'np.mean', (['W_train'], {}), '(W_train)\n', (6747, 6756), True, 'import numpy as np\n'), ((7934, 7977), 'blackbox_selectinf.learning.learning.get_weight', 'get_weight', (['net', 'target_theta_0', 'N_0', 'gamma'], {}), '(net, target_theta_0, N_0, gamma)\n', (7944, 7977), False, 'from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI\n'), ((7696, 7731), 'numpy.outer', 'np.outer', (['gamma_list[:, k]', 'Gamma_k'], {}), '(gamma_list[:, k], Gamma_k)\n', (7704, 7731), True, 'import numpy as np\n'), ((8032, 8100), 'scipy.stats.norm.pdf', 'norm.pdf', (['((target_theta_0[:, k] - observed_target[k]) / target_sd[k])'], {}), '((target_theta_0[:, k] - observed_target[k]) / target_sd[k])\n', (8040, 8100), False, 'from scipy.stats import norm\n'), ((11786, 11827), 'numpy.exp', 'np.exp', (['(-X_2[:, E] @ lassoClass.beta_ls_2)'], {}), '(-X_2[:, E] @ lassoClass.beta_ls_2)\n', (11792, 11827), True, 'import numpy as np\n'), ((12092, 12107), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.025)'], {}), '(0.025)\n', (12100, 12107), False, 'from scipy.stats import norm\n'), ((12110, 12127), 'numpy.sqrt', 'np.sqrt', 
(['var_2[k]'], {}), '(var_2[k])\n', (12117, 12127), True, 'import numpy as np\n'), ((12148, 12165), 'numpy.sqrt', 'np.sqrt', (['var_2[k]'], {}), '(var_2[k])\n', (12155, 12165), True, 'import numpy as np\n'), ((12806, 12821), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.025)'], {}), '(0.025)\n', (12814, 12821), False, 'from scipy.stats import norm\n'), ((12130, 12145), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.025)'], {}), '(0.025)\n', (12138, 12145), False, 'from scipy.stats import norm\n'), ((12839, 12854), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.025)'], {}), '(0.025)\n', (12847, 12854), False, 'from scipy.stats import norm\n')] |
"""Methods related to sampling and smoothing elevations."""
import time
import numpy as np
from sfrmaker.routing import get_nextupsegs, get_upsegs, make_graph
def smooth_elevations(fromids, toids, elevations, start_elevations=None):  # elevup, elevdn):
    """Smooth stream-network elevations so they never increase downstream.

    Working level-by-level upstream from the outlet(s), each segment's end
    elevation is reset to the minimum of all elevations upstream of it, its
    own current value, and (if given) the start elevation of its downstream
    segment.

    Parameters
    ----------
    fromids : sequence of hashables
    toids : sequence of hashables
        Downstream connections of fromids. An id of 0 marks an outlet;
        at least one outlet is required.
    elevations : sequence of floats
        Elevation for each edge (line) in a stream network, or if start_elevations
        are specified, the end elevation for each edge.
    start_elevations : sequence of floats, optional
        Start elevation for edge (line) in a stream network.
        By default, None.

    Returns
    -------
    Elevations : dict or tuple
        Dictionary of smoothed edge elevations,
        or smoothed end elevations, start elevations
    """
    # make forward and reverse dictionaries with routing info
    graph = dict(zip(fromids, toids))
    assert 0 in set(graph.values()), 'No outlets in routing network!'
    graph_r = make_graph(toids, fromids)

    # make dictionaries of segment end elevations
    elevations = dict(zip(fromids, elevations))
    if start_elevations is not None:
        elevmax = dict(zip(fromids, start_elevations))

    def get_upseg_levels(seg):
        """Traverse routing network, returning a list of segments
        at each level upstream from the outlets. (level 0 route to seg;
        segments in level 1 route to a segment in level 0, etc.)

        Parameters:
        -----------
        seg : int
            Starting segment number

        Returns
        -------
        all_upsegs : list
            List with list of segments at each level
        """
        upsegs = graph_r[seg].copy()
        all_upsegs = [upsegs]
        # len(fromids) iterations is an upper bound on network depth.
        for i in range(len(fromids)):
            upsegs = get_nextupsegs(graph_r, upsegs)
            if len(upsegs) > 0:
                all_upsegs.append(upsegs)
            else:
                break
        return all_upsegs

    def reset_elevations(seg):
        """Reset segment elevations above (upsegs) and below (outseg) a node.
        """
        oseg = graph[seg]
        all_upsegs = np.array(list(get_upsegs(graph_r, seg)) + [seg])  # all segments upstream of node
        elevmin_s = np.min([elevations[s] for s in all_upsegs])  # minimum current elevation upstream of node
        oldmin_s = elevations[seg]
        elevs = [elevmin_s, oldmin_s]
        if oseg > 0:  # if segment is not an outlet,
            if start_elevations is not None:
                elevs.append(elevmax[oseg])  # outseg start elevation (already updated)

        # set segment end elevation as min of
        # upstream elevations, current elevation, outseg start elevation
        elevations[seg] = np.min(elevs)

        # if the node is not an outlet, reset the outseg max if the current min is lower
        if oseg > 0:
            if start_elevations is not None:
                next_reach_elev = elevmax[oseg]
                elevmax[graph[seg]] = np.min([elevmin_s, next_reach_elev])
            else:
                next_reach_elev = elevations[oseg]
                elevations[graph[seg]] = np.min([elevmin_s, next_reach_elev])

    print('\nSmoothing elevations...')
    ta = time.time()

    # get list of segments at each level, starting with 0 (outlet)
    segment_levels = get_upseg_levels(0)
    # at each level, reset all of the segment elevations as necessary
    # (removed leftover debug statement `if 0 in level: j=2` — it had no effect)
    for level in segment_levels:
        for s in level:
            reset_elevations(s)
    print("finished in {:.2f}s".format(time.time() - ta))
    if start_elevations is not None:
        return elevations, elevmax
    return elevations
| [
"sfrmaker.routing.make_graph",
"sfrmaker.routing.get_upsegs",
"numpy.min",
"sfrmaker.routing.get_nextupsegs",
"time.time"
] | [((1071, 1097), 'sfrmaker.routing.make_graph', 'make_graph', (['toids', 'fromids'], {}), '(toids, fromids)\n', (1081, 1097), False, 'from sfrmaker.routing import get_nextupsegs, get_upsegs, make_graph\n'), ((3293, 3304), 'time.time', 'time.time', ([], {}), '()\n', (3302, 3304), False, 'import time\n'), ((2312, 2355), 'numpy.min', 'np.min', (['[elevations[s] for s in all_upsegs]'], {}), '([elevations[s] for s in all_upsegs])\n', (2318, 2355), True, 'import numpy as np\n'), ((2805, 2818), 'numpy.min', 'np.min', (['elevs'], {}), '(elevs)\n', (2811, 2818), True, 'import numpy as np\n'), ((1869, 1900), 'sfrmaker.routing.get_nextupsegs', 'get_nextupsegs', (['graph_r', 'upsegs'], {}), '(graph_r, upsegs)\n', (1883, 1900), False, 'from sfrmaker.routing import get_nextupsegs, get_upsegs, make_graph\n'), ((3060, 3096), 'numpy.min', 'np.min', (['[elevmin_s, next_reach_elev]'], {}), '([elevmin_s, next_reach_elev])\n', (3066, 3096), True, 'import numpy as np\n'), ((3207, 3243), 'numpy.min', 'np.min', (['[elevmin_s, next_reach_elev]'], {}), '([elevmin_s, next_reach_elev])\n', (3213, 3243), True, 'import numpy as np\n'), ((3658, 3669), 'time.time', 'time.time', ([], {}), '()\n', (3667, 3669), False, 'import time\n'), ((2224, 2248), 'sfrmaker.routing.get_upsegs', 'get_upsegs', (['graph_r', 'seg'], {}), '(graph_r, seg)\n', (2234, 2248), False, 'from sfrmaker.routing import get_nextupsegs, get_upsegs, make_graph\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 08:42:45 2018
@author: <NAME>
title: load_pupil
"""
#%% Imports
import pandas as pd
import numpy as np
from time_fixer import time_fixer
from movingmean import movingmean
#%% Output
class PupilReturnValue(object):
    """Container for the results of ``load_pupil``.

    Bundles the raw per-eye capture data, the time-corrected arrays, and
    derived frame-drop / frame-rate statistics for both eye cameras
    (id0 and id1).
    """

    def __init__(self, eyes_id0, eyes_id1, ticnd_id0, ticnd_id1, real_fps_id0,
                 real_fps_id1, drop_fps_id0, drop_fps_id1, eyes_both, info, fix_time):
        # Raw capture data per eye (rows = captured frames).
        self.id0_capturedata = eyes_id0
        self.id1_capturedata = eyes_id1
        # Time-corrected eye data, shape (n_frames, 8).
        self.id0_eyedata = ticnd_id0
        self.id1_eyedata = ticnd_id1
        # Frame drops are counted along axis 0 (rows = frames): the fixed
        # timeline (ticnd) has at least as many frames as were captured, so
        # the row-count difference is the number of dropped frames.
        # BUGFIX: the original used axis 1, comparing column counts (8 vs.
        # the CSV's column count), which gave meaningless/negative values.
        self.id0_rel_framedrops = 1 - np.size(eyes_id0, 0) / np.size(ticnd_id0, 0)
        self.id1_rel_framedrops = 1 - np.size(eyes_id1, 0) / np.size(ticnd_id1, 0)
        self.id0_abs_framedrops = np.size(ticnd_id0, 0) - np.size(eyes_id0, 0)
        self.id1_abs_framedrops = np.size(ticnd_id1, 0) - np.size(eyes_id1, 0)
        # Mean frame rates: 'real' = after time correction, 'drop' = raw capture.
        self.id0_real_fps = real_fps_id0
        self.id1_real_fps = real_fps_id1
        self.id0_drop_fps = drop_fps_id0
        self.id1_drop_fps = drop_fps_id1
        self.capturedata = eyes_both
        self.info = info
        self.timefix = fix_time
#%% main function
def load_pupil(directory, file_date, file_num, csv_data_input, info_input,
               fix_time, frame_rate, use_filter, window_size):
    """Load a Pupil Labs recording, split it per eye, and derive timing stats.

    Reads the exported pupil-positions CSV and the recording info file from
    ``directory\\file_date\\file_num\\``, splits the rows by eye camera id
    (0/1), converts them into (n_frames, 8) arrays of
    [time, index, confidence, norm_pos_x, norm_pos_y, diameter, vel_x, vel_y],
    optionally repairs dropped frames (``fix_time == 1``) and smooths the
    gaze positions with a moving mean (``use_filter == 1``).

    Returns a ``PupilReturnValue`` bundling the raw and processed data.
    """
    # Path to the data we want to load
    data_path = directory + file_date + '\\' + file_num + '\\'
    #%% Load eye_data and info
    eyes_both = pd.read_csv(data_path + 'exports\\' + csv_data_input, delimiter=',')
    info_temp = pd.read_csv(data_path + info_input, delimiter=',')
    # Transpose the info table so its first row becomes the column names.
    info_temp2 = info_temp.T
    info_temp2.columns = info_temp2.iloc[0]
    info = info_temp2.drop(info_temp2.index[0])
    #%% extract eye_data and separate left from right (eye camera id 0 vs 1)
    eyes_id0 = eyes_both.loc[eyes_both['id'] == 0]
    eyes_id1 = eyes_both.loc[eyes_both['id'] == 1]
    #%% convert to array and align start time
    # Columns: 0=time (relative to synced start), 1=frame index, 2=confidence,
    # 3=norm_pos_x, 4=norm_pos_y, 5=diameter, 6/7=velocity (filled later).
    ticnd_id0 = np.ndarray((np.size(eyes_id0,0), 8))
    ticnd_id0[:,0] = np.asarray(eyes_id0.timestamp) - \
    float(np.asarray(info.loc[:,'Start Time (Synced)']))
    ticnd_id0[:,1] = np.asarray(eyes_id0.index)
    ticnd_id0[:,2] = np.asarray(eyes_id0.confidence)
    ticnd_id0[:,3] = np.asarray(eyes_id0.norm_pos_x)
    ticnd_id0[:,4] = np.asarray(eyes_id0.norm_pos_y)
    ticnd_id0[:,5] = np.asarray(eyes_id0.diameter)
    ticnd_id1 = np.ndarray((np.size(eyes_id1,0), 8))
    ticnd_id1[:,0] = np.asarray(eyes_id1.timestamp) - \
    float(np.asarray(info.loc[:,'Start Time (Synced)']))
    ticnd_id1[:,1] = np.asarray(eyes_id1.index)
    ticnd_id1[:,2] = np.asarray(eyes_id1.confidence)
    ticnd_id1[:,3] = np.asarray(eyes_id1.norm_pos_x)
    ticnd_id1[:,4] = np.asarray(eyes_id1.norm_pos_y)
    ticnd_id1[:,5] = np.asarray(eyes_id1.diameter)
    #%% fix framedrops
    if fix_time == 1:
        # delete frames which occurred before sync (more than one frame
        # period before t=0; NOTE(review): -1/frame_rate assumes frame_rate
        # is in frames per second — confirm against callers)
        mask_id0 = ticnd_id0[:,0]>(-1/(1*frame_rate))
        ticnd_id0 = ticnd_id0[mask_id0,...]
        mask_id1 = ticnd_id1[:,0]>(-1/(1*frame_rate))
        ticnd_id1 = ticnd_id1[mask_id1,...]
        # the frame closest to t=0 becomes t_0, delete earlier frames
        # NOTE(review): column 1:2 is the frame *index*, not the timestamp
        # (column 0) — the comment above suggests timestamp was intended;
        # confirm which column should be minimized.
        min_id0 = np.argmin(abs(ticnd_id0[:,1:2]))
        min_id1 = np.argmin(abs(ticnd_id1[:,1:2]))
        ticnd_id0[:,0] = ticnd_id0[:,0]-ticnd_id0[min_id0,0]
        ticnd_id1[:,0] = ticnd_id1[:,0]-ticnd_id1[min_id1,0]
        mask_id0 = ticnd_id0[:,0]>=0
        ticnd_id0 = ticnd_id0[mask_id0,...]
        mask_id1 = ticnd_id1[:,0]>=0
        ticnd_id1 = ticnd_id1[mask_id1,...]
        # add back dropped frames
        fix_ticnd_id0 = time_fixer(ticnd_id0,frame_rate)
        fix_ticnd_id1 = time_fixer(ticnd_id1,frame_rate)
        # cut off overlapping frames so both eyes end with the same length
        len_both = [len(fix_ticnd_id0),len(fix_ticnd_id1)]
        len_ind = np.argmin(len_both)
        len_all = np.min(len_both)
        if len_ind == 0:
            fix_ticnd_id0 = np.delete(fix_ticnd_id0,
                                      list(range(len_all+1,len_both[0])), axis=0)
        elif len_ind == 1:
            fix_ticnd_id1 = np.delete(fix_ticnd_id1,
                                      list(range(len_all+1,len_both[1])), axis=0)
        ticnd_id0 = fix_ticnd_id0
        ticnd_id1 = fix_ticnd_id1
    # calculate velocity in x and y direction in units of the eyetracker
    # (first-difference of norm_pos divided by frame_rate; first sample is 0)
    ticnd_id0[:,6] = np.append(0, np.diff(ticnd_id0[:,4])/frame_rate)
    ticnd_id0[:,7] = np.append(0, np.diff(ticnd_id0[:,5])/frame_rate)
    ticnd_id1[:,6] = np.append(0, np.diff(ticnd_id1[:,4])/frame_rate)
    ticnd_id1[:,7] = np.append(0, np.diff(ticnd_id1[:,5])/frame_rate)
    # calculate mean FPS after use of time_fix (inverse mean frame interval)
    real_fps_id0 = 1/np.mean(np.diff(ticnd_id0[:,0]))
    real_fps_id1 = 1/np.mean(np.diff(ticnd_id1[:,0]))
    # calculate mean FPS with framedrops (raw capture count over raw duration)
    timestamp_id0 = np.asarray(eyes_id0.timestamp)
    timestamp_id1 = np.asarray(eyes_id1.timestamp)
    time_id0 = timestamp_id0[-1] - timestamp_id0[0]
    time_id1 = timestamp_id1[-1] - timestamp_id1[0]
    drop_fps_id0 = np.size(timestamp_id0,0)/time_id0
    drop_fps_id1 = np.size(timestamp_id1,0)/time_id1
    #%% filter eye_data (moving mean over the gaze position columns only)
    if use_filter == 1 and window_size > 0:
        ticnd_id0[:,3] = movingmean(ticnd_id0[:,3],window_size)
        ticnd_id0[:,4] = movingmean(ticnd_id0[:,4],window_size)
        ticnd_id1[:,3] = movingmean(ticnd_id1[:,3],window_size)
        ticnd_id1[:,4] = movingmean(ticnd_id1[:,4],window_size)
    return PupilReturnValue(eyes_id0, eyes_id1, ticnd_id0, ticnd_id1, real_fps_id0,
                            real_fps_id1, drop_fps_id0, drop_fps_id1, eyes_both,
                            info, fix_time)
| [
"pandas.read_csv",
"numpy.size",
"time_fixer.time_fixer",
"numpy.asarray",
"numpy.diff",
"numpy.min",
"numpy.argmin",
"movingmean.movingmean"
] | [((1571, 1639), 'pandas.read_csv', 'pd.read_csv', (["(data_path + 'exports\\\\' + csv_data_input)"], {'delimiter': '""","""'}), "(data_path + 'exports\\\\' + csv_data_input, delimiter=',')\n", (1582, 1639), True, 'import pandas as pd\n'), ((1657, 1707), 'pandas.read_csv', 'pd.read_csv', (['(data_path + info_input)'], {'delimiter': '""","""'}), "(data_path + info_input, delimiter=',')\n", (1668, 1707), True, 'import pandas as pd\n'), ((2249, 2275), 'numpy.asarray', 'np.asarray', (['eyes_id0.index'], {}), '(eyes_id0.index)\n', (2259, 2275), True, 'import numpy as np\n'), ((2298, 2329), 'numpy.asarray', 'np.asarray', (['eyes_id0.confidence'], {}), '(eyes_id0.confidence)\n', (2308, 2329), True, 'import numpy as np\n'), ((2352, 2383), 'numpy.asarray', 'np.asarray', (['eyes_id0.norm_pos_x'], {}), '(eyes_id0.norm_pos_x)\n', (2362, 2383), True, 'import numpy as np\n'), ((2406, 2437), 'numpy.asarray', 'np.asarray', (['eyes_id0.norm_pos_y'], {}), '(eyes_id0.norm_pos_y)\n', (2416, 2437), True, 'import numpy as np\n'), ((2460, 2489), 'numpy.asarray', 'np.asarray', (['eyes_id0.diameter'], {}), '(eyes_id0.diameter)\n', (2470, 2489), True, 'import numpy as np\n'), ((2691, 2717), 'numpy.asarray', 'np.asarray', (['eyes_id1.index'], {}), '(eyes_id1.index)\n', (2701, 2717), True, 'import numpy as np\n'), ((2740, 2771), 'numpy.asarray', 'np.asarray', (['eyes_id1.confidence'], {}), '(eyes_id1.confidence)\n', (2750, 2771), True, 'import numpy as np\n'), ((2794, 2825), 'numpy.asarray', 'np.asarray', (['eyes_id1.norm_pos_x'], {}), '(eyes_id1.norm_pos_x)\n', (2804, 2825), True, 'import numpy as np\n'), ((2848, 2879), 'numpy.asarray', 'np.asarray', (['eyes_id1.norm_pos_y'], {}), '(eyes_id1.norm_pos_y)\n', (2858, 2879), True, 'import numpy as np\n'), ((2902, 2931), 'numpy.asarray', 'np.asarray', (['eyes_id1.diameter'], {}), '(eyes_id1.diameter)\n', (2912, 2931), True, 'import numpy as np\n'), ((5117, 5147), 'numpy.asarray', 'np.asarray', (['eyes_id0.timestamp'], {}), 
'(eyes_id0.timestamp)\n', (5127, 5147), True, 'import numpy as np\n'), ((5169, 5199), 'numpy.asarray', 'np.asarray', (['eyes_id1.timestamp'], {}), '(eyes_id1.timestamp)\n', (5179, 5199), True, 'import numpy as np\n'), ((2130, 2160), 'numpy.asarray', 'np.asarray', (['eyes_id0.timestamp'], {}), '(eyes_id0.timestamp)\n', (2140, 2160), True, 'import numpy as np\n'), ((2572, 2602), 'numpy.asarray', 'np.asarray', (['eyes_id1.timestamp'], {}), '(eyes_id1.timestamp)\n', (2582, 2602), True, 'import numpy as np\n'), ((3821, 3854), 'time_fixer.time_fixer', 'time_fixer', (['ticnd_id0', 'frame_rate'], {}), '(ticnd_id0, frame_rate)\n', (3831, 3854), False, 'from time_fixer import time_fixer\n'), ((3879, 3912), 'time_fixer.time_fixer', 'time_fixer', (['ticnd_id1', 'frame_rate'], {}), '(ticnd_id1, frame_rate)\n', (3889, 3912), False, 'from time_fixer import time_fixer\n'), ((4038, 4057), 'numpy.argmin', 'np.argmin', (['len_both'], {}), '(len_both)\n', (4047, 4057), True, 'import numpy as np\n'), ((4077, 4093), 'numpy.min', 'np.min', (['len_both'], {}), '(len_both)\n', (4083, 4093), True, 'import numpy as np\n'), ((5338, 5363), 'numpy.size', 'np.size', (['timestamp_id0', '(0)'], {}), '(timestamp_id0, 0)\n', (5345, 5363), True, 'import numpy as np\n'), ((5392, 5417), 'numpy.size', 'np.size', (['timestamp_id1', '(0)'], {}), '(timestamp_id1, 0)\n', (5399, 5417), True, 'import numpy as np\n'), ((5528, 5568), 'movingmean.movingmean', 'movingmean', (['ticnd_id0[:, 3]', 'window_size'], {}), '(ticnd_id0[:, 3], window_size)\n', (5538, 5568), False, 'from movingmean import movingmean\n'), ((5593, 5633), 'movingmean.movingmean', 'movingmean', (['ticnd_id0[:, 4]', 'window_size'], {}), '(ticnd_id0[:, 4], window_size)\n', (5603, 5633), False, 'from movingmean import movingmean\n'), ((5668, 5708), 'movingmean.movingmean', 'movingmean', (['ticnd_id1[:, 3]', 'window_size'], {}), '(ticnd_id1[:, 3], window_size)\n', (5678, 5708), False, 'from movingmean import movingmean\n'), ((5733, 5773), 
'movingmean.movingmean', 'movingmean', (['ticnd_id1[:, 4]', 'window_size'], {}), '(ticnd_id1[:, 4], window_size)\n', (5743, 5773), False, 'from movingmean import movingmean\n'), ((832, 853), 'numpy.size', 'np.size', (['ticnd_id0', '(1)'], {}), '(ticnd_id0, 1)\n', (839, 853), True, 'import numpy as np\n'), ((853, 873), 'numpy.size', 'np.size', (['eyes_id0', '(1)'], {}), '(eyes_id0, 1)\n', (860, 873), True, 'import numpy as np\n'), ((908, 929), 'numpy.size', 'np.size', (['ticnd_id1', '(1)'], {}), '(ticnd_id1, 1)\n', (915, 929), True, 'import numpy as np\n'), ((929, 949), 'numpy.size', 'np.size', (['eyes_id1', '(1)'], {}), '(eyes_id1, 1)\n', (936, 949), True, 'import numpy as np\n'), ((2083, 2103), 'numpy.size', 'np.size', (['eyes_id0', '(0)'], {}), '(eyes_id0, 0)\n', (2090, 2103), True, 'import numpy as np\n'), ((2180, 2226), 'numpy.asarray', 'np.asarray', (["info.loc[:, 'Start Time (Synced)']"], {}), "(info.loc[:, 'Start Time (Synced)'])\n", (2190, 2226), True, 'import numpy as np\n'), ((2525, 2545), 'numpy.size', 'np.size', (['eyes_id1', '(0)'], {}), '(eyes_id1, 0)\n', (2532, 2545), True, 'import numpy as np\n'), ((2622, 2668), 'numpy.asarray', 'np.asarray', (["info.loc[:, 'Start Time (Synced)']"], {}), "(info.loc[:, 'Start Time (Synced)'])\n", (2632, 2668), True, 'import numpy as np\n'), ((4633, 4657), 'numpy.diff', 'np.diff', (['ticnd_id0[:, 4]'], {}), '(ticnd_id0[:, 4])\n', (4640, 4657), True, 'import numpy as np\n'), ((4704, 4728), 'numpy.diff', 'np.diff', (['ticnd_id0[:, 5]'], {}), '(ticnd_id0[:, 5])\n', (4711, 4728), True, 'import numpy as np\n'), ((4777, 4801), 'numpy.diff', 'np.diff', (['ticnd_id1[:, 4]'], {}), '(ticnd_id1[:, 4])\n', (4784, 4801), True, 'import numpy as np\n'), ((4848, 4872), 'numpy.diff', 'np.diff', (['ticnd_id1[:, 5]'], {}), '(ticnd_id1[:, 5])\n', (4855, 4872), True, 'import numpy as np\n'), ((4968, 4992), 'numpy.diff', 'np.diff', (['ticnd_id0[:, 0]'], {}), '(ticnd_id0[:, 0])\n', (4975, 4992), True, 'import numpy as np\n'), ((5023, 5047), 
'numpy.diff', 'np.diff', (['ticnd_id1[:, 0]'], {}), '(ticnd_id1[:, 0])\n', (5030, 5047), True, 'import numpy as np\n'), ((668, 688), 'numpy.size', 'np.size', (['eyes_id0', '(1)'], {}), '(eyes_id0, 1)\n', (675, 688), True, 'import numpy as np\n'), ((688, 709), 'numpy.size', 'np.size', (['ticnd_id0', '(1)'], {}), '(ticnd_id0, 1)\n', (695, 709), True, 'import numpy as np\n'), ((746, 766), 'numpy.size', 'np.size', (['eyes_id1', '(1)'], {}), '(eyes_id1, 1)\n', (753, 766), True, 'import numpy as np\n'), ((766, 787), 'numpy.size', 'np.size', (['ticnd_id1', '(1)'], {}), '(ticnd_id1, 1)\n', (773, 787), True, 'import numpy as np\n')] |
import numpy as np
def test_matrix_addition():
matrix1 = np.array([
[1, 2, 3],
[4, 5, 6]
])
matrix2 = np.array([
[10, 11, 12],
[13, 14, 15],
])
result = matrix1 + matrix2
print("result", result)
expected = np.array([
[11, 13, 15],
[17, 19, 21],
])
print("expected", expected)
assert np.equal(result, expected).all()
| [
"numpy.array",
"numpy.equal"
] | [((62, 94), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (70, 94), True, 'import numpy as np\n'), ((131, 169), 'numpy.array', 'np.array', (['[[10, 11, 12], [13, 14, 15]]'], {}), '([[10, 11, 12], [13, 14, 15]])\n', (139, 169), True, 'import numpy as np\n'), ((270, 308), 'numpy.array', 'np.array', (['[[11, 13, 15], [17, 19, 21]]'], {}), '([[11, 13, 15], [17, 19, 21]])\n', (278, 308), True, 'import numpy as np\n'), ((376, 402), 'numpy.equal', 'np.equal', (['result', 'expected'], {}), '(result, expected)\n', (384, 402), True, 'import numpy as np\n')] |
import numpy as np
def computeCost(X, y, theta):
m = len(X)
costs = np.power(np.matmul(X, theta) - y, 2)
return np.sum(costs) / (2*m) | [
"numpy.sum",
"numpy.matmul"
] | [((130, 143), 'numpy.sum', 'np.sum', (['costs'], {}), '(costs)\n', (136, 143), True, 'import numpy as np\n'), ((90, 109), 'numpy.matmul', 'np.matmul', (['X', 'theta'], {}), '(X, theta)\n', (99, 109), True, 'import numpy as np\n')] |
import numpy as np
def mia_get_threshold(train_conf, test_conf):
result = []
for val in train_conf: result.append( [val, 1] )
for val in test_conf: result.append( [val, 0] )
train_cnt = len(train_conf)
test_cnt = len(test_conf)
result = np.array(result, dtype=np.float32)
result = result[result[:,0].argsort()]
one = train_cnt
zero = test_cnt
best_atk_acc = -1
threshold = -1
for i in range(len(result)):
atk_acc = 0.5 * (one/train_cnt + (test_cnt-zero)/test_cnt)
if best_atk_acc < atk_acc and threshold < result[i][0]:
best_atk_acc = atk_acc
threshold = result[i][0]
if result[i][1] == 1:
one = one-1
else: zero = zero-1
return threshold, best_atk_acc
| [
"numpy.array"
] | [((267, 301), 'numpy.array', 'np.array', (['result'], {'dtype': 'np.float32'}), '(result, dtype=np.float32)\n', (275, 301), True, 'import numpy as np\n')] |
alist = [1, 2, 3, 4]
blist = alist[1:3]
alist[1:3] = [5, 5]
print('blist: ', blist)
import numpy as np
alist = np.array([1, 2, 3, 4])
blist = alist[1:3]
alist[1:3] = [5, 5]
print('blist: ', blist)
# ! This program is very interesting, the explanation is here.
# ! [numpy的切片和python的切片区别](https://blog.csdn.net/bornfree5511/article/details/116375695) | [
"numpy.array"
] | [((113, 135), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (121, 135), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import dace
import numpy as np
import select
import sys
N = dace.symbol("N")
M = dace.symbol("M")
dtype = dace.float64
# This implementation of transposed DGEMV assumes that the two vectors (x and y) fit into
# FPGA fast memory
def make_init_state(sdfg):
state = sdfg.add_state("init")
a_host = state.add_array("A", (M, N), dtype)
a_device = state.add_array(
"A_device", (M, N),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
x_host = state.add_array("x", (M, ), dtype)
x_device = state.add_array(
"x_device", (M, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
y_host = state.add_array("y", (M, ), dtype)
y_device = state.add_array(
"y_device", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
state.add_memlet_path(
a_host,
a_device,
memlet=dace.memlet.Memlet.simple(a_device, "0:N, 0:M"))
state.add_memlet_path(
x_host, x_device, memlet=dace.memlet.Memlet.simple(x_device, "0:M"))
state.add_memlet_path(
y_host, y_device, memlet=dace.memlet.Memlet.simple(y_device, "0:N"))
return state
def make_finalize_state(sdfg):
state = sdfg.add_state("finalize")
y_host = state.add_array("y", (M, ), dtype)
y_device = state.add_array(
"y_device", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
state.add_memlet_path(
y_device, y_host, memlet=dace.memlet.Memlet.simple(y_host, "0:N"))
return state
def make_load_state(sdfg):
state = sdfg.add_state("load")
y = state.add_array(
"y_nested", (N, ), dtype, storage=dace.dtypes.StorageType.FPGA_Global)
y_buffer = state.add_array(
"y_buffer", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Local)
state.add_memlet_path(
y, y_buffer, memlet=dace.memlet.Memlet.simple(y_buffer, "0:N"))
return state
def make_store_state(sdfg):
state = sdfg.add_state("store")
y_buffer = state.add_array(
"y_buffer", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Local)
y = state.add_array(
"y_nested", (N, ), dtype, storage=dace.dtypes.StorageType.FPGA_Global)
state.add_memlet_path(
y_buffer, y, memlet=dace.memlet.Memlet.simple(y, "0:N"))
return state
def make_compute_state(sdfg):
state = sdfg.add_state("compute")
a = state.add_array(
"A_nested", (M, N), dtype, storage=dace.dtypes.StorageType.FPGA_Global)
x = state.add_array(
"x_nested", (M, ), dtype, storage=dace.dtypes.StorageType.FPGA_Global)
y_buffer = state.add_array(
"y_buffer", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Local)
cols_entry, cols_exit = state.add_map(
"cols", {"m": "0:M"}, schedule=dace.ScheduleType.Sequential)
rows_entry, rows_exit = state.add_map("rows", {"n": "0:N"})
tasklet = state.add_tasklet("update", {"a", "x_in"}, {"update"},
"update = a * x_in")
wcr_memlet = dace.memlet.Memlet.simple(
y_buffer, "n", wcr_str="lambda a, b: a + b", wcr_identity=0)
state.add_memlet_path(
a,
cols_entry,
rows_entry,
tasklet,
dst_conn="a",
memlet=dace.memlet.Memlet.simple(a, "m, n"))
state.add_memlet_path(
x,
cols_entry,
rows_entry,
tasklet,
dst_conn="x_in",
memlet=dace.memlet.Memlet.simple(x, "m"))
state.add_memlet_path(
tasklet,
rows_exit,
cols_exit,
y_buffer,
src_conn="update",
memlet=wcr_memlet)
return state
def make_outer_compute_state(sdfg):
state = sdfg.add_state("gemv_transposed")
nested_sdfg = dace.SDFG("gemv_transposed")
load_state = make_load_state(nested_sdfg)
compute_state = make_compute_state(nested_sdfg)
store_state = make_store_state(nested_sdfg)
nested_sdfg.add_edge(load_state, compute_state,
dace.graph.edges.InterstateEdge())
nested_sdfg.add_edge(compute_state, store_state,
dace.graph.edges.InterstateEdge())
tasklet = state.add_nested_sdfg(nested_sdfg, sdfg,
{"A_nested", "x_nested"}, {"y_nested"})
a_device = state.add_array(
"A_device", (M, N),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
x_device = state.add_array(
"x_device", (M, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
y_device = state.add_array(
"y_device", (N, ),
dtype,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
state.add_memlet_path(
a_device,
tasklet,
dst_conn="A_nested",
memlet=dace.memlet.Memlet.simple(a_device, "0:M, 0:N"))
state.add_memlet_path(
x_device,
tasklet,
dst_conn="x_nested",
memlet=dace.memlet.Memlet.simple(x_device, "0:M"))
state.add_memlet_path(
tasklet,
y_device,
src_conn="y_nested",
memlet=dace.memlet.Memlet.simple(y_device, "0:N"))
return state
def make_sdfg(specialize):
if specialize:
name = "gemv_transposed_{}x{}".format(N.get(), M.get())
else:
name = "gemv_transposed_{}xM".format(N.get())
sdfg = dace.SDFG(name)
init_state = make_init_state(sdfg)
fpga_state = make_outer_compute_state(sdfg)
finalize_state = make_finalize_state(sdfg)
sdfg.add_edge(init_state, fpga_state, dace.graph.edges.InterstateEdge())
sdfg.add_edge(fpga_state, finalize_state,
dace.graph.edges.InterstateEdge())
return sdfg
if __name__ == "__main__":
print("==== Program start ====")
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int)
parser.add_argument("M", type=int)
parser.add_argument(
"-specialize",
default=False,
action="store_true",
help="Also fix M in hardware")
args = vars(parser.parse_args())
N.set(args["N"])
if args["specialize"]:
print("Specializing M...")
M.set(args["M"])
gemv = make_sdfg(args["specialize"])
gemv.draw_to_file()
gemv.specialize()
if not args["specialize"]:
M.set(args["M"])
print("Running GEMV {}x{} ({}specialized)".format(
N.get(), M.get(), ("" if args["specialize"] else "not ")))
A = dace.ndarray([M, N], dtype=dtype)
x = dace.ndarray([M], dtype=dtype)
y = dace.ndarray([N], dtype=dtype)
# Intialize: randomize A, x and y
# A[:, :] = np.random.rand(M.get(), N.get()).astype(dtype.type)
# x[:] = np.random.rand(M.get()).astype(dtype.type)
# y[:] = np.random.rand(N.get()).astype(dtype.type)
A[:, :] = 1
x[:] = 1
y[:] = 0
# Regression
regression = np.matmul(np.transpose(A), x) + y
#############################################
# Run DaCe program
if args["specialize"]:
gemv(A=A, x=x, y=x)
else:
gemv(A=A, M=M, x=x, y=y)
residual = np.linalg.norm(y - regression) / dace.eval(N * M)
print("Residual:", residual)
diff = np.abs(y - regression)
wrong_elements = np.transpose(np.nonzero(diff >= 0.01))
highest_diff = np.max(diff)
print("==== Program end ====")
if residual >= 0.01 or highest_diff >= 0.01:
print("Verification failed!")
print("Residual: {}".format(residual))
print("Incorrect elements: {} / {}".format(wrong_elements.shape[0],
dace.eval(N * M)))
print("Highest difference: {}".format(highest_diff))
print("** Result:\n", y)
print("** Reference:\n", regression)
print("Type \"debug\" to enter debugger, "
"or any other string to quit (timeout in 10 seconds)")
read, _, _ = select.select([sys.stdin], [], [], 10)
if len(read) > 0 and sys.stdin.readline().strip().lower() == "debug":
print("Entering debugger...")
import pdb
pdb.set_trace()
else:
print("Exiting...")
exit(1)
exit(0)
| [
"numpy.abs",
"select.select",
"numpy.transpose",
"argparse.ArgumentParser",
"dace.graph.edges.InterstateEdge",
"dace.memlet.Memlet.simple",
"dace.symbol",
"numpy.max",
"sys.stdin.readline",
"dace.SDFG",
"dace.eval",
"numpy.nonzero",
"numpy.linalg.norm",
"pdb.set_trace",
"dace.ndarray"
] | [((138, 154), 'dace.symbol', 'dace.symbol', (['"""N"""'], {}), "('N')\n", (149, 154), False, 'import dace\n'), ((159, 175), 'dace.symbol', 'dace.symbol', (['"""M"""'], {}), "('M')\n", (170, 175), False, 'import dace\n'), ((3340, 3430), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y_buffer', '"""n"""'], {'wcr_str': '"""lambda a, b: a + b"""', 'wcr_identity': '(0)'}), "(y_buffer, 'n', wcr_str='lambda a, b: a + b',\n wcr_identity=0)\n", (3365, 3430), False, 'import dace\n'), ((4053, 4081), 'dace.SDFG', 'dace.SDFG', (['"""gemv_transposed"""'], {}), "('gemv_transposed')\n", (4062, 4081), False, 'import dace\n'), ((5703, 5718), 'dace.SDFG', 'dace.SDFG', (['name'], {}), '(name)\n', (5712, 5718), False, 'import dace\n'), ((6128, 6153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6151, 6153), False, 'import argparse\n'), ((6794, 6827), 'dace.ndarray', 'dace.ndarray', (['[M, N]'], {'dtype': 'dtype'}), '([M, N], dtype=dtype)\n', (6806, 6827), False, 'import dace\n'), ((6836, 6866), 'dace.ndarray', 'dace.ndarray', (['[M]'], {'dtype': 'dtype'}), '([M], dtype=dtype)\n', (6848, 6866), False, 'import dace\n'), ((6875, 6905), 'dace.ndarray', 'dace.ndarray', (['[N]'], {'dtype': 'dtype'}), '([N], dtype=dtype)\n', (6887, 6905), False, 'import dace\n'), ((7519, 7541), 'numpy.abs', 'np.abs', (['(y - regression)'], {}), '(y - regression)\n', (7525, 7541), True, 'import numpy as np\n'), ((7621, 7633), 'numpy.max', 'np.max', (['diff'], {}), '(diff)\n', (7627, 7633), True, 'import numpy as np\n'), ((4305, 4338), 'dace.graph.edges.InterstateEdge', 'dace.graph.edges.InterstateEdge', ([], {}), '()\n', (4336, 4338), False, 'import dace\n'), ((4418, 4451), 'dace.graph.edges.InterstateEdge', 'dace.graph.edges.InterstateEdge', ([], {}), '()\n', (4449, 4451), False, 'import dace\n'), ((5897, 5930), 'dace.graph.edges.InterstateEdge', 'dace.graph.edges.InterstateEdge', ([], {}), '()\n', (5928, 5930), False, 'import dace\n'), ((5996, 6029), 
'dace.graph.edges.InterstateEdge', 'dace.graph.edges.InterstateEdge', ([], {}), '()\n', (6027, 6029), False, 'import dace\n'), ((7425, 7455), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - regression)'], {}), '(y - regression)\n', (7439, 7455), True, 'import numpy as np\n'), ((7458, 7474), 'dace.eval', 'dace.eval', (['(N * M)'], {}), '(N * M)\n', (7467, 7474), False, 'import dace\n'), ((7576, 7600), 'numpy.nonzero', 'np.nonzero', (['(diff >= 0.01)'], {}), '(diff >= 0.01)\n', (7586, 7600), True, 'import numpy as np\n'), ((8230, 8268), 'select.select', 'select.select', (['[sys.stdin]', '[]', '[]', '(10)'], {}), '([sys.stdin], [], [], 10)\n', (8243, 8268), False, 'import select\n'), ((1049, 1096), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['a_device', '"""0:N, 0:M"""'], {}), "(a_device, '0:N, 0:M')\n", (1074, 1096), False, 'import dace\n'), ((1159, 1201), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['x_device', '"""0:M"""'], {}), "(x_device, '0:M')\n", (1184, 1201), False, 'import dace\n'), ((1264, 1306), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y_device', '"""0:N"""'], {}), "(y_device, '0:N')\n", (1289, 1306), False, 'import dace\n'), ((1660, 1700), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y_host', '"""0:N"""'], {}), "(y_host, '0:N')\n", (1685, 1700), False, 'import dace\n'), ((2096, 2138), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y_buffer', '"""0:N"""'], {}), "(y_buffer, '0:N')\n", (2121, 2138), False, 'import dace\n'), ((2536, 2571), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y', '"""0:N"""'], {}), "(y, '0:N')\n", (2561, 2571), False, 'import dace\n'), ((3569, 3605), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['a', '"""m, n"""'], {}), "(a, 'm, n')\n", (3594, 3605), False, 'import dace\n'), ((3742, 3775), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['x', '"""m"""'], {}), "(x, 'm')\n", (3767, 3775), False, 'import dace\n'), 
((5147, 5194), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['a_device', '"""0:M, 0:N"""'], {}), "(a_device, '0:M, 0:N')\n", (5172, 5194), False, 'import dace\n'), ((5302, 5344), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['x_device', '"""0:M"""'], {}), "(x_device, '0:M')\n", (5327, 5344), False, 'import dace\n'), ((5452, 5494), 'dace.memlet.Memlet.simple', 'dace.memlet.Memlet.simple', (['y_device', '"""0:N"""'], {}), "(y_device, '0:N')\n", (5477, 5494), False, 'import dace\n'), ((7212, 7227), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (7224, 7227), True, 'import numpy as np\n'), ((8424, 8439), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8437, 8439), False, 'import pdb\n'), ((7931, 7947), 'dace.eval', 'dace.eval', (['(N * M)'], {}), '(N * M)\n', (7940, 7947), False, 'import dace\n'), ((8298, 8318), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (8316, 8318), False, 'import sys\n')] |
'''
dateCreated: 190801
objective: demonstrate how to make a colormap in two different ways.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as mcm
x=np.linspace(0,10)
y=x**2
# let's say you want to add a colorbar to your plot. there's an easy way and a
# "robust" way
# Easy Way =============================
f,p=plt.subplots()
pltItem=p.scatter(x,y,c=y,s=2)
f.colorbar(pltItem)
# Robust Way ===========================
cmm = mcm.ScalarMappable()
cmm.set_array(y)
f1,p1=plt.subplots()
p1.scatter(x,y,c=y,s=2)
p1.set_ylim([y.max(),y.min()])
f1.colorbar(cmm)
plt.show()
| [
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((181, 199), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {}), '(0, 10)\n', (192, 199), True, 'import numpy as np\n'), ((349, 363), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (361, 363), True, 'import matplotlib.pyplot as plt\n'), ((463, 483), 'matplotlib.cm.ScalarMappable', 'mcm.ScalarMappable', ([], {}), '()\n', (481, 483), True, 'import matplotlib.cm as mcm\n'), ((508, 522), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (520, 522), True, 'import matplotlib.pyplot as plt\n'), ((596, 606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (604, 606), True, 'import matplotlib.pyplot as plt\n')] |
from milk.supervised.classifier import normaliselabels
import numpy as np
def test_normaliselabels():
np.random.seed(22)
labels = np.zeros(120, np.uint8)
labels[40:] += 1
labels[65:] += 1
reorder = np.argsort(np.random.rand(len(labels)))
labels = labels[reorder]
labels2,names = normaliselabels(labels)
for new_n,old_n in enumerate(names):
assert np.all( (labels == old_n) == (labels2 == new_n) )
| [
"numpy.all",
"numpy.zeros",
"numpy.random.seed",
"milk.supervised.classifier.normaliselabels"
] | [((107, 125), 'numpy.random.seed', 'np.random.seed', (['(22)'], {}), '(22)\n', (121, 125), True, 'import numpy as np\n'), ((139, 162), 'numpy.zeros', 'np.zeros', (['(120)', 'np.uint8'], {}), '(120, np.uint8)\n', (147, 162), True, 'import numpy as np\n'), ((308, 331), 'milk.supervised.classifier.normaliselabels', 'normaliselabels', (['labels'], {}), '(labels)\n', (323, 331), False, 'from milk.supervised.classifier import normaliselabels\n'), ((388, 435), 'numpy.all', 'np.all', (['((labels == old_n) == (labels2 == new_n))'], {}), '((labels == old_n) == (labels2 == new_n))\n', (394, 435), True, 'import numpy as np\n')] |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import random
from collections.abc import Iterable
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import ENTITY_TYPE, OutputType, get_output_types, recursive_tile
from ...core.operand import OperandStage, MapReduceOperand
from ...serialization.serializables import (
BoolField,
DictField,
Float32Field,
KeyField,
Int32Field,
Int64Field,
NDArrayField,
StringField,
)
from ...tensor.operands import TensorShuffleProxy
from ...tensor.random import RandomStateField
from ...tensor.utils import gen_random_seeds
from ...utils import has_unknown_shape
from ..initializer import Series as asseries
from ..operands import DataFrameOperandMixin, DataFrameOperand
from ..utils import parse_index
# Prefixes for the temporary columns that GroupBySampleILoc appends during its
# map stage; a per-operand random integer suffix (``random_col_id``) is added
# when building the actual column names, to avoid clashing with user columns.
_ILOC_COL_HEADER = "_gsamp_iloc_col_"
_WEIGHT_COL_HEADER = "_gsamp_weight_col_"
# code adapted from pandas.core.groupby.groupby.DataFrameGroupBy.sample
def _sample_groupby_iter(
groupby, obj_index, n, frac, replace, weights, random_state=None, errors="ignore"
):
if weights is None:
ws = [None] * groupby.ngroups
elif not isinstance(weights, Iterable) or isinstance(weights, str):
ws = [weights] * groupby.ngroups
else:
weights = pd.Series(weights, index=obj_index)
ws = [weights.iloc[idx] for idx in groupby.indices.values()]
group_iterator = groupby.grouper.get_iterator(groupby._selected_obj)
if not replace and errors == "ignore":
for (_, obj), w in zip(group_iterator, ws):
yield obj.sample(
n=n, frac=frac, replace=replace, weights=w, random_state=random_state
) if len(obj) > n else obj
else:
for (_, obj), w in zip(group_iterator, ws):
yield obj.sample(
n=n, frac=frac, replace=replace, weights=w, random_state=random_state
)
class GroupBySampleILoc(DataFrameOperand, DataFrameOperandMixin):
    """
    Internal operand computing the positional indices (ilocs) that
    ``groupby.sample`` selects from the input dataframe.

    The output is a 1-d integer tensor of unknown length holding global row
    positions.  Tiling works in two stages: a *map* stage appends a global
    iloc column (and, optionally, a weight column) to every input chunk, the
    augmented frame is re-grouped, and a *reduce* stage samples each group
    and emits the selected ilocs.
    """

    _op_code_ = opcodes.GROUPBY_SAMPLE_ILOC
    _op_module_ = "dataframe.groupby"

    # parameters forwarded to the underlying groupby call
    _groupby_params = DictField("groupby_params")
    # sampling parameters, mirroring ``DataFrame.sample``
    _size = Int64Field("size")
    _frac = Float32Field("frac")
    _replace = BoolField("replace")
    _weights = KeyField("weights")
    _seed = Int32Field("seed")
    _random_state = RandomStateField("random_state")
    _errors = StringField("errors")
    # random suffix used to build unique temporary column names
    _random_col_id = Int32Field("random_col_id")
    # for chunks
    # num of instances for chunks
    # global iloc of the first row of a chunk (set per map chunk in tile())
    _left_iloc_bound = Int64Field("left_iloc_bound")

    def __init__(
        self,
        groupby_params=None,
        size=None,
        frac=None,
        replace=None,
        weights=None,
        random_state=None,
        seed=None,
        errors=None,
        left_iloc_bound=None,
        random_col_id=None,
        **kw
    ):
        super().__init__(
            _groupby_params=groupby_params,
            _size=size,
            _frac=frac,
            _seed=seed,
            _replace=replace,
            _weights=weights,
            _random_state=random_state,
            _errors=errors,
            _left_iloc_bound=left_iloc_bound,
            _random_col_id=random_col_id,
            **kw
        )
        # pick a random column-name suffix when none was supplied, so the
        # temporary iloc / weight columns are unlikely to collide with
        # existing columns
        if self._random_col_id is None:
            self._random_col_id = random.randint(10000, 99999)

    @property
    def groupby_params(self):
        return self._groupby_params

    @property
    def size(self):
        return self._size

    @property
    def frac(self):
        return self._frac

    @property
    def replace(self):
        return self._replace

    @property
    def weights(self):
        return self._weights

    @property
    def seed(self):
        return self._seed

    @property
    def random_state(self):
        # lazily build a RandomState from the stored seed
        if self._random_state is None:
            self._random_state = np.random.RandomState(self.seed)
        return self._random_state

    @property
    def errors(self):
        return self._errors

    @property
    def left_iloc_bound(self):
        return self._left_iloc_bound

    @property
    def random_col_id(self):
        return self._random_col_id

    def _set_inputs(self, inputs):
        """Rebind entity inputs after graph substitution.

        The first input is always the source dataframe; when ``weights`` is
        an entity it is the second input and must be rebound here.
        """
        super()._set_inputs(inputs)
        input_iter = iter(inputs)
        next(input_iter)
        if isinstance(self.weights, ENTITY_TYPE):
            self._weights = next(input_iter)

    def __call__(self, df):
        """Create the output tileable: a 1-d int tensor of unknown length."""
        self._output_types = [OutputType.tensor]
        inp_tileables = [df]
        if self.weights is not None:
            inp_tileables.append(self.weights)
        return self.new_tileable(
            inp_tileables, dtype=np.dtype(np.int_), shape=(np.nan,)
        )

    @classmethod
    def tile(cls, op: "GroupBySampleILoc"):
        """Tile into map chunks, a regrouped frame and reduce chunks."""
        in_df = op.inputs[0]
        out_tensor = op.outputs[0]
        # concrete temporary column names for this operand instance
        iloc_col_header = _ILOC_COL_HEADER + str(op.random_col_id)
        weight_col_header = _WEIGHT_COL_HEADER + str(op.random_col_id)
        # bare yield: defer tiling until chunk shapes are materialized,
        # as the global iloc offsets below need concrete nsplits
        if has_unknown_shape(in_df):
            yield
        if op.weights is None:
            weights_iter = itertools.repeat(None)
        else:
            weights_iter = iter(op.weights.chunks)
        # keep only the group-by key columns plus the temporary columns
        # in the mapped frame
        if isinstance(op.groupby_params["by"], list):
            map_cols = list(op.groupby_params["by"])
        else:  # pragma: no cover
            map_cols = []
        dtypes = in_df.dtypes.copy()
        dtypes.at[iloc_col_header] = np.dtype(np.int_)
        map_cols.append(iloc_col_header)
        if op.weights is not None:
            dtypes.at[weight_col_header] = op.weights.dtype
            map_cols.append(weight_col_header)
        new_dtypes = dtypes[map_cols]
        new_columns_value = parse_index(new_dtypes.index, store_data=True)

        map_chunks = []
        # cumulative row offsets: global iloc of the first row of each chunk
        left_ilocs = np.array((0,) + in_df.nsplits[0]).cumsum()
        for inp_chunk, weight_chunk in zip(in_df.chunks, weights_iter):
            new_op = op.copy().reset_key()
            new_op._left_iloc_bound = int(left_ilocs[inp_chunk.index[0]])
            new_op.stage = OperandStage.map
            new_op._output_types = [OutputType.dataframe]
            inp_chunks = [inp_chunk]
            if weight_chunk is not None:
                inp_chunks.append(weight_chunk)
            params = inp_chunk.params
            params.update(
                dict(
                    dtypes=new_dtypes,
                    columns_value=new_columns_value,
                    shape=(inp_chunk.shape[0], len(new_dtypes)),
                    index=inp_chunk.index,
                )
            )
            map_chunks.append(new_op.new_chunk(inp_chunks, **params))

        # intermediate tileable holding the key + iloc (+ weight) columns
        new_op = op.copy().reset_key()
        new_op._output_types = [OutputType.dataframe]
        params = in_df.params
        params.update(
            dict(
                chunks=map_chunks,
                nsplits=(in_df.nsplits[0], (len(new_dtypes),)),
                dtypes=new_dtypes,
                columns_value=new_columns_value,
                shape=(in_df.shape[0], len(new_dtypes)),
            )
        )
        map_df = new_op.new_tileable(op.inputs, **params)

        # re-group the mapped frame; selection is dropped as only the
        # temporary columns are consumed downstream
        groupby_params = op.groupby_params.copy()
        groupby_params.pop("selection", None)
        grouped = yield from recursive_tile(map_df.groupby(**groupby_params))

        result_chunks = []
        # one independent seed per reduce chunk, derived from the
        # operand's random state so results are reproducible
        seeds = gen_random_seeds(len(grouped.chunks), op.random_state)
        for group_chunk, seed in zip(grouped.chunks, seeds):
            new_op = op.copy().reset_key()
            new_op.stage = OperandStage.reduce
            new_op._weights = None
            new_op._random_state = None
            new_op._seed = seed
            result_chunks.append(
                new_op.new_chunk(
                    [group_chunk],
                    shape=(np.nan,),
                    index=(group_chunk.index[0],),
                    dtype=out_tensor.dtype,
                )
            )

        new_op = op.copy().reset_key()
        params = out_tensor.params
        params.update(
            dict(chunks=result_chunks, nsplits=((np.nan,) * len(result_chunks),))
        )
        return new_op.new_tileables(op.inputs, **params)

    @classmethod
    def execute(cls, ctx, op: "GroupBySampleILoc"):
        """Run one chunk: map stage builds the iloc/weight frame,
        reduce stage samples each group and emits selected ilocs."""
        in_data = ctx[op.inputs[0].key]
        iloc_col = _ILOC_COL_HEADER + str(op.random_col_id)
        weight_col = _WEIGHT_COL_HEADER + str(op.random_col_id)
        if op.stage == OperandStage.map:
            # map: emit a frame holding the global row positions of this
            # chunk (left_iloc_bound .. left_iloc_bound + len) and,
            # when given, the per-row weights
            if op.weights is not None:
                ret = pd.DataFrame(
                    {
                        iloc_col: np.arange(
                            op.left_iloc_bound, op.left_iloc_bound + len(in_data)
                        ),
                        weight_col: ctx[op.weights.key],
                    },
                    index=in_data.index,
                )
            else:
                ret = pd.DataFrame(
                    {
                        iloc_col: np.arange(
                            op.left_iloc_bound, op.left_iloc_bound + len(in_data)
                        ),
                    },
                    index=in_data.index,
                )
            # prepend the group-by key columns so the frame can be regrouped
            if isinstance(op.groupby_params["by"], list):
                ret = pd.concat([in_data[op.groupby_params["by"]], ret], axis=1)
            ctx[op.outputs[0].key] = ret
        else:
            # reduce: ``in_data`` is a pandas GroupBy over the mapped frame
            if weight_col not in in_data.obj.columns:
                weight_col = None
            if len(in_data.obj) == 0 or in_data.ngroups == 0:
                # nothing to sample from: empty iloc array
                ctx[op.outputs[0].key] = np.array([], dtype=np.int_)
            else:
                # sample every group and collect the selected global ilocs
                ctx[op.outputs[0].key] = np.concatenate(
                    [
                        sample_pd[iloc_col].to_numpy()
                        for sample_pd in _sample_groupby_iter(
                            in_data,
                            in_data.obj.index,
                            n=op.size,
                            frac=op.frac,
                            replace=op.replace,
                            weights=weight_col,
                            random_state=op.random_state,
                            errors=op.errors,
                        )
                    ]
                )
class GroupBySample(MapReduceOperand, DataFrameOperandMixin):
    """Map-reduce operand implementing ``groupby(...).sample(...)``.

    Single-chunk inputs are sampled directly.  Otherwise tiling goes
    through a shuffle: map chunks split sampled global ilocs per source
    chunk, reduce chunks collect the ilocs that fall into each source
    chunk, and combine chunks pick the sampled rows out of the data.
    """
    _op_code_ = opcodes.RAND_SAMPLE
    _op_module_ = "dataframe.groupby"
    # Parameters mirroring pandas ``GroupBy.sample``.
    _groupby_params = DictField("groupby_params")
    _size = Int64Field("size")
    _frac = Float32Field("frac")
    _replace = BoolField("replace")
    _weights = KeyField("weights")
    _seed = Int32Field("seed")
    _random_state = RandomStateField("random_state")
    _errors = StringField("errors")
    # for chunks
    # num of instances for chunks
    _input_nsplits = NDArrayField("input_nsplits")
    def __init__(
        self,
        groupby_params=None,
        size=None,
        frac=None,
        replace=None,
        weights=None,
        random_state=None,
        seed=None,
        errors=None,
        input_nsplits=None,
        **kw
    ):
        super().__init__(
            _groupby_params=groupby_params,
            _size=size,
            _frac=frac,
            _seed=seed,
            _replace=replace,
            _weights=weights,
            _random_state=random_state,
            _errors=errors,
            _input_nsplits=input_nsplits,
            **kw
        )
    # Simple read-only accessors for the serialized fields above.
    @property
    def groupby_params(self):
        return self._groupby_params
    @property
    def size(self):
        return self._size
    @property
    def frac(self):
        return self._frac
    @property
    def replace(self):
        return self._replace
    @property
    def weights(self):
        return self._weights
    @property
    def seed(self):
        return self._seed
    @property
    def random_state(self):
        return self._random_state
    @property
    def errors(self):
        return self._errors
    @property
    def input_nsplits(self):
        return self._input_nsplits
    def _set_inputs(self, inputs):
        # The first input is always the frame; when weights is a tileable
        # it arrives as the second input and must be re-bound after copies.
        super()._set_inputs(inputs)
        input_iter = iter(inputs)
        next(input_iter)
        if isinstance(self.weights, ENTITY_TYPE):
            self._weights = next(input_iter)
    def __call__(self, groupby):
        """Build the sampled output tileable from a GroupBy object."""
        # Walk back to the dataframe/series the groupby was built from.
        df = groupby
        while df.op.output_types[0] not in (OutputType.dataframe, OutputType.series):
            df = df.inputs[0]
        selection = groupby.op.groupby_params.pop("selection", None)
        if df.ndim > 1 and selection:
            if isinstance(selection, tuple) and selection not in df.dtypes:
                selection = list(selection)
            result_df = df[selection]
        else:
            result_df = df
        params = result_df.params
        # The sampled row count is unknown until execution time.
        params["shape"] = (
            (np.nan,) if result_df.ndim == 1 else (np.nan, result_df.shape[-1])
        )
        params["index_value"] = parse_index(result_df.index_value.to_pandas()[:0])
        input_dfs = [df]
        if isinstance(self.weights, ENTITY_TYPE):
            input_dfs.append(self.weights)
        self._output_types = get_output_types(result_df)
        return self.new_tileable(input_dfs, **params)
    @classmethod
    def _tile_one_chunk(cls, op: "GroupBySample", in_df, weights):
        """Fast path: sample in a single chunk when the input has one chunk."""
        out = op.outputs[0]
        input_dfs = [in_df]
        if isinstance(weights, ENTITY_TYPE):
            input_dfs.append(weights)
        params = out.params
        chunk_op = op.copy().reset_key()
        if isinstance(weights, ENTITY_TYPE):
            chunk_op._weights = weights
        params["index"] = (0,) * out.ndim
        chunk = chunk_op.new_chunk([c.chunks[0] for c in input_dfs], **params)
        df_op = op.copy().reset_key()
        return df_op.new_tileables(
            input_dfs, chunks=[chunk], nsplits=((s,) for s in out.shape), **params
        )
    @classmethod
    def _tile_distributed(cls, op: "GroupBySample", in_df, weights):
        """Shuffle-based tiling for multi-chunk inputs."""
        out_df = op.outputs[0]
        if has_unknown_shape(in_df):
            yield
        # First compute the sampled global ilocs across all groups.
        sample_iloc_op = GroupBySampleILoc(
            groupby_params=op.groupby_params,
            size=op.size,
            frac=op.frac,
            replace=op.replace,
            weights=weights,
            random_state=op.random_state,
            errors=op.errors,
            seed=None,
            left_iloc_bound=None,
        )
        sampled_iloc = yield from recursive_tile(sample_iloc_op(in_df))
        # Map stage: partition sampled ilocs by their source chunk.
        map_chunks = []
        for c in sampled_iloc.chunks:
            new_op = op.copy().reset_key()
            new_op.stage = OperandStage.map
            new_op._weights = None
            new_op._output_types = [OutputType.tensor]
            new_op._input_nsplits = np.array(in_df.nsplits[0])
            map_chunks.append(
                new_op.new_chunk(
                    [c], dtype=sampled_iloc.dtype, shape=(np.nan,), index=c.index
                )
            )
        proxy_chunk = TensorShuffleProxy(dtype=sampled_iloc.dtype).new_chunk(
            map_chunks, shape=()
        )
        # Reduce stage: per source chunk, gather and sort the local ilocs.
        reduce_chunks = []
        for src_chunk in in_df.chunks:
            new_op = op.copy().reset_key()
            new_op._weights = None
            new_op._output_types = [OutputType.tensor]
            new_op.stage = OperandStage.reduce
            new_op.reducer_index = (src_chunk.index[0],)
            new_op._input_nsplits = np.array(in_df.nsplits[0])
            reduce_chunks.append(
                new_op.new_chunk(
                    [proxy_chunk],
                    index=src_chunk.index,
                    dtype=sampled_iloc.dtype,
                    shape=(np.nan,),
                )
            )
        # Combine stage: select the sampled rows from each source chunk.
        combine_chunks = []
        for src_chunk, reduce_chunk in zip(in_df.chunks, reduce_chunks):
            new_op = op.copy().reset_key()
            new_op.stage = OperandStage.combine
            new_op._weights = None
            params = out_df.params
            if out_df.ndim == 2:
                params.update(
                    dict(
                        index=src_chunk.index,
                        dtypes=out_df.dtypes,
                        shape=(np.nan, out_df.shape[1]),
                        columns_value=out_df.columns_value,
                    )
                )
            else:
                params.update(
                    dict(
                        index=(src_chunk.index[0],),
                        dtype=out_df.dtype,
                        shape=(np.nan,),
                        name=out_df.name,
                    )
                )
            combine_chunks.append(new_op.new_chunk([src_chunk, reduce_chunk], **params))
        new_op = op.copy().reset_key()
        if out_df.ndim == 2:
            new_nsplits = ((np.nan,) * in_df.chunk_shape[0], (out_df.shape[1],))
        else:
            new_nsplits = ((np.nan,) * in_df.chunk_shape[0],)
        return new_op.new_tileables(
            out_df.inputs, chunks=combine_chunks, nsplits=new_nsplits, **out_df.params
        )
    @classmethod
    def tile(cls, op: "GroupBySample"):
        """Dispatch to single-chunk or shuffle tiling."""
        in_df = op.inputs[0]
        if in_df.ndim == 2:
            in_df = yield from recursive_tile(in_df.rechunk({1: (in_df.shape[1],)}))
        weights = op.weights
        if isinstance(weights, ENTITY_TYPE):
            # weight chunks must be aligned with the frame's row splits
            weights = yield from recursive_tile(weights.rechunk({0: in_df.nsplits[0]}))
        if len(in_df.chunks) == 1:
            return cls._tile_one_chunk(op, in_df, weights)
        return (yield from cls._tile_distributed(op, in_df, weights))
    @classmethod
    def execute(cls, ctx, op: "GroupBySample"):
        """Execute one stage (or the whole sample on the one-chunk path)."""
        out_df = op.outputs[0]
        if op.stage == OperandStage.map:
            # split sorted global ilocs by the source chunks' boundaries
            in_data = ctx[op.inputs[0].key]
            in_data = np.sort(in_data)
            input_nsplits = np.copy(op.input_nsplits).tolist()
            pos_array = np.cumsum([0] + input_nsplits)
            poses = np.searchsorted(in_data, pos_array).tolist()
            for idx, (left, right) in enumerate(zip(poses, poses[1:])):
                ctx[op.outputs[0].key, (idx,)] = in_data[left:right]
        elif op.stage == OperandStage.reduce:
            # gather ilocs for this chunk and convert them to local offsets
            in_indexes = list(op.iter_mapper_data(ctx))
            idx = np.sort(np.concatenate(in_indexes))
            if op.outputs[0].index[0] > 0:
                acc_nsplits = np.cumsum(op.input_nsplits)
                idx -= acc_nsplits[op.outputs[0].index[0] - 1]
            ctx[op.outputs[0].key] = idx
        elif op.stage == OperandStage.combine:
            # pick the sampled rows out of the source chunk
            in_data = ctx[op.inputs[0].key]
            idx = ctx[op.inputs[1].key]
            selection = op.groupby_params.get("selection")
            if selection:
                in_data = in_data[selection]
            ctx[op.outputs[0].key] = in_data.iloc[idx]
        else:
            # one-chunk path: group and sample locally
            in_data = ctx[op.inputs[0].key]
            weights = op.weights
            if isinstance(weights, ENTITY_TYPE):
                weights = ctx[weights.key]
            params = op.groupby_params.copy()
            selection = params.pop("selection", None)
            grouped = in_data.groupby(**params)
            if selection is not None:
                grouped = grouped[selection]
            result = pd.concat(
                [
                    sample_df
                    for sample_df in _sample_groupby_iter(
                        grouped,
                        in_data.index,
                        n=op.size,
                        frac=op.frac,
                        replace=op.replace,
                        weights=weights,
                        random_state=op.random_state,
                        errors=op.errors,
                    )
                ]
            )
            ctx[out_df.key] = result
def groupby_sample(
    groupby,
    n=None,
    frac=None,
    replace=False,
    weights=None,
    random_state=None,
    errors="ignore",
):
    """
    Return a random sample of items from each group.

    Use `random_state` for reproducibility.

    Parameters
    ----------
    n : int, optional
        Number of items to return for each group.  Mutually exclusive
        with `frac`; defaults to one item per group when both are None.
    frac : float, optional
        Fraction of items to return for each group.  Mutually exclusive
        with `n`.
    replace : bool, default False
        Whether the same row may be sampled more than once.
    weights : list-like, optional
        Per-row sampling probabilities, normalized within each group.
        Equal weighting is used when omitted.  Values must be
        non-negative with at least one positive element per group.
    random_state : int, array-like, BitGenerator, np.random.RandomState, optional
        Seed or random state for the random number generator.
    errors : {'ignore', 'raise'}, default 'ignore'
        With 'ignore', no error is raised when `replace` is False and a
        group is smaller than `n`.

    Returns
    -------
    Series or DataFrame
        A new object of the same type as the caller holding the items
        randomly sampled within each group.

    See Also
    --------
    DataFrame.sample : Generate random samples from a DataFrame object.
    numpy.random.choice : Generate a random sample from a given 1-D numpy
        array.

    Examples
    --------
    >>> import mars.dataframe as md
    >>> df = md.DataFrame(
    ...     {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
    ... )
    >>> df.groupby("a").sample(n=1, random_state=1).execute()
           a  b
    4  black  4
    2   blue  2
    1    red  1
    """
    # Drop grouping options that the sampling operand manages itself.
    params = groupby.op.groupby_params.copy()
    params.pop("as_index", None)
    # Materialize raw weight sequences as a series entity.
    if weights is not None and not isinstance(weights, ENTITY_TYPE):
        weights = asseries(weights)
    # pandas semantics: one sample per group when neither n nor frac given.
    if n is None and frac is None:
        n = 1
    # RandomState-like wrappers are unwrapped via to_numpy before copying.
    if hasattr(random_state, "to_numpy"):
        state = copy.deepcopy(random_state.to_numpy())
    else:
        state = copy.deepcopy(random_state)
    sample_op = GroupBySample(
        size=n,
        frac=frac,
        replace=replace,
        weights=weights,
        random_state=state,
        groupby_params=params,
        errors=errors,
    )
    return sample_op(groupby)
| [
"pandas.Series",
"numpy.copy",
"numpy.searchsorted",
"numpy.sort",
"numpy.array",
"numpy.concatenate",
"numpy.cumsum",
"numpy.dtype",
"pandas.concat",
"random.randint",
"numpy.random.RandomState",
"itertools.repeat"
] | [((5903, 5920), 'numpy.dtype', 'np.dtype', (['np.int_'], {}), '(np.int_)\n', (5911, 5920), True, 'import numpy as np\n'), ((1848, 1883), 'pandas.Series', 'pd.Series', (['weights'], {'index': 'obj_index'}), '(weights, index=obj_index)\n', (1857, 1883), True, 'import pandas as pd\n'), ((3828, 3856), 'random.randint', 'random.randint', (['(10000)', '(99999)'], {}), '(10000, 99999)\n', (3842, 3856), False, 'import random\n'), ((4370, 4402), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (4391, 4402), True, 'import numpy as np\n'), ((5572, 5594), 'itertools.repeat', 'itertools.repeat', (['None'], {}), '(None)\n', (5588, 5594), False, 'import itertools\n'), ((15155, 15181), 'numpy.array', 'np.array', (['in_df.nsplits[0]'], {}), '(in_df.nsplits[0])\n', (15163, 15181), True, 'import numpy as np\n'), ((15824, 15850), 'numpy.array', 'np.array', (['in_df.nsplits[0]'], {}), '(in_df.nsplits[0])\n', (15832, 15850), True, 'import numpy as np\n'), ((18194, 18210), 'numpy.sort', 'np.sort', (['in_data'], {}), '(in_data)\n', (18201, 18210), True, 'import numpy as np\n'), ((18298, 18328), 'numpy.cumsum', 'np.cumsum', (['([0] + input_nsplits)'], {}), '([0] + input_nsplits)\n', (18307, 18328), True, 'import numpy as np\n'), ((5148, 5165), 'numpy.dtype', 'np.dtype', (['np.int_'], {}), '(np.int_)\n', (5156, 5165), True, 'import numpy as np\n'), ((6264, 6297), 'numpy.array', 'np.array', (['((0,) + in_df.nsplits[0])'], {}), '((0,) + in_df.nsplits[0])\n', (6272, 6297), True, 'import numpy as np\n'), ((9706, 9764), 'pandas.concat', 'pd.concat', (["[in_data[op.groupby_params['by']], ret]"], {'axis': '(1)'}), "([in_data[op.groupby_params['by']], ret], axis=1)\n", (9715, 9764), True, 'import pandas as pd\n'), ((10013, 10040), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int_'}), '([], dtype=np.int_)\n', (10021, 10040), True, 'import numpy as np\n'), ((18239, 18264), 'numpy.copy', 'np.copy', (['op.input_nsplits'], {}), '(op.input_nsplits)\n', 
(18246, 18264), True, 'import numpy as np\n'), ((18349, 18384), 'numpy.searchsorted', 'np.searchsorted', (['in_data', 'pos_array'], {}), '(in_data, pos_array)\n', (18364, 18384), True, 'import numpy as np\n'), ((18663, 18689), 'numpy.concatenate', 'np.concatenate', (['in_indexes'], {}), '(in_indexes)\n', (18677, 18689), True, 'import numpy as np\n'), ((18764, 18791), 'numpy.cumsum', 'np.cumsum', (['op.input_nsplits'], {}), '(op.input_nsplits)\n', (18773, 18791), True, 'import numpy as np\n')] |
#import warnings
#warnings.filterwarnings('ignore', category=DeprecationWarning)
#warnings.filterwarnings('ignore', category=RuntimeWarning)
#warnings.filterwarnings('ignore', category=FutureWarning)
import numpy as np
import h5py
# We don't care if we're reimporting blessings, under this context.
#warnings.filterwarnings('ignore')
class Plotter(object):
    '''
    This is a semi-generic plotting interface that has a built in curses based terminal plotter.
    It's fairly specific to what we're using it for here, but we could (and maybe should) build it out into
    a little library that we can use via the command line to plot things. Might be useful for looking at data later.
    That would also cut the size of this tool down by a good bit.
    '''
    def __init__(self, h5file, h5key, iteration=-1, interface='matplotlib'):
        """Bind an h5py-like mapping and the dataset key to plot.

        Parameters
        ----------
        h5file : mapping
            h5py file (or dict-like object) holding the datasets.
        h5key : str
            Key of the dataset to plot (e.g. 'rate_evolution').
        iteration : int
            Default iteration to plot up to; -1 means "last".
        interface : {'matplotlib', 'text'}
            Preferred backend; falls back to the terminal plotter when
            matplotlib or an X server is unavailable.
        """
        self.iteration = iteration
        # NOTE(review): `bin_labels`/`state_labels` are not parameters, so
        # the first lookup always raises NameError and we fall back to the
        # file; kept as-is to preserve behavior.
        try:
            self.bin_labels = list(bin_labels[...])
            self.state_labels = list(state_labels[...]) + ['unknown']
        except Exception:
            try:
                self.state_labels = list(h5file['state_labels'][...]) + ['unknown']
            except Exception:
                self.state_labels = None
        self.interface = interface
        self.h5file = h5file
        self.h5key = h5key
        # Determine dimensionality of the dataset: 3 for i->j over time,
        # 2 for a per-state series, 1 when there is no shape or no CI record.
        try:
            self.dim = len(h5file[h5key].shape)
        except Exception:
            self.dim = 1
        try:
            # does the confidence-interval record exist?
            a = h5file[h5key]['expected']
        except Exception:
            self.dim = 1
    def plot(self, i=0, j=1, tau=1, iteration=None, dim=0, interface=None):
        """Plot the CI evolution for states (i, j) up to `iteration`."""
        if iteration is None:  # was `== None`
            iteration = self.iteration
        self.__generic_ci__(self.h5file, iteration, i, j, tau=tau, h5key=self.h5key, dim=dim, interface=interface)
    def __generic_ci__(self, h5file, iteration, i, j, tau, h5key='rate_evolution', dim=0, interface=None):
        """Dispatch a CI plot to matplotlib or the terminal backend."""
        if (interface is None and self.interface == 'text') or interface == 'text':
            if self.dim > 1:
                self.__terminal_ci__(h5file, iteration, i, j, tau, h5key)
            else:
                self.__terminal_expected__(h5file, iteration, i, j, tau, h5key, dim)
        else:
            try:
                import matplotlib
                matplotlib.use('TkAgg')
                from matplotlib import pyplot as plt
                if self.dim == 3:
                    plt.plot(h5file[h5key]['expected'][:iteration, i, j] / tau, color='black')
                    plt.plot(h5file[h5key]['ci_ubound'][:iteration, i, j] / tau, color='grey')
                    plt.plot(h5file[h5key]['ci_lbound'][:iteration, i, j] / tau, color='grey')
                else:
                    plt.plot(h5file[h5key]['expected'][:iteration, i] / tau, color='black')
                    plt.plot(h5file[h5key]['ci_ubound'][:iteration, i] / tau, color='grey')
                    plt.plot(h5file[h5key]['ci_lbound'][:iteration, i] / tau, color='grey')
                plt.show()
            except Exception:
                print('Unable to import plotting interface. An X server ($DISPLAY) is required.')
                if self.dim > 1:
                    # Fixed: h5key was previously omitted, raising TypeError
                    # because __terminal_ci__ has no default for it.
                    self.__terminal_ci__(h5file, iteration, i, j, tau, h5key)
                else:
                    self.__terminal_expected__(h5file, iteration, i, j, tau, h5key, dim)
                return 1
    def __generic_histo__(self, vector, labels):
        """Dispatch a bar-chart of `vector` to matplotlib or the terminal."""
        if self.interface == 'text':
            self.__terminal_histo__(vector, labels)
        else:
            try:
                import matplotlib
                matplotlib.use('TkAgg')
                from matplotlib import pyplot as plt
                plt.bar(list(range(0, np.array(vector).shape[0])), vector, linewidth=0, align='center', color='gold', tick_label=labels)
                plt.show()
            except Exception:
                print('Unable to import plotting interface. An X server ($DISPLAY) is required.')
                # Fixed: previously passed the undefined name `h5file` as the
                # first argument (wrong arity), raising NameError here.
                self.__terminal_histo__(vector, labels)
                return 1
    def __terminal_histo__(self, vector, labels, fullscreen_mode=True):
        """Draw a full-screen text histogram with blessings."""
        from blessings import Terminal
        self.t = Terminal()
        h = int(self.t.height / 4) * 3
        w = self.t.width
        cols = np.array(vector).shape[0]
        # Integer column width: true division produced floats on Python 3,
        # which terminal coordinates cannot use.
        colwidth = w // cols
        with self.t.fullscreen():
            for y in range(0, h):
                for x in range(0, cols):
                    if x == 0:
                        with self.t.location(0, y):
                            print(self.t.red('{0:.4f}|'.format(float(h - y) / float(h))))
                    with self.t.location((x * colwidth) + 8 + len(labels[x]) // 2, y):
                        if vector[x] >= (float(h - y) / float(h)):
                            print(self.t.on_blue(' '))
            for x in range(0, cols):
                if x == 0:
                    with self.t.location(x, h):
                        print('States| ')
                with self.t.location((x * colwidth) + 8, h):
                    print(self.t.blue(labels[x]))
        if fullscreen_mode:
            input("Press enter to continue.")
    def __terminal_ci__(self, h5file, iteration, si, sj, tau, h5key):
        """Draw the CI evolution of dataset `h5key` as a text plot."""
        from blessings import Terminal
        self.t = Terminal()
        h = int(self.t.height / 4 * 3.75)
        w = self.t.width
        if self.dim == 3:
            in_tup = (iteration - 1, si, sj)
        else:
            in_tup = (iteration - 1, si)
        # y-axis range: pad the final CI bounds so the trace fits.
        yupper = (h5file[h5key]['ci_ubound'][in_tup] / tau) * 2
        ylower = (h5file[h5key]['ci_lbound'][in_tup] / tau) / 2
        # Height scale, top to bottom.  np.float was removed from NumPy;
        # the builtin float is equivalent here.
        scale = np.array([0.0] + [ylower + i * (yupper - ylower) / float(h) for i in range(0, h)])[::-1]
        # Subsample iterations so the trace fits the terminal width
        # (integer division: a float block size breaks indexing on Py3).
        if iteration > w:
            block_size = iteration // w
        else:
            block_size = 1
        with self.t.fullscreen():
            try:
                for x in range(0, w - 12):
                    it = x * block_size  # renamed from `iter` (shadowed builtin)
                    if self.dim == 3:
                        in_tup = (it - 1, si, sj)
                    else:
                        in_tup = (it - 1, si)
                    yupper = (h5file[h5key]['ci_ubound'][in_tup] / tau)
                    ylower = (h5file[h5key]['ci_lbound'][in_tup] / tau)
                    ci = np.digitize([yupper, ylower], scale)
                    if x == 0:
                        for y in range(0, h + 1):
                            with self.t.location(0, y):
                                print(self.t.bold(self.t.red('{0:.7f}|'.format(scale[y]))))
                    for y in range(ci[0], ci[1]):
                        print(self.t.move(y, x + 12) + self.t.on_blue(' '))
                    print(self.t.move(np.digitize(h5file[h5key]['expected'][in_tup] / tau, scale), x + 12) + self.t.on_blue('-'))
                # x-axis labels every ~tenth of the width.  `w / 10` was a
                # float step, which range() rejects on Python 3.
                for x in range(0, w - 12, max(1, w // 10)):
                    if x == 0:
                        with self.t.location(x, h + 1):
                            print('Iteration| ')
                    with self.t.location(x + 12, h + 1):
                        it = x * block_size
                        print(self.t.blue(str(it)))
            except Exception:
                pass
            with self.t.location(0, h + 2):
                if self.dim == 3:
                    print("{} from {} to {} from iter 1 to {}".format(h5key, self.state_labels[si], self.state_labels[sj], self.iteration))
                else:
                    print("{} of state {} from iter 1 to {}".format(h5key, self.state_labels[si], self.iteration))
            with self.t.location(0, h + 3):
                input("Press enter to continue.")
    def __terminal_expected__(self, h5file, iteration, si, sj, tau, h5key, dim):
        """Draw a raw expected-value dataset (no CI record) as a text plot."""
        from blessings import Terminal
        self.t = Terminal()
        h = int(self.t.height / 4 * 3.75)
        w = self.t.width
        # NOTE(review): the (si, sj) tuples are immediately overwritten
        # below; `dim` selects the dataset column instead.  Kept as-is.
        if self.dim == 3:
            in_tup = (iteration - 1, si, sj)
        else:
            in_tup = (iteration - 1, si)
        in_tup = (iteration - 1, dim)
        try:
            yupper = (np.max(h5file) / tau) * 2
        except Exception:
            in_tup = (iteration - 1)
            yupper = (np.max(h5file) / tau) * 2
        ylower = (np.min(h5file) / tau) * 2
        # Height scale, top to bottom, with 0 pinned at one end.
        if yupper > 0:
            yupper = (np.max(h5file) / tau) * 1.2
            ylower = (np.min(h5file) / tau) / 2
            scale = np.array([0.0] + [ylower + i * (yupper - ylower) / float(h) for i in range(0, h)])[::-1]
        else:
            yupper = (np.max(h5file) / tau) / 2
            ylower = (np.min(h5file) / tau) * 1.2
            scale = np.array([ylower + i * (yupper - ylower) / float(h) for i in range(0, h)] + [0.0])[::-1]
        if iteration > w:
            block_size = iteration // w
        else:
            block_size = 1
        with self.t.fullscreen():
            try:
                for x in range(0, w - 12):
                    it = x * block_size
                    if self.dim == 3:
                        in_tup = (it - 1, si, sj)
                    else:
                        in_tup = (it - 1, si)
                    in_tup = (it - 1, dim)
                    try:
                        yupper = (h5file[in_tup] / tau)
                    except Exception:
                        in_tup = (it - 1)
                        yupper = (h5file[in_tup] / tau)
                    ylower = (h5file[in_tup] / tau)
                    ci = np.digitize([yupper, ylower], scale)
                    if x == 0:
                        for y in range(0, h + 1):
                            with self.t.location(0, y):
                                print(self.t.bold(self.t.red('{0:.7f}|'.format(scale[y]))))
                    for y in range(ci[0], ci[1]):
                        print(self.t.move(y, x + 12) + self.t.on_blue(' '))
                    print(self.t.move(np.digitize(h5file[in_tup] / tau, scale), x + 12) + self.t.on_blue('-'))
                for x in range(0, w - 12, max(1, w // 10)):
                    if x == 0:
                        with self.t.location(x, h + 1):
                            print('Iteration| ')
                    with self.t.location(x + 12, h + 1):
                        it = x * block_size
                        print(self.t.blue(str(it)))
            except Exception:
                pass
            with self.t.location(0, h + 2):
                print("{} from iter 1 to {}".format(h5key, self.iteration))
            with self.t.location(0, h + 3):
                input("Press enter to continue.")
| [
"numpy.float",
"blessings.Terminal",
"matplotlib.use",
"numpy.digitize",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.array",
"numpy.min",
"matplotlib.pyplot.show"
] | [((4958, 4968), 'blessings.Terminal', 'Terminal', ([], {}), '()\n', (4966, 4968), False, 'from blessings import Terminal\n'), ((6142, 6152), 'blessings.Terminal', 'Terminal', ([], {}), '()\n', (6150, 6152), False, 'from blessings import Terminal\n'), ((9448, 9458), 'blessings.Terminal', 'Terminal', ([], {}), '()\n', (9456, 9458), False, 'from blessings import Terminal\n'), ((2927, 2950), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (2941, 2950), False, 'import matplotlib\n'), ((3637, 3647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3645, 3647), True, 'from matplotlib import pyplot as plt\n'), ((4379, 4402), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (4393, 4402), False, 'import matplotlib\n'), ((4609, 4619), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4617, 4619), True, 'from matplotlib import pyplot as plt\n'), ((5048, 5064), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (5056, 5064), True, 'import numpy as np\n'), ((9924, 9938), 'numpy.min', 'np.min', (['h5file'], {}), '(h5file)\n', (9930, 9938), True, 'import numpy as np\n'), ((3058, 3132), 'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['expected'][:iteration, i, j] / tau)"], {'color': '"""black"""'}), "(h5file[h5key]['expected'][:iteration, i, j] / tau, color='black')\n", (3066, 3132), True, 'from matplotlib import pyplot as plt\n'), ((3153, 3227), 'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['ci_ubound'][:iteration, i, j] / tau)"], {'color': '"""grey"""'}), "(h5file[h5key]['ci_ubound'][:iteration, i, j] / tau, color='grey')\n", (3161, 3227), True, 'from matplotlib import pyplot as plt\n'), ((3248, 3322), 'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['ci_lbound'][:iteration, i, j] / tau)"], {'color': '"""grey"""'}), "(h5file[h5key]['ci_lbound'][:iteration, i, j] / tau, color='grey')\n", (3256, 3322), True, 'from matplotlib import pyplot as plt\n'), ((3365, 3436), 
'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['expected'][:iteration, i] / tau)"], {'color': '"""black"""'}), "(h5file[h5key]['expected'][:iteration, i] / tau, color='black')\n", (3373, 3436), True, 'from matplotlib import pyplot as plt\n'), ((3457, 3528), 'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['ci_ubound'][:iteration, i] / tau)"], {'color': '"""grey"""'}), "(h5file[h5key]['ci_ubound'][:iteration, i] / tau, color='grey')\n", (3465, 3528), True, 'from matplotlib import pyplot as plt\n'), ((3549, 3620), 'matplotlib.pyplot.plot', 'plt.plot', (["(h5file[h5key]['ci_lbound'][:iteration, i] / tau)"], {'color': '"""grey"""'}), "(h5file[h5key]['ci_lbound'][:iteration, i] / tau, color='grey')\n", (3557, 3620), True, 'from matplotlib import pyplot as plt\n'), ((7250, 7286), 'numpy.digitize', 'np.digitize', (['[yupper, ylower]', 'scale'], {}), '([yupper, ylower], scale)\n', (7261, 7286), True, 'import numpy as np\n'), ((9781, 9795), 'numpy.max', 'np.max', (['h5file'], {}), '(h5file)\n', (9787, 9795), True, 'import numpy as np\n'), ((10043, 10057), 'numpy.max', 'np.max', (['h5file'], {}), '(h5file)\n', (10049, 10057), True, 'import numpy as np\n'), ((10093, 10107), 'numpy.min', 'np.min', (['h5file'], {}), '(h5file)\n', (10099, 10107), True, 'import numpy as np\n'), ((10259, 10273), 'numpy.max', 'np.max', (['h5file'], {}), '(h5file)\n', (10265, 10273), True, 'import numpy as np\n'), ((10307, 10321), 'numpy.min', 'np.min', (['h5file'], {}), '(h5file)\n', (10313, 10321), True, 'import numpy as np\n'), ((11167, 11203), 'numpy.digitize', 'np.digitize', (['[yupper, ylower]', 'scale'], {}), '([yupper, ylower], scale)\n', (11178, 11203), True, 'import numpy as np\n'), ((9880, 9894), 'numpy.max', 'np.max', (['h5file'], {}), '(h5file)\n', (9886, 9894), True, 'import numpy as np\n'), ((6639, 6650), 'numpy.float', 'np.float', (['h'], {}), '(h)\n', (6647, 6650), True, 'import numpy as np\n'), ((7971, 8030), 'numpy.digitize', 'np.digitize', 
(["(h5file[h5key]['expected'][in_tup] / tau)", 'scale'], {}), "(h5file[h5key]['expected'][in_tup] / tau, scale)\n", (7982, 8030), True, 'import numpy as np\n'), ((11649, 11689), 'numpy.digitize', 'np.digitize', (['(h5file[in_tup] / tau)', 'scale'], {}), '(h5file[in_tup] / tau, scale)\n', (11660, 11689), True, 'import numpy as np\n'), ((4494, 4510), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (4502, 4510), True, 'import numpy as np\n'), ((10182, 10193), 'numpy.float', 'np.float', (['h'], {}), '(h)\n', (10190, 10193), True, 'import numpy as np\n'), ((10390, 10401), 'numpy.float', 'np.float', (['h'], {}), '(h)\n', (10398, 10401), True, 'import numpy as np\n')] |
"""Run decoding analyses from eyelink signal for the working memory task and
save decoding performance"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
import mne
from h5io import read_hdf5
from mne.decoding import (GeneralizingEstimator, cross_val_multiscore,
LinearModel)
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from jr.gat import (AngularRegression, scorer_spearman,
scorer_angle)
from base import (complete_behavior, get_events_interactions)
from config import path_data
import sys
# Subject identifier from the command line: one job per subject, e.g.
# dispatched from a swarm file for parallel computing on biowulf.
subject = sys.argv[1]
# Define analyses: only cue-locked epochs are decoded here; target- and
# probe-locked epochs get no analyses.
analyses = ['cue_side', 'cue_type',
            'target_angle_cue_angle', 'target_sfreq_cue_sfreq']
analyses = dict(Target=[],
                Cue=analyses,
                Probe=[])
# Sub-directory (under each subject's results folder) for saved scores.
output_folder = '/decoding_from_eyelink/'
def load(subject, event_type):
    """Return the MEG epochs and completed behavior table for one event type."""
    behavior_fname = op.join(path_data, subject, 'behavior_%s.hdf5' % event_type)
    # Behavior: read the HDF5 table and add explicit condition columns.
    events = complete_behavior(read_hdf5(behavior_fname))
    # MEG epochs for the same event type.
    epochs_fname = op.join(path_data, subject, 'epochs_%s.fif' % event_type)
    epochs = mne.read_epochs(epochs_fname)
    return epochs, events
# Create the per-subject result folder if it does not exist yet.
results_folder = op.join(path_data + 'results/' + subject + output_folder)
if not os.path.exists(results_folder):
    os.makedirs(results_folder)
# Loop across each epoch type and its analyses.
# Fixed: dict.iteritems() is Python 2 only; .items() works on 2 and 3.
for epoch_type, epoch_analyses in analyses.items():
    epochs, events = load(subject, epoch_type)
    events = get_events_interactions(events)
    # Keep only eye tracker signal (x and y eye position)
    epochs.pick_channels(['UADC009-2104', 'UADC010-2104'])
    for analysis in epoch_analyses:
        fname = results_folder +\
            '%s_scores_%s_%s.npy' % (subject, epoch_type, analysis)
        # define to-be-predicted values
        y = np.array(events[analysis])
        # Define estimators depending on the analysis
        if 'angle' in analysis[:14]:
            # circular variable: angular ridge regression, angle-error score
            clf = AngularRegression(make_pipeline(StandardScaler(),
                                              LinearModel(Ridge())),
                                independent=False)
            scorer = scorer_angle
            kwargs = dict()
            gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
                                        n_jobs=24, **kwargs)
            y = np.array(y, dtype=float)
        elif 'sfreq' in analysis[:14]:
            # ordinal variable: ridge regression, Spearman-rho score
            clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))
            scorer = scorer_spearman
            kwargs = dict()
            gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
                                        n_jobs=24, **kwargs)
            y = np.array(y, dtype=float)
        elif ('cue_side' in analysis or 'cue_type' in analysis):
            # categorical variable: logistic regression, ROC-AUC score
            clf = make_pipeline(StandardScaler(),
                                LinearModel(LogisticRegression()))
            kwargs = dict()
            gat = GeneralizingEstimator(clf, scoring='roc_auc',
                                        n_jobs=24, **kwargs)
            le = preprocessing.LabelEncoder()
            le.fit(y)
            y = le.transform(y)
        # only consider non NaN (or non-zero label) values
        if ('cue_side' in analysis or 'cue_type' in analysis):
            sel = np.where(y != 0)[0]
        else:
            sel = np.where(~np.isnan(y))[0]
        # Run decoding with 12-fold stratified cross-validation.
        gat.fit(epochs._data[sel], y=y[sel])
        scores = cross_val_multiscore(gat, epochs._data[sel],
                                        y=y[sel], cv=StratifiedKFold(12))
        scores = scores.mean(axis=0)
        # save cross-validated scores
        np.save(fname, np.array(scores))
| [
"h5io.read_hdf5",
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"os.makedirs",
"numpy.where",
"mne.decoding.GeneralizingEstimator",
"os.path.join",
"sklearn.metrics.make_scorer",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.Stratifie... | [((1622, 1679), 'os.path.join', 'op.join', (["(path_data + 'results/' + subject + output_folder)"], {}), "(path_data + 'results/' + subject + output_folder)\n", (1629, 1679), True, 'import os.path as op\n'), ((1277, 1337), 'os.path.join', 'op.join', (['path_data', 'subject', "('behavior_%s.hdf5' % event_type)"], {}), "(path_data, subject, 'behavior_%s.hdf5' % event_type)\n", (1284, 1337), True, 'import os.path as op\n'), ((1351, 1367), 'h5io.read_hdf5', 'read_hdf5', (['fname'], {}), '(fname)\n', (1360, 1367), False, 'from h5io import read_hdf5\n'), ((1411, 1436), 'base.complete_behavior', 'complete_behavior', (['events'], {}), '(events)\n', (1428, 1436), False, 'from base import complete_behavior, get_events_interactions\n'), ((1460, 1517), 'os.path.join', 'op.join', (['path_data', 'subject', "('epochs_%s.fif' % event_type)"], {}), "(path_data, subject, 'epochs_%s.fif' % event_type)\n", (1467, 1517), True, 'import os.path as op\n'), ((1531, 1553), 'mne.read_epochs', 'mne.read_epochs', (['fname'], {}), '(fname)\n', (1546, 1553), False, 'import mne\n'), ((1687, 1717), 'os.path.exists', 'os.path.exists', (['results_folder'], {}), '(results_folder)\n', (1701, 1717), False, 'import os\n'), ((1723, 1750), 'os.makedirs', 'os.makedirs', (['results_folder'], {}), '(results_folder)\n', (1734, 1750), False, 'import os\n'), ((1895, 1926), 'base.get_events_interactions', 'get_events_interactions', (['events'], {}), '(events)\n', (1918, 1926), False, 'from base import complete_behavior, get_events_interactions\n'), ((2239, 2265), 'numpy.array', 'np.array', (['events[analysis]'], {}), '(events[analysis])\n', (2247, 2265), True, 'import numpy as np\n'), ((2766, 2790), 'numpy.array', 'np.array', (['y'], {'dtype': 'float'}), '(y, dtype=float)\n', (2774, 2790), True, 'import numpy as np\n'), ((4076, 4092), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4084, 4092), True, 'import numpy as np\n'), ((3118, 3142), 'numpy.array', 
'np.array', (['y'], {'dtype': 'float'}), '(y, dtype=float)\n', (3126, 3142), True, 'import numpy as np\n'), ((3698, 3714), 'numpy.where', 'np.where', (['(y != 0)'], {}), '(y != 0)\n', (3706, 3714), True, 'import numpy as np\n'), ((3957, 3976), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(12)'], {}), '(12)\n', (3972, 3976), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2407, 2423), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2421, 2423), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2668, 2687), 'sklearn.metrics.make_scorer', 'make_scorer', (['scorer'], {}), '(scorer)\n', (2679, 2687), False, 'from sklearn.metrics import make_scorer\n'), ((2862, 2878), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2876, 2878), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3371, 3437), 'mne.decoding.GeneralizingEstimator', 'GeneralizingEstimator', (['clf'], {'scoring': '"""roc_auc"""', 'n_jobs': '(24)'}), "(clf, scoring='roc_auc', n_jobs=24, **kwargs)\n", (3392, 3437), False, 'from mne.decoding import GeneralizingEstimator, cross_val_multiscore, LinearModel\n'), ((3495, 3523), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (3521, 3523), False, 'from sklearn import preprocessing\n'), ((2487, 2494), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2492, 2494), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((2892, 2899), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2897, 2899), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((3020, 3039), 'sklearn.metrics.make_scorer', 'make_scorer', (['scorer'], {}), '(scorer)\n', (3031, 3039), False, 'from sklearn.metrics import make_scorer\n'), ((3240, 3256), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3254, 3256), False, 'from sklearn.preprocessing import 
StandardScaler\n'), ((3760, 3771), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (3768, 3771), True, 'import numpy as np\n'), ((3302, 3322), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (3320, 3322), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n')] |
import math
import os
import random
import sys
import time
import logging
import numpy as np
import tensorflow as tf
from random import shuffle
import pickle
from tensorflow.python import debug as tf_debug
import qe_model
from tensorflow.python.training import saver as save_mod
from collections import deque
from qe_model import State
# Command-line flags: training hyperparameters, model size, and paths.
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
                          "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
                          "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
                            "Batch size to use during training.")
tf.app.flags.DEFINE_integer("output_classes", 5,
                            "Number of output classes.")
tf.app.flags.DEFINE_integer("size", 100, "Size of each model layer.")
tf.app.flags.DEFINE_integer("embedding_size", 620, "Word Embedding Dimension.")
tf.app.flags.DEFINE_integer("num_layers", 1, "Number of layers in the model.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 5000,
                            "How many training steps to do per checkpoint.")
# Mode flags consulted by main(): split_data / combine_ft / qescore select a
# one-off task; when none is set, the script trains.
tf.app.flags.DEFINE_boolean("qescore", False,
                            "Set to True for prediction.")
tf.app.flags.DEFINE_boolean("combine_ft", False,
                            "Combine the feature file and target file into 1 for all datasets.")
tf.app.flags.DEFINE_boolean("regression", True,
                            "Set to True for regression training. False for classification.")
tf.app.flags.DEFINE_boolean("split_data", False,
                            "Split data set into training, development, and test set.")
tf.app.flags.DEFINE_boolean("self_test", False,
                            "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False,
                            "Train using fp16 instead of fp32.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
_buckets = [9, 14, 24, 49, 69, 79, 99]
def buckify_data(data):
  """Group (features, label) pairs into length buckets.

  Each pair goes into the first bucket in `_buckets` large enough to hold
  the feature sequence; pairs longer than every bucket are dropped.
  Returns a list of lists, one per bucket.
  """
  bucketed = [[] for _ in _buckets]
  for features, label in data:
    # Index of the smallest bucket that fits, or None if nothing fits.
    slot = next((i for i, size in enumerate(_buckets)
                 if len(features) <= size), None)
    if slot is not None:
      bucketed[slot].append((features, label))
  return bucketed
def create_model(session, state):
  """Create translation model and initialize or load parameters in session."""
  model = qe_model.QEModel(
      _buckets,
      FLAGS.size,
      FLAGS.embedding_size,
      FLAGS.num_layers,
      FLAGS.output_classes,
      FLAGS.max_gradient_norm,
      FLAGS.batch_size,
      FLAGS.learning_rate,
      FLAGS.learning_rate_decay_factor,
      FLAGS.regression,
      state=state,
      dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)
  # Restore from an existing checkpoint when one is available; otherwise
  # start from freshly initialized variables.
  ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
  restorable = bool(ckpt) and save_mod.checkpoint_exists(ckpt.model_checkpoint_path)
  if restorable:
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    model.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("Created model with fresh parameters.")
    session.run(tf.global_variables_initializer())
  return model
# def load_and_enqueue(sess, enqueue_op, coord):
# with open('dummy_data/features.bin') as feature_file, open('dummy_data/labels.bin') as label_file:
# while not coord.should_stop():
# feature_array = np.fromfile(feature_file, np.float32, 128)
# if feature_array.shape[0] == 0:
# print('reach end of file, reset using seek(0,0)')
# feature_file.seek(0,0)
# label_file.seek(0,0)
# continue
# label_value = np.fromfile(label_file, np.float32, 128)
# sess.run(enqueue_op, feed_dict={feature_input: feature_array,
# label_input: label_value})
def combine_ft_helper(feature_file, target_file, output_file):
  """Join pickled features with line-per-example float labels.

  Reads feature vectors from `FLAGS.data_dir + feature_file` (pickle) and
  labels from `FLAGS.data_dir + target_file` (one float per line), pairs
  them up, and pickles the (feature, label) list to
  `FLAGS.data_dir + output_file`.
  """
  # Fix: the original opened the pickle files via bare `open(...)` inside
  # pickle.load/dump and never closed them; use context managers instead.
  with open(FLAGS.data_dir + feature_file, "rb") as ffeat:
    features = pickle.load(ffeat)
  with open(FLAGS.data_dir + target_file, "r") as flabels:
    data_set = [(feature, float(label))
                for feature, label in zip(features, flabels)]
  with open(FLAGS.data_dir + output_file, "wb") as fout:
    pickle.dump(data_set, fout)
def combine_ft():
  """Build combined (feature, label) pickles for the train/dev/test splits."""
  splits = (
      ('qualvec_train.txt', 'train.hter', 'train_set.p'),
      ('qualvec_dev.txt', 'dev.hter', 'develop_set.p'),
      ('qualvec_test.txt', 'test.hter', 'test_set.p'),
  )
  for feature_file, target_file, output_file in splits:
    combine_ft_helper(feature_file, target_file, output_file)
def split_data():
  """Shuffle qualvec/label pairs and split them 80/10/10.

  Reads feature vectors from `qualvec0.txt` (pickle) and labels from
  `ter_class.data` (one per line) under FLAGS.data_dir, shuffles the
  combined pairs, and writes `train_set.p`, `develop_set.p`, and
  `test_set.p` back under FLAGS.data_dir.
  """
  # Fixes vs. original: removed the unused `indices` local, and all file
  # handles are now closed via context managers (pickle.dump(open(...))
  # leaked them before).
  with open(FLAGS.data_dir + 'qualvec0.txt', "rb") as fqual:
    qualVec = pickle.load(fqual)
  data_set = []
  with open(FLAGS.data_dir + 'ter_class.data', "r") as flabels:
    for features, label in zip(qualVec, flabels):
      data_set.append((features, label))
  dataLen = len(data_set)
  shuffle(data_set)
  print('length of data ', len(data_set))
  # 80% train / 10% dev / 10% test boundaries.
  p1 = int(.8 * float(dataLen))
  p2 = int(.9 * float(dataLen))
  print(dataLen, p1, p2)
  splits = (("train_set.p", data_set[:p1]),
            ("develop_set.p", data_set[p1:p2]),
            ("test_set.p", data_set[p2:]))
  for filename, subset in splits:
    with open(FLAGS.data_dir + filename, "wb") as fout:
      pickle.dump(subset, fout)
def train():
  """Run the main training loop.

  Samples buckets in proportion to their data share, takes one optimizer
  step per iteration, and every FLAGS.steps_per_checkpoint steps: prints
  training RMSE, decays the learning rate on plateaus, saves a checkpoint,
  evaluates RMSE on the dev set, and writes TensorBoard summaries.
  """
  with tf.Session() as sess:
    # Create model.
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
    model = create_model(sess, State.TRAIN)
    sess.graph.finalize()
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    # Read data into buckets and compute their sizes.
    train_set = buckify_data(pickle.load(open(FLAGS.data_dir+"train_set.p", "rb")))
    dev_set = buckify_data(pickle.load(open(FLAGS.data_dir+"develop_set.p", "rb")))
    # test_set = pickle.load( open( "test_set.p", "rb" ) )
    train_bucket_sizes = [len(train_set[b]) for b in range(len(_buckets))]
    train_total_size = float(sum(train_bucket_sizes))
    # Cumulative distribution over buckets: entry i is the fraction of all
    # training examples held by buckets 0..i, used for sampling below.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in range(len(train_bucket_sizes))]
    dev_bucket_sizes = [len(dev_set[b]) for b in range(len(_buckets))]
    dev_total_size = float(sum(dev_bucket_sizes))
    # Fraction of dev examples per bucket, used to weight per-bucket losses.
    dev_buckets_frac = [i/dev_total_size for i in dev_bucket_sizes]
    # This is the training loop.
    step_time, mse_train = 0.0, 0.0
    current_step = 0
    previous_losses = []
    while True:
      # Choose a bucket according to data distribution. We pick a random number
      # in [0, 1] and use the corresponding interval in train_buckets_scale.
      random_number_01 = np.random.random_sample()
      bucket_id = min([i for i in range(len(train_buckets_scale))
                       if train_buckets_scale[i] > random_number_01])
      # Get a batch and make a step.
      start_time = time.time()
      inputs, labels = model.get_batch(train_set, bucket_id)
      # score is rmse if regression. loss is mse for regression
      # else it is accuracy for classification.
      _, step_loss, score_train = model.step(
          sess, inputs, labels, bucket_id, State.TRAIN,
          FLAGS.regression)
      # Running averages over the checkpoint window.
      step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
      # score_train += score_train / FLAGS.steps_per_checkpoint
      mse_train += step_loss / FLAGS.steps_per_checkpoint
      current_step += 1
      # Once in a while, we save checkpoint, print statistics, and run evals.
      if current_step % FLAGS.steps_per_checkpoint == 0:
        # Print statistics for the previous epoch.
        rmse_train = math.sqrt(mse_train)
        print ("global step %d learning rate %.4f step-time %.2f rmse "
               "%.4f" % (model.global_step.eval(), model.learning_rate.eval(),
                         step_time, rmse_train))
        # Decrease learning rate if no improvement was seen over last 3 times.
        if len(previous_losses) > 2 and rmse_train > max(previous_losses[-3:]):
          sess.run(model.learning_rate_decay_op)
        previous_losses.append(rmse_train)
        # Save checkpoint and zero timer and loss.
        checkpoint_path = os.path.join(FLAGS.train_dir, "qualScore.ckpt")
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
        dev_score = 0.0
        for bucket_id in range(len(_buckets)):
          if len(dev_set[bucket_id]) == 0:
            print(" eval: empty bucket %d" % (bucket_id))
            continue
          inputs, labels = model.get_batch(dev_set, bucket_id)
          lossList, eval_loss, outputs = model.step(
              sess, inputs, labels, bucket_id, State.TEST,
              FLAGS.regression)
          # Print a few sample predictions vs. labels for manual inspection.
          for x, y, loss in zip(outputs[:10], labels[:10], lossList[:10]):
            print(x,y,loss)
          # print('output shape', )
          # Weight each bucket's loss by its share of the dev set.
          dev_score += eval_loss*dev_buckets_frac[bucket_id]
          # dev_loss += eval_loss
          print(" eval: bucket %d score %.2f" % (bucket_id, math.sqrt(eval_loss)))
        # dev_score = math.sqrt(dev_score)
        print(" eval: all bucket rmse: %.2f" % (dev_score))
        sys.stdout.flush()
        # Write to summary writer
        summary_str = tf.Summary(value=[
            tf.Summary.Value(tag="dev. rmse", simple_value=dev_score),
            tf.Summary.Value(tag="train rmse", simple_value=rmse_train)])
        summary_writer.add_summary(summary_str, current_step)
        step_time, mse_train = 0.0, 0.0
def qescore():
  """Predict quality scores for the whole test set, one sentence at a time,
  and pickle the list of model outputs to `test_prediction.txt` under
  FLAGS.data_dir."""
  with tf.Session() as sess:
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    # sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    # Create model and load parameters.
    # summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    test_set = pickle.load(open(FLAGS.data_dir+"test_set.p", "rb"))
    out_path = os.path.join(FLAGS.data_dir,
                            "test_prediction.txt")
    # Get token-ids for the input sentences.
    results = []
    model = create_model(sess, State.QESCORE)
    model.batch_size = 1 # We decode one sentence at a time.
    # with open(out_path, mode="a") as outfile:
    for feature, label in test_set:
      # Get the bucket id for this sentence
      bucket_id = len(_buckets) - 1
      for i, bucket_size in enumerate(_buckets):
        if len(feature) <= bucket_size:
          bucket_id = i
          break
      else:
        # for-else: runs only when no bucket fit, i.e. the sentence is
        # longer than the largest bucket; it keeps the last bucket id and
        # will be truncated downstream.
        logging.warning("Sentence truncated")
      # Get a 1-element batch to feed the sentence to the model.
      inputs, labels = model.get_batch(
          {bucket_id: [(feature, label)]}, bucket_id)
      # Get output logits for the sentence.
      inputs = [np.reshape(x, (-1, FLAGS.embedding_size)) for x in inputs]
      # print('label size', len(labels))
      _, eval_loss, outputs = model.step(
          sess, inputs, labels, bucket_id, State.QESCORE,
          FLAGS.regression)
      # This is a greedy decoder - outputs are just argmaxes of output_logits.
      # Write the quality vectors to file
      # outputs2 = [output[0] for output in outputs]
      results.append(outputs)
      # np.savetxt(outfile, outputs)
      # outfile.write("{0}\n".format(outputs))
    # with open(out_path,'wb') as f:
    #   np.savetxt(f, results, fmt='%.5f')
    pickle.dump(results, open(out_path, "wb" ) )
def main(_):
  """Dispatch on the mode flags; falls through to training when none is set."""
  dispatch = ((FLAGS.split_data, split_data),
              (FLAGS.combine_ft, combine_ft),
              (FLAGS.qescore, qescore))
  for enabled, action in dispatch:
    if enabled:
      action()
      return
  train()


if __name__ == "__main__":
  tf.app.run()
| [
"tensorflow.python.training.saver.checkpoint_exists",
"math.sqrt",
"tensorflow.Summary.Value",
"tensorflow.app.run",
"numpy.reshape",
"tensorflow.Session",
"tensorflow.app.flags.DEFINE_boolean",
"sys.stdout.flush",
"numpy.random.random_sample",
"random.shuffle",
"tensorflow.app.flags.DEFINE_inte... | [((340, 405), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.5)', '"""Learning rate."""'], {}), "('learning_rate', 0.5, 'Learning rate.')\n", (365, 405), True, 'import tensorflow as tf\n'), ((406, 509), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate_decay_factor"""', '(0.99)', '"""Learning rate decays by this much."""'], {}), "('learning_rate_decay_factor', 0.99,\n 'Learning rate decays by this much.')\n", (431, 509), True, 'import tensorflow as tf\n'), ((532, 619), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""max_gradient_norm"""', '(5.0)', '"""Clip gradients to this norm."""'], {}), "('max_gradient_norm', 5.0,\n 'Clip gradients to this norm.')\n", (557, 619), True, 'import tensorflow as tf\n'), ((642, 729), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch size to use during training."""'], {}), "('batch_size', 64,\n 'Batch size to use during training.')\n", (669, 729), True, 'import tensorflow as tf\n'), ((754, 831), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""output_classes"""', '(5)', '"""Number of output classes."""'], {}), "('output_classes', 5, 'Number of output classes.')\n", (781, 831), True, 'import tensorflow as tf\n'), ((860, 929), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""size"""', '(100)', '"""Size of each model layer."""'], {}), "('size', 100, 'Size of each model layer.')\n", (887, 929), True, 'import tensorflow as tf\n'), ((930, 1009), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""embedding_size"""', '(620)', '"""Word Embedding Dimension."""'], {}), "('embedding_size', 620, 'Word Embedding Dimension.')\n", (957, 1009), True, 'import tensorflow as tf\n'), ((1010, 1088), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', 
(['"""num_layers"""', '(1)', '"""Number of layers in the model."""'], {}), "('num_layers', 1, 'Number of layers in the model.')\n", (1037, 1088), True, 'import tensorflow as tf\n'), ((1089, 1153), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""/tmp"""', '"""Data directory"""'], {}), "('data_dir', '/tmp', 'Data directory')\n", (1115, 1153), True, 'import tensorflow as tf\n'), ((1154, 1224), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""/tmp"""', '"""Training directory."""'], {}), "('train_dir', '/tmp', 'Training directory.')\n", (1180, 1224), True, 'import tensorflow as tf\n'), ((1225, 1335), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_train_data_size"""', '(0)', '"""Limit on the size of training data (0: no limit)."""'], {}), "('max_train_data_size', 0,\n 'Limit on the size of training data (0: no limit).')\n", (1252, 1335), True, 'import tensorflow as tf\n'), ((1360, 1470), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""steps_per_checkpoint"""', '(5000)', '"""How many training steps to do per checkpoint."""'], {}), "('steps_per_checkpoint', 5000,\n 'How many training steps to do per checkpoint.')\n", (1387, 1470), True, 'import tensorflow as tf\n'), ((1495, 1571), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""qescore"""', '(False)', '"""Set to True for prediction."""'], {}), "('qescore', False, 'Set to True for prediction.')\n", (1522, 1571), True, 'import tensorflow as tf\n'), ((1600, 1721), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""combine_ft"""', '(False)', '"""Combine the feature file and target file into 1 for all datasets."""'], {}), "('combine_ft', False,\n 'Combine the feature file and target file into 1 for all datasets.')\n", (1627, 1721), True, 'import tensorflow as tf\n'), ((1746, 1863), 'tensorflow.app.flags.DEFINE_boolean', 
'tf.app.flags.DEFINE_boolean', (['"""regression"""', '(True)', '"""Set to True for regression training. False for classification."""'], {}), "('regression', True,\n 'Set to True for regression training. False for classification.')\n", (1773, 1863), True, 'import tensorflow as tf\n'), ((1888, 2000), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""split_data"""', '(False)', '"""Split data set into training, development, and test set."""'], {}), "('split_data', False,\n 'Split data set into training, development, and test set.')\n", (1915, 2000), True, 'import tensorflow as tf\n'), ((2025, 2119), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""self_test"""', '(False)', '"""Run a self-test if this is set to True."""'], {}), "('self_test', False,\n 'Run a self-test if this is set to True.')\n", (2052, 2119), True, 'import tensorflow as tf\n'), ((2144, 2231), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""use_fp16"""', '(False)', '"""Train using fp16 instead of fp32."""'], {}), "('use_fp16', False,\n 'Train using fp16 instead of fp32.')\n", (2171, 2231), True, 'import tensorflow as tf\n'), ((2882, 3138), 'qe_model.QEModel', 'qe_model.QEModel', (['_buckets', 'FLAGS.size', 'FLAGS.embedding_size', 'FLAGS.num_layers', 'FLAGS.output_classes', 'FLAGS.max_gradient_norm', 'FLAGS.batch_size', 'FLAGS.learning_rate', 'FLAGS.learning_rate_decay_factor', 'FLAGS.regression'], {'state': 'state', 'dtype': 'dtype'}), '(_buckets, FLAGS.size, FLAGS.embedding_size, FLAGS.\n num_layers, FLAGS.output_classes, FLAGS.max_gradient_norm, FLAGS.\n batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,\n FLAGS.regression, state=state, dtype=dtype)\n', (2898, 3138), False, 'import qe_model\n'), ((3449, 3495), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (3478, 3495), True, 'import tensorflow as tf\n'), ((5453, 5470), 
'random.shuffle', 'shuffle', (['data_set'], {}), '(data_set)\n', (5460, 5470), False, 'from random import shuffle\n'), ((12810, 12822), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (12820, 12822), True, 'import tensorflow as tf\n'), ((3512, 3566), 'tensorflow.python.training.saver.checkpoint_exists', 'save_mod.checkpoint_exists', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (3538, 3566), True, 'from tensorflow.python.training import saver as save_mod\n'), ((5947, 5959), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5957, 5959), True, 'import tensorflow as tf\n'), ((6158, 6208), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.train_dir', 'sess.graph'], {}), '(FLAGS.train_dir, sess.graph)\n', (6179, 6208), True, 'import tensorflow as tf\n'), ((10269, 10281), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10279, 10281), True, 'import tensorflow as tf\n'), ((10617, 10668), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""test_prediction.txt"""'], {}), "(FLAGS.data_dir, 'test_prediction.txt')\n", (10629, 10668), False, 'import os\n'), ((3796, 3829), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3827, 3829), True, 'import tensorflow as tf\n'), ((7283, 7308), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (7306, 7308), True, 'import numpy as np\n'), ((7502, 7513), 'time.time', 'time.time', ([], {}), '()\n', (7511, 7513), False, 'import time\n'), ((8307, 8327), 'math.sqrt', 'math.sqrt', (['mse_train'], {}), '(mse_train)\n', (8316, 8327), False, 'import math\n'), ((8859, 8906), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""qualScore.ckpt"""'], {}), "(FLAGS.train_dir, 'qualScore.ckpt')\n", (8871, 8906), False, 'import os\n'), ((9897, 9915), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9913, 9915), False, 'import sys\n'), ((11219, 11256), 'logging.warning', 'logging.warning', (['"""Sentence 
truncated"""'], {}), "('Sentence truncated')\n", (11234, 11256), False, 'import logging\n'), ((11888, 11929), 'numpy.reshape', 'np.reshape', (['x', '(-1, FLAGS.embedding_size)'], {}), '(x, (-1, FLAGS.embedding_size))\n', (11898, 11929), True, 'import numpy as np\n'), ((7898, 7909), 'time.time', 'time.time', ([], {}), '()\n', (7907, 7909), False, 'import time\n'), ((10003, 10060), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""dev. rmse"""', 'simple_value': 'dev_score'}), "(tag='dev. rmse', simple_value=dev_score)\n", (10019, 10060), True, 'import tensorflow as tf\n'), ((10074, 10133), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train rmse"""', 'simple_value': 'rmse_train'}), "(tag='train rmse', simple_value=rmse_train)\n", (10090, 10133), True, 'import tensorflow as tf\n'), ((9763, 9783), 'math.sqrt', 'math.sqrt', (['eval_loss'], {}), '(eval_loss)\n', (9772, 9783), False, 'import math\n')] |
import numpy as np
import pandas as pd
from controller.cnn import CNN
import keras
import os
from sklearn.model_selection import train_test_split
from keras.layers import Activation
from keras.layers import Conv2D, BatchNormalization, Dense, Flatten, Reshape
class SudokuSolver:
    """Solves 9x9 Sudoku puzzles with a CNN trained on quiz/solution pairs."""

    def __init__(self, train=False):
        # When not training, load the pretrained Keras model immediately.
        # (Removed a stray dead `pass` from the original.)
        if not train:
            self.load_model()

    def norm(self, a):
        """Map digit values 0..9 into the network input range [-0.5, 0.5]."""
        return (a / 9) - .5

    def denorm(self, a):
        """Inverse of `norm`: map [-0.5, 0.5] back to digit values 0..9."""
        return (a + .5) * 9

    def get_data_set(self):
        '''
        Loading 1M Sudoku's as Dataset, This will take some time Hang On
        '''
        dataset_path = "../dataset/sudoku.csv"
        data = pd.read_csv(dataset_path)
        feat_raw = data['quizzes']
        label_raw = data['solutions']
        # Each quiz is an 81-char digit string -> normalized 9x9x1 grid.
        feat = np.array([np.array([int(j) for j in i]).reshape((9, 9, 1))
                         for i in feat_raw])
        feat = feat / 9 - .5
        # Each solution becomes 81 class labels in 0..8 (digit - 1).
        label = np.array([np.array([int(j) for j in i]).reshape((81, 1)) - 1
                          for i in label_raw])
        del feat_raw
        del label_raw
        # Returns x_train, x_test, y_train, y_test.
        return train_test_split(feat, label, test_size=0.2, random_state=42)

    def train_model(self):
        """Train a fresh CNN on the 80% training split."""
        x_train, x_test, y_train, y_test = self.get_data_set()
        self.model = CNN()
        self.model.train(x_train, y_train)

    def load_model(self):
        """Load the saved Keras model from controller/cnn.model."""
        model_path = os.path.join(os.getcwd(), "controller/cnn.model")
        self.model = keras.models.load_model(model_path)

    def solve(self, sample):
        '''
        This function solve's the sudoku by filling blank positions one by one.
        '''
        feat = sample
        while True:
            out = self.model.predict(feat.reshape((1, 9, 9, 1))).squeeze()
            pred = np.argmax(out, axis=1).reshape((9, 9)) + 1
            prob = np.around(np.max(out, axis=1).reshape((9, 9)), 2)
            feat = self.denorm(feat).reshape((9, 9))
            mask = (feat == 0)
            if mask.sum() == 0:
                break
            # Fill only the single most confident blank cell, then re-predict.
            prob_new = prob * mask
            ind = np.argmax(prob_new)
            x, y = ind // 9, ind % 9
            feat[x][y] = pred[x][y]
            feat = self.norm(feat)
        return pred

    def test_accuracy(self, feats, labels):
        """Print the fraction of puzzles whose full grid is solved exactly."""
        correct = 0
        for i, feat in enumerate(feats):
            pred = self.solve(feat)
            true = labels[i].reshape((9, 9)) + 1
            if abs(true - pred).sum() == 0:
                correct += 1
        print(correct / feats.shape[0])

    def solve_sudoku(self, game):
        """Solve a 9x9 integer grid (0 = blank); returns the 9x9 prediction."""
        game = self.norm(game.reshape((9, 9, 1)))
        return self.solve(game)
if __name__ == "__main__":
    ss = SudokuSolver()
    game = [[0, 8, 0, 0, 3, 2, 0, 0, 1],
            [7, 0, 3, 0, 8, 0, 0, 0, 2],
            [5, 0, 0, 0, 0, 7, 0, 3, 0],
            [0, 5, 0, 0, 0, 1, 9, 7, 0],
            [6, 0, 0, 7, 0, 9, 0, 0, 8],
            [0, 4, 7, 2, 0, 0, 0, 5, 0],
            [0, 2, 0, 6, 0, 0, 0, 0, 9],
            [8, 0, 0, 0, 9, 0, 3, 0, 5],
            [3, 0, 0, 8, 2, 0, 0, 1, 0]]
    # BUG FIX: the original did `np.array([int(j) for j in game])`, but
    # `game` is a nested list, so each `j` is a row (a list) and int(j)
    # raised TypeError. Convert the whole grid directly instead.
    game = np.array(game, dtype=int).reshape((9, 9, 1))
    # ss.train()
    # NOTE(review): SudokuSolver() already loads the model in __init__, so
    # this explicit reload is redundant but harmless; kept for parity.
    ss.load_model()
    game = ss.solve_sudoku(game)
    print('solved puzzle:\n')
    print(game)
| [
"keras.models.load_model",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.argmax",
"os.getcwd",
"numpy.max",
"numpy.array",
"controller.cnn.CNN"
] | [((688, 713), 'pandas.read_csv', 'pd.read_csv', (['dataset_path'], {}), '(dataset_path)\n', (699, 713), True, 'import pandas as pd\n'), ((963, 977), 'numpy.array', 'np.array', (['feat'], {}), '(feat)\n', (971, 977), True, 'import numpy as np\n'), ((1161, 1176), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1169, 1176), True, 'import numpy as np\n'), ((1267, 1328), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feat', 'label'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(feat, label, test_size=0.2, random_state=42)\n', (1283, 1328), False, 'from sklearn.model_selection import train_test_split\n'), ((1505, 1510), 'controller.cnn.CNN', 'CNN', ([], {}), '()\n', (1508, 1510), False, 'from controller.cnn import CNN\n'), ((1683, 1718), 'keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), '(model_path)\n', (1706, 1718), False, 'import keras\n'), ((1624, 1635), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1633, 1635), False, 'import os\n'), ((2321, 2340), 'numpy.argmax', 'np.argmax', (['prob_new'], {}), '(prob_new)\n', (2330, 2340), True, 'import numpy as np\n'), ((2017, 2039), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (2026, 2039), True, 'import numpy as np\n'), ((2087, 2106), 'numpy.max', 'np.max', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (2093, 2106), True, 'import numpy as np\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import numpy as np
from typing import Dict
import uuid
import logging
from collections import namedtuple
import msgpack
from datetime import datetime
import traceback
from singa_auto.model import Params
from singa_auto.advisor import ParamsType
from singa_auto.utils.local_cache import LocalCache
from .redis import RedisSession
logger = logging.getLogger(__name__)


# Raised when params passed to / requested from the cache are invalid.
class InvalidParamsError(Exception): pass
class InvalidParamsFormatError(Exception): pass


# Key prefix for all of this cache's entries in Redis; the two markers below
# are used when serializing param values (numpy arrays are tagged with
# PARAM_DATA_TYPE_NUMPY, joined by PARAM_DATA_TYPE_SEPARATOR).
REDIS_NAMESPACE = 'PARAMS'
PARAM_DATA_TYPE_SEPARATOR = '//'
PARAM_DATA_TYPE_NUMPY = 'NP'

# Metadata tracked for each stored params blob.
_ParamMeta = namedtuple('_ParamMeta', ('param_id', 'score', 'time'))
class ParamCache(object):
    '''
    Retrieves and caches parameters for a session & a worker, backed by an in-memory cache and Redis for cross-worker sharing (optional).
    :param str session_id: Session ID associated with the parameters
    '''
    '''
    Internally, organises data into these namespaces:
        params:<param_id> | Params by ID
        meta | Aggregate of all global metadata:
            { params: { GLOBAL_BEST: { score, time, param_id },
                        { GLOBAL_RECENT: { score, time, param_id } }
    '''

    def __init__(self,
                 session_id='local',
                 redis_host=None,
                 redis_port=None,
                 cache_size=4):
        self._params: Dict[str, _ParamMeta] = {} # Stores params metadata
        redis_namespace = f'{REDIS_NAMESPACE}:{session_id}'
        self._redis = RedisSession(redis_namespace, redis_host, redis_port)
        self._local_cache = LocalCache(cache_size)

    '''
    Stores parameters into underlying storage.
    :param Params params: Parameters as a { <name>: <numpy array> } dictionary
    :param datetime time: When the parameters were produced
    :param float score: Score associated with the parameters
    '''
    def store_params(self, params: Params, score: float = None, time: datetime = None):
        if params is None:
            raise InvalidParamsError('`params` cannot be `None`')

        # Lock across the metadata pull/update/push so concurrent workers
        # don't interleave their updates.
        self._redis.acquire_lock()
        try:
            # With redis, sync in-memory metadata with Redis'
            self._pull_from_redis()
            param_meta = self._update_params_meta(score, time)
            if param_meta:
                # Store input params in in-memory cache
                self._local_cache.put(param_meta.param_id, params)
            # NOTE(review): self._redis is always assigned in __init__; this
            # truthiness check relies on RedisSession's __bool__ — confirm
            # it is meant to gate on "Redis actually configured".
            if self._redis:
                self._push_to_redis()
        finally:
            self._redis.release_lock()

    '''
    Retrieves parameters from underlying storage.
    :param ParamsType params_type: Type of parameters to retrieve
    :returns: Parameters as a { <name>: <numpy array> } dictionary
    :rtype: Params
    '''
    def retrieve_params(self, params_type: ParamsType) -> Params:
        self._redis.acquire_lock()
        try:
            # With redis, sync in-memory metadata with Redis'
            self._pull_from_redis()
            # Get param id to fetch
            param_id = self._get_params_by_type(params_type)
            if param_id is None:
                return None
            logger.info('To use params "{}"'.format(param_id))
            # Check in cache first
            params = self._local_cache.get(param_id)
            if params is not None:
                return params
            # Check in redis next, and store it in cache
            params = self._pull_params_from_redis(param_id)
            if params is None:
                logger.error('Params don\'t exist in Redis!')
                return None
            self._local_cache.put(param_id, params)
            return params
        finally:
            self._redis.release_lock()

    '''
    Clears all parameters for this session from underlying storage.
    '''
    def clear_all_params(self):
        self._clear_all_from_redis()

    ####################################
    # Policies for params storage
    ####################################

    # Given input params with score & time, update params metadata
    # Returns param meta for the input params, None if params meta is not to be stored
    def _update_params_meta(self, score: float, time: datetime):
        score = score or 0
        time = time or datetime.now()
        param_id = str(uuid.uuid4()) # Give it an ID
        param_meta = _ParamMeta(param_id, score, time)

        # Update local recent params
        prev_meta = self._params.get('LOCAL_RECENT')
        if prev_meta is None or time >= prev_meta.time:
            self._params['LOCAL_RECENT'] = param_meta

        # Update local best params
        prev_meta = self._params.get('LOCAL_BEST')
        if prev_meta is None or score >= prev_meta.score:
            self._params['LOCAL_BEST'] = param_meta

        # Update global recent params
        prev_meta = self._params.get('GLOBAL_RECENT')
        if prev_meta is None or time >= prev_meta.time:
            self._params['GLOBAL_RECENT'] = param_meta

        # Update global best params
        prev_meta = self._params.get('GLOBAL_BEST')
        if prev_meta is None or score >= prev_meta.score:
            self._params['GLOBAL_BEST'] = param_meta

        return param_meta

    def _get_params_by_type(self, params_type: ParamsType) -> str:
        # Map a requested params type to the param_id tracked in metadata.
        if params_type == ParamsType.NONE:
            return None
        elif params_type == ParamsType.LOCAL_RECENT:
            return self._get_local_recent_params()
        elif params_type == ParamsType.LOCAL_BEST:
            return self._get_local_best_params()
        elif params_type == ParamsType.GLOBAL_RECENT:
            return self._get_global_recent_params()
        elif params_type == ParamsType.GLOBAL_BEST:
            return self._get_global_best_params()
        else:
            raise InvalidParamsError('No such params type: "{}"'.format(params_type))

    def _get_local_recent_params(self):
        # param_id of the most recently stored params by this worker, if any.
        param_meta = self._params.get('LOCAL_RECENT')
        if param_meta is None:
            return None
        return param_meta.param_id

    def _get_local_best_params(self):
        # param_id of this worker's highest-scoring params, if any.
        param_meta = self._params.get('LOCAL_BEST')
        if param_meta is None:
            return None
        return param_meta.param_id

    def _get_global_recent_params(self):
        # param_id of the most recently stored params across workers, if any.
        param_meta = self._params.get('GLOBAL_RECENT')
        if param_meta is None:
            return None
        return param_meta.param_id

    def _get_global_best_params(self):
        # param_id of the highest-scoring params across workers, if any.
        param_meta = self._params.get('GLOBAL_BEST')
        if param_meta is None:
            return None
        return param_meta.param_id

    ####################################
    # Redis communication
    ####################################

    # Pulls metadata from Redis, updating local metadata
    def _pull_from_redis(self):
        redis_params = self._pull_metadata_from_redis()

        # Merge with local params meta
        for (param_type, param_meta) in redis_params.items():
            self._params[param_type] = param_meta

    # Pushes metadata & selected params to Redis, deletes outdated params on Redis
    def _push_to_redis(self):
        # Only the global entries are shared; LOCAL_* stay worker-private.
        params_to_push = ['GLOBAL_BEST', 'GLOBAL_RECENT']

        # Extract params meta to share
        params_shared = { param_type: param_meta for (param_type, param_meta) in self._params.items() if param_type in params_to_push }

        # Compare new against old params, and determine which params to push and delete from Redis
        redis_params = self._pull_metadata_from_redis()
        og_param_ids = set([x.param_id for x in redis_params.values()])
        new_param_ids = set([x.param_id for x in params_shared.values()])
        to_add = [x for x in new_param_ids if x not in og_param_ids]
        to_delete = [x for x in og_param_ids if x not in new_param_ids]

        # For each param to add, push it
        for param_id in to_add:
            params = self._local_cache.get(param_id)
            if params:
                self._push_params_to_redis(param_id, params)

        # Delete params to delete
        if len(to_delete) > 0:
            self._delete_params_from_redis(*to_delete)

        # Push updated metadata to Redis
        self._push_metadata_to_redis(params_shared)

    def _push_metadata_to_redis(self, params):
        # Serialize each _ParamMeta into a JSON-safe dict before storing.
        redis_params = { param_type: self._param_meta_to_jsonable(param_meta) for (param_type, param_meta) in params.items() }
        metadata = {
            'params': redis_params
        }
        logger.info('Pushing metadata to Redis: {}...'.format(metadata))
        metadata_str = json.dumps(metadata)
        self._redis.set('meta', metadata_str)

    def _pull_metadata_from_redis(self):
        metadata_str = self._redis.get('meta')

        # Pull metadata from redis
        if metadata_str is not None:
            metadata = json.loads(metadata_str)
            logger.info('Pulled metadata from Redis: {}'.format(metadata))

            # For each param stored on Redis, update its metadata
            params = metadata.get('params', {})
            params = { param_type: self._jsonable_to_param_meta(jsonable) for (param_type, jsonable) in params.items() }
            return params

        return {}

    def _delete_params_from_redis(self, *param_ids):
        logger.info('Deleting params: {}...'.format(param_ids))
        param_names = ['params:{}'.format(x) for x in param_ids]
        self._redis.delete(*param_names)

    # Clears ALL metadata and params for session from Redis
    def _clear_all_from_redis(self):
        logger.info('Clearing metadata and params from Redis...')
        self._redis.delete('meta')
        self._redis.delete_pattern('params:*')

    def _push_params_to_redis(self, param_id: str, params: Params):
        logger.info('Pushing params: "{}"...'.format(param_id))
        param_name = 'params:{}'.format(param_id)
        params_bytes = _serialize_params(params)
        self._redis.set(param_name, params_bytes)

    def _pull_params_from_redis(self, param_id: str) -> Params:
        logger.info('Pulling params: "{}"...'.format(param_id))
        param_name = 'params:{}'.format(param_id)
        params_bytes = self._redis.get(param_name)
        if params_bytes is None:
            return None
        params = _deserialize_params(params_bytes)
        return params

    def _param_meta_to_jsonable(self, param_meta: _ParamMeta):
        # namedtuple -> plain dict; datetime must be stringified for JSON.
        jsonable = param_meta._asdict()
        jsonable['time'] = str(jsonable['time'])
        return jsonable

    def _jsonable_to_param_meta(self, jsonable):
        # Inverse of _param_meta_to_jsonable; the format string must match
        # the default str(datetime) output used when serializing.
        jsonable['time'] = datetime.strptime(jsonable['time'], '%Y-%m-%d %H:%M:%S.%f')
        param_meta = _ParamMeta(**jsonable)
        return param_meta
def _serialize_params(params):
# Serialize as `msgpack`
params_simple = _simplify_params(params)
params_bytes = msgpack.packb(params_simple, use_bin_type=True)
return params_bytes
def _deserialize_params(params_bytes):
# Deserialize as `msgpack`
params_simple = msgpack.unpackb(params_bytes, raw=False)
params = _unsimplify_params(params_simple)
return params
def _simplify_params(params):
try:
params_simple = {}
assert isinstance(params, dict)
for (name, value) in params.items():
assert isinstance(name, str)
assert PARAM_DATA_TYPE_SEPARATOR not in name # Internally used as separator for types
# If value is a numpy array, prefix it with type
# Otherwise, it must be one of the basic types
if isinstance(value, np.ndarray):
name = f'{PARAM_DATA_TYPE_NUMPY}{PARAM_DATA_TYPE_SEPARATOR}{name}'
value = value.tolist()
else:
assert isinstance(value, (str, float, int))
params_simple[name] = value
return params_simple
except:
traceback.print_stack()
raise InvalidParamsFormatError()
def _unsimplify_params(params_simple):
params = {}
for (name, value) in params_simple.items():
if PARAM_DATA_TYPE_SEPARATOR in name:
(type_id, name) = name.split(PARAM_DATA_TYPE_SEPARATOR)
if type_id == PARAM_DATA_TYPE_NUMPY:
value = np.array(value)
params[name] = value
return params
| [
"logging.getLogger",
"json.loads",
"collections.namedtuple",
"traceback.print_stack",
"datetime.datetime.strptime",
"msgpack.packb",
"json.dumps",
"uuid.uuid4",
"msgpack.unpackb",
"datetime.datetime.now",
"numpy.array",
"singa_auto.utils.local_cache.LocalCache"
] | [((1142, 1169), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1159, 1169), False, 'import logging\n'), ((1365, 1420), 'collections.namedtuple', 'namedtuple', (['"""_ParamMeta"""', "('param_id', 'score', 'time')"], {}), "('_ParamMeta', ('param_id', 'score', 'time'))\n", (1375, 1420), False, 'from collections import namedtuple\n'), ((11688, 11735), 'msgpack.packb', 'msgpack.packb', (['params_simple'], {'use_bin_type': '(True)'}), '(params_simple, use_bin_type=True)\n', (11701, 11735), False, 'import msgpack\n'), ((11852, 11892), 'msgpack.unpackb', 'msgpack.unpackb', (['params_bytes'], {'raw': '(False)'}), '(params_bytes, raw=False)\n', (11867, 11892), False, 'import msgpack\n'), ((2452, 2474), 'singa_auto.utils.local_cache.LocalCache', 'LocalCache', (['cache_size'], {}), '(cache_size)\n', (2462, 2474), False, 'from singa_auto.utils.local_cache import LocalCache\n'), ((9432, 9452), 'json.dumps', 'json.dumps', (['metadata'], {}), '(metadata)\n', (9442, 9452), False, 'import json\n'), ((11432, 11491), 'datetime.datetime.strptime', 'datetime.strptime', (["jsonable['time']", '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(jsonable['time'], '%Y-%m-%d %H:%M:%S.%f')\n", (11449, 11491), False, 'from datetime import datetime\n'), ((5131, 5145), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5143, 5145), False, 'from datetime import datetime\n'), ((5169, 5181), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5179, 5181), False, 'import uuid\n'), ((9684, 9708), 'json.loads', 'json.loads', (['metadata_str'], {}), '(metadata_str)\n', (9694, 9708), False, 'import json\n'), ((12710, 12733), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (12731, 12733), False, 'import traceback\n'), ((13068, 13083), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (13076, 13083), True, 'import numpy as np\n')] |
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import annotations
import itertools
from abc import ABC, abstractmethod
from functools import singledispatch
from typing import Any, Dict, List, Optional, Union
import numpy as np
from braket.default_simulator.observables import (
Hadamard,
Hermitian,
Identity,
PauliX,
PauliY,
PauliZ,
TensorProduct,
)
from braket.default_simulator.operation import Observable
from braket.default_simulator.operation_helpers import ir_matrix_to_ndarray
from braket.default_simulator.simulation import StateVectorSimulation
from braket.ir import jaqcd
def from_braket_result_type(result_type) -> ResultType:
""" Creates a `ResultType` corresponding to the given Braket instruction.
Args:
result_type: Result type for a circuit specified using the `braket.ir.jacqd` format.
Returns:
ResultType: Instance of specific `ResultType` corresponding to the type of result_type
Raises:
ValueError: If no concrete `ResultType` class has been registered
for the Braket instruction type
"""
return _from_braket_result_type(result_type)
@singledispatch
def _from_braket_result_type(result_type):
raise ValueError(f"Result type {result_type} not recognized")
class ResultType(ABC):
"""
An abstract class that when implemented defines a calculation on a
quantum state simulation.
"""
@abstractmethod
def calculate(self, simulation: StateVectorSimulation) -> Any:
# Return type of any due to lack of sum type support in Python
""" Calculate a result from the given quantum state vector simulation.
Args:
simulation (StateVectorSimulation): The quantum state vector simulation
to use in the calculation
Returns:
Any: The result of the calculation
"""
class ObservableResultType(ResultType, ABC):
"""
Holds an observable to perform a calculation in conjunction with a state.
"""
def __init__(self, observable: Observable):
"""
Args:
observable (Observable): The observable for which the desired result is calculated
"""
self._observable = observable
@property
def observable(self):
""" Observable: The observable for which the desired result is calculated."""
return self._observable
def calculate(self, simulation: StateVectorSimulation) -> Union[float, List[float]]:
state = simulation.state_with_observables
qubit_count = simulation.qubit_count
eigenvalues = self._observable.eigenvalues
targets = self._observable.measured_qubits
if targets:
return ObservableResultType._calculate_for_targets(
state, qubit_count, targets, eigenvalues, self._calculate_from_prob_distribution,
)
else:
return [
ObservableResultType._calculate_for_targets(
state, qubit_count, [i], eigenvalues, self._calculate_from_prob_distribution,
)
for i in range(qubit_count)
]
@staticmethod
@abstractmethod
def _calculate_from_prob_distribution(
probabilities: np.ndarray, eigenvalues: np.ndarray
) -> float:
""" Calculates a result from the probabilities of eigenvalues.
Args:
probabilities (np.ndarray): The probability of measuring each eigenstate
eigenvalues (np.ndarray): The eigenvalue corresponding to each eigenstate
Returns:
float: The result of the calculation
"""
@staticmethod
def _calculate_for_targets(
state, qubit_count, targets, eigenvalues, calculate_from_prob_distribution
):
prob = _marginal_probability(state, qubit_count, targets)
return calculate_from_prob_distribution(prob, eigenvalues)
class StateVector(ResultType):
"""
Simply returns the given state vector.
"""
def calculate(self, simulation: StateVectorSimulation) -> np.ndarray:
""" Return the given state vector of the simulation.
Args:
simulation (StateVectorSimulation): The simulation whose state vector will be returned
Returns:
np.ndarray: The state vector (before observables) of the simulation
"""
return simulation.state_vector
@_from_braket_result_type.register
def _(statevector: jaqcd.StateVector):
return StateVector()
class Amplitude(ResultType):
"""
Extracts the amplitudes of the desired computational basis states.
"""
def __init__(self, states: List[str]):
"""
Args:
states (List[str]): The computational basis states whose amplitudes are desired
"""
self._states = states
def calculate(self, simulation: StateVectorSimulation) -> Dict[str, complex]:
""" Return the amplitudes of the desired computational basis states in the state
of the given simulation.
Args:
simulation (StateVectorSimulation): The simulation whose state vector amplitudes
will be returned
Returns:
Dict[str, complex]: A dict keyed on computational basis states as bitstrings,
with corresponding values the amplitudes
"""
state = simulation.state_vector
return {basis_state: state[int(basis_state, 2)] for basis_state in self._states}
@_from_braket_result_type.register
def _(amplitude: jaqcd.Amplitude):
return Amplitude(amplitude.states)
class Probability(ResultType):
"""
Computes the marginal probabilities of computational basis states on the desired qubits.
"""
def __init__(self, targets: Optional[List[int]] = None):
"""
Args:
targets (Optional[List[int]]): The qubit indices on which probabilities are desired.
If no targets are specified, the probabilities are calculated on the entire state.
Default: `None`
"""
self._targets = targets
def calculate(self, simulation: StateVectorSimulation) -> np.ndarray:
""" Return the marginal probabilities of computational basis states on the target qubits.
Probabilities are marginalized over all non-target qubits.
Args:
simulation (StateVectorSimulation): The simulation from which probabilities
are calculated
Returns:
np.ndarray: An array of probabilities of length equal to 2^(number of target qubits),
indexed by the decimal encoding of the computational basis state on the target qubits
"""
return _marginal_probability(simulation.state_vector, simulation.qubit_count, self._targets)
@_from_braket_result_type.register
def _(probability: jaqcd.Probability):
return Probability(probability.targets)
class Expectation(ObservableResultType):
"""
Holds an observable :math:`O` to calculate its expected value.
"""
def __init__(self, observable: Observable):
"""
Args:
observable (Observable): The observable for which expected value is calculated
"""
super().__init__(observable)
@staticmethod
def _calculate_from_prob_distribution(
probabilities: np.ndarray, eigenvalues: np.ndarray
) -> float:
return (probabilities @ eigenvalues).real
@_from_braket_result_type.register
def _(expectation: jaqcd.Expectation):
return Expectation(_from_braket_observable(expectation.observable, expectation.targets))
class Variance(ObservableResultType):
"""
Holds an observable :math:`O` to calculate its variance.
"""
def __init__(self, observable: Observable):
"""
Args:
observable (Observable): The observable for which variance is calculated
"""
super().__init__(observable)
@staticmethod
def _calculate_from_prob_distribution(
probabilities: np.ndarray, eigenvalues: np.ndarray
) -> float:
return probabilities @ (eigenvalues.real ** 2) - (probabilities @ eigenvalues).real ** 2
@_from_braket_result_type.register
def _(variance: jaqcd.Variance):
return Variance(_from_braket_observable(variance.observable, variance.targets))
def _from_braket_observable(
ir_observable: List[Union[str, List[List[List[float]]]]], ir_targets: Optional[List[int]] = None
) -> Observable:
targets = list(ir_targets) if ir_targets else None
if len(ir_observable) == 1:
return _from_single_observable(ir_observable[0], targets)
else:
observable = TensorProduct(
[_from_single_observable(factor, targets, is_factor=True) for factor in ir_observable]
)
if targets:
raise ValueError(
f"Found {len(targets)} more target qubits than the tensor product acts on"
)
return observable
def _from_single_observable(
observable: Union[str, List[List[List[float]]]],
targets: Optional[List[int]] = None,
# IR tensor product observables are decoupled from targets
is_factor: bool = False,
) -> Observable:
if observable == "i":
return Identity(_actual_targets(targets, 1, is_factor))
elif observable == "h":
return Hadamard(_actual_targets(targets, 1, is_factor))
elif observable == "x":
return PauliX(_actual_targets(targets, 1, is_factor))
elif observable == "y":
return PauliY(_actual_targets(targets, 1, is_factor))
elif observable == "z":
return PauliZ(_actual_targets(targets, 1, is_factor))
else:
try:
matrix = ir_matrix_to_ndarray(observable)
if is_factor:
num_qubits = int(np.log2(len(matrix)))
return Hermitian(matrix, _actual_targets(targets, num_qubits, True))
else:
return Hermitian(matrix, targets)
except Exception:
raise ValueError(f"Invalid observable specified: {observable}")
def _actual_targets(targets: List[int], num_qubits: int, is_factor: bool):
if not is_factor:
return targets
try:
return [targets.pop(0) for _ in range(num_qubits)]
except Exception:
raise ValueError("Insufficient qubits for tensor product")
def _marginal_probability(
state: np.ndarray, qubit_count: int, targets: List[int] = None
) -> np.ndarray:
""" Return the marginal probability of the computational basis states.
The marginal probability is obtained by summing the probabilities on
the unused qubits. If no targets are specified, then the probability
of all basis states is returned.
"""
probabilities = np.abs(state) ** 2
if targets is None or targets == list(range(qubit_count)):
# All qubits targeted, no need to marginalize
return probabilities
targets = np.hstack(targets)
# Find unused qubits and sum over them
unused_qubits = list(set(range(qubit_count)) - set(targets))
as_tensor = probabilities.reshape([2] * qubit_count)
marginal = np.apply_over_axes(np.sum, as_tensor, unused_qubits).flatten()
# Reorder qubits to match targets
basis_states = np.array(list(itertools.product([0, 1], repeat=len(targets))))
perm = np.ravel_multi_index(
basis_states[:, np.argsort(np.argsort(targets))].T, [2] * len(targets)
)
return marginal[perm]
| [
"numpy.abs",
"braket.default_simulator.operation_helpers.ir_matrix_to_ndarray",
"numpy.hstack",
"braket.default_simulator.observables.Hermitian",
"numpy.argsort",
"numpy.apply_over_axes"
] | [((11462, 11480), 'numpy.hstack', 'np.hstack', (['targets'], {}), '(targets)\n', (11471, 11480), True, 'import numpy as np\n'), ((11281, 11294), 'numpy.abs', 'np.abs', (['state'], {}), '(state)\n', (11287, 11294), True, 'import numpy as np\n'), ((11662, 11714), 'numpy.apply_over_axes', 'np.apply_over_axes', (['np.sum', 'as_tensor', 'unused_qubits'], {}), '(np.sum, as_tensor, unused_qubits)\n', (11680, 11714), True, 'import numpy as np\n'), ((11914, 11933), 'numpy.argsort', 'np.argsort', (['targets'], {}), '(targets)\n', (11924, 11933), True, 'import numpy as np\n'), ((10232, 10264), 'braket.default_simulator.operation_helpers.ir_matrix_to_ndarray', 'ir_matrix_to_ndarray', (['observable'], {}), '(observable)\n', (10252, 10264), False, 'from braket.default_simulator.operation_helpers import ir_matrix_to_ndarray\n'), ((10472, 10498), 'braket.default_simulator.observables.Hermitian', 'Hermitian', (['matrix', 'targets'], {}), '(matrix, targets)\n', (10481, 10498), False, 'from braket.default_simulator.observables import Hadamard, Hermitian, Identity, PauliX, PauliY, PauliZ, TensorProduct\n')] |
from .constraint import Constraint
from .constraint import ConstraintType, DiscretizationType
import numpy as np
class CanonicalConicConstraint(Constraint):
"""Base class for all canonical conic constraints.
A canonical conic constraint is one with the following form
.. math::
(a[i] + da[i]) u + (b[i] + db[i]) x + (c[i] + dc[i]) \leq 0, \\\\
[da[i, j], db[i, j], dc[i, j]]^\top = P[i, j] u, \|u\|_2 \leq 1,
where P[i, j] is a 3x3 matrix. Notice that by setting P[i, j] to
the zero matrix,
Constraints of this form can be translated to conic-quadratic
constraints. This transformation can be found in [1]. The
resulting conic-quadratic constraint is given below
.. math::
a[i, j]u + b[i, j]x + c[i, j] + \|P[i, j]^T [u, x, 1]^T \|_2 \leq 0,
where i is the stage index, and j is the constraint index.
Refs:
----
[1] <NAME>., & <NAME>. (2001). Lectures on modern convex
optimization: analysis, algorithms, and engineering applications
(Vol. 2). Siam.
"""
def __init__(self):
self.constraint_type = ConstraintType.CanonicalConic
self.discretization_type = DiscretizationType.Collocation
self.n_extra_vars = 0
self.dof = -1
self._format_string = ""
def compute_constraint_params(self, path, gridpoints):
raise NotImplementedError
class RobustCanonicalLinearConstraint(CanonicalConicConstraint):
"""The simple canonical conic constraint.
This constraint can be seen as a more robust version of a
CanonicalLinear constraint. In particular, the perturbations term,
[\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]] is assumed to lie
in a centered ellipsoid:
.. math::
[\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]]^\\top = diag(ru, rx, rc) \mathbf e,
where \|\mathbf e\|_2 \leq 1.
Parameters
----------
cnst: :class:`~toppra.constraint.CanonicalLinearConstraint`
The base constraint to robustify.
ellipsoid_axes_lengths: (3,)array
Lengths of the axes of the perturbation ellipsoid. Must all be
non-negative.
discretization_scheme: :class:`~.constraint.DiscretizationType`
Constraint discretization scheme to use.
"""
def __init__(self, cnst, ellipsoid_axes_lengths, discretization_scheme=DiscretizationType.Collocation):
super(RobustCanonicalLinearConstraint, self).__init__()
self.dof = cnst.get_dof()
assert cnst.get_constraint_type() == ConstraintType.CanonicalLinear
self.set_discretization_type(discretization_scheme)
if np.any(np.r_[ellipsoid_axes_lengths] < 0):
raise ValueError("Perturbation must be non-negative. Input {:}".format(ellipsoid_axes_lengths))
self.base_constraint = cnst
self.ellipsoid_axes_lengths = ellipsoid_axes_lengths
self._format_string += " Robust constraint generated from a canonical linear constraint\n"
def compute_constraint_params(self, path, gridpoints):
self.base_constraint.set_discretization_type(self.discretization_type)
a_, b_, c_, F_, g_, u_, _ = self.base_constraint.compute_constraint_params(path, gridpoints)
N = len(gridpoints) - 1
if self.base_constraint.identical:
d = F_.shape[0] # number of rows
else:
d = F_.shape[1]
a = np.zeros((N + 1, d + 2))
b = np.zeros((N + 1, d + 2))
c = np.zeros((N + 1, d + 2))
if self.base_constraint.identical:
for i in range(len(gridpoints)):
a[i, :d] = F_.dot(a_[i])
b[i, :d] = F_.dot(b_[i])
c[i, :d] = F_.dot(c_[i]) - g_
a[i, d:] = [1, -1]
c[i, d:] = [- u_[i, 1], u_[i, 0]]
else:
for i in range(len(gridpoints)):
a[i, :d] = F_[i].dot(a_[i])
b[i, :d] = F_[i].dot(b_[i])
c[i, :d] = F_[i].dot(c_[i]) - g_[i]
a[i, d:] = [1, -1]
c[i, d:] = [- u_[i, 1], u_[i, 0]]
P = np.zeros((N + 1, d + 2, 3, 3))
diag_ = np.diag(self.ellipsoid_axes_lengths)
P[:] = diag_
return a, b, c, P
| [
"numpy.zeros",
"numpy.any",
"numpy.diag"
] | [((2633, 2674), 'numpy.any', 'np.any', (['(np.r_[ellipsoid_axes_lengths] < 0)'], {}), '(np.r_[ellipsoid_axes_lengths] < 0)\n', (2639, 2674), True, 'import numpy as np\n'), ((3399, 3423), 'numpy.zeros', 'np.zeros', (['(N + 1, d + 2)'], {}), '((N + 1, d + 2))\n', (3407, 3423), True, 'import numpy as np\n'), ((3436, 3460), 'numpy.zeros', 'np.zeros', (['(N + 1, d + 2)'], {}), '((N + 1, d + 2))\n', (3444, 3460), True, 'import numpy as np\n'), ((3473, 3497), 'numpy.zeros', 'np.zeros', (['(N + 1, d + 2)'], {}), '((N + 1, d + 2))\n', (3481, 3497), True, 'import numpy as np\n'), ((4097, 4127), 'numpy.zeros', 'np.zeros', (['(N + 1, d + 2, 3, 3)'], {}), '((N + 1, d + 2, 3, 3))\n', (4105, 4127), True, 'import numpy as np\n'), ((4144, 4180), 'numpy.diag', 'np.diag', (['self.ellipsoid_axes_lengths'], {}), '(self.ellipsoid_axes_lengths)\n', (4151, 4180), True, 'import numpy as np\n')] |
import logging
import numpy as np
from sklearn import linear_model, preprocessing, pipeline
import matplotlib
matplotlib.use('Agg')
from pylab import plt
from matplotlib import pyplot
from competition import AdversarialCompetition
from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel
from gradient_descent import GradientDescent
pyplot.ioff()
logging.basicConfig(format="[%(filename)s:%(lineno)s - %(funcName)15s() %(asctime)-15s ] %(message)s", level=logging.DEBUG)
np.random.seed(0)
size_batch = 100
mu_param = 3
sigma_param = 1.0
sample_size = 1000
true_data1 = GenerativeNormalModel(mu_param,sigma_param).random(sample_size).reshape(-1)
true_data2 = GenerativeNormalModel(-mu_param,sigma_param).random(sample_size).reshape(-1)
true_data = np.concatenate((true_data1,true_data2))
competition1 = AdversarialCompetition(
size_batch=size_batch,
true_model=GenerativeNormalModel(mu_param, sigma_param),
discriminative=pipeline.make_pipeline(
preprocessing.PolynomialFeatures(4),
linear_model.LogisticRegression()),
generative=GenerativeNormalModel(
2.5,2.1,updates=["mu", "sigma"]),
gradient_descent=GradientDescent(
0.03, inertia=0.0, annealing=100),
x_dataset = true_data1
)
competition2 = AdversarialCompetition(
size_batch=size_batch,
true_model=GenerativeNormalModel(-mu_param, sigma_param),
discriminative=pipeline.make_pipeline(
preprocessing.PolynomialFeatures(4),
linear_model.LogisticRegression()),
generative=GenerativeNormalModel(
-2.5,2.1,updates=["mu", "sigma"]),
gradient_descent=GradientDescent(
0.03, inertia=0.0, annealing=100),
x_dataset = true_data2
)
competition = AdversarialCompetition(
size_batch=size_batch,
true_model=GenerativeNormalQuasiMixtureModel(mu_param, sigma_param),
discriminative=pipeline.make_pipeline(
preprocessing.PolynomialFeatures(4),
linear_model.LogisticRegression()),
generative=GenerativeNormalQuasiMixtureModel(
2.5,2.1,updates=["mu", "sigma"]),
gradient_descent=GradientDescent(
0.03, inertia=0.0, annealing=100),
x_dataset = true_data
)
print(competition)
for i in range(1001):
if i % 50 == 0:
plt.figure()
competition.plot()
pyplot.savefig('Pooling.png')
pyplot.close()
plt.figure()
competition1.plot()
competition2.plot()
pyplot.savefig('Separating.png')
pyplot.close()
pass
competition.iteration()
competition1.iteration()
competition2.iteration()
print("final model pooling %s" % competition.generatives[-1])
print("final model separating %s" % competition1.generatives[-1], competition2.generatives[-1])
#separated = GenerativeNormalMixtureModel([competition1.generatives[-1].params["mu"], competition2.generatives[-1].params["mu"]], [competition1.generatives[-1].params["sigma"], competition2.generatives[-1].params["sigma"]])
separated1 = GenerativeNormalModel(competition1.generatives[-1].params["mu"], competition1.generatives[-1].params["sigma"])
separated2 = GenerativeNormalModel(competition2.generatives[-1].params["mu"], competition2.generatives[-1].params["sigma"])
pooled = GenerativeNormalQuasiMixtureModel(competition.generatives[-1].params["mu"], competition.generatives[-1].params["sigma"])
#true_mixture_model = GenerativeNormalMixtureModel([mu_param, -mu_param], [sigma_param, sigma_param])
true_mixture_model2 = GenerativeNormalQuasiMixtureModel(mu_param,sigma_param)
plt.figure()
xplot = np.arange(-10, 10, 0.1).reshape((-1, 1))
#plt.plot(xplot, true_mixture_model.predict_proba(xplot), c="black")
plt.plot(xplot, true_mixture_model2.predict_proba(xplot), c="black")
plt.plot(xplot, pooled.predict_proba(xplot), c="red")
plt.plot(xplot, ( separated1.predict_proba(xplot) + separated2.predict_proba(xplot) ) / 2, c="blue")
pyplot.savefig('Combined.png')
pyplot.close()
'''
plt.figure()
competition.plot_params()
#plt.show()
pyplot.savefig('file2.png')
pyplot.close()
plt.figure()
competition.plot_auc()
#plt.show()
pyplot.savefig('file3.png')
pyplot.close()
'''
| [
"logging.basicConfig",
"models.GenerativeNormalModel",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"gradient_descent.GradientDescent",
"models.GenerativeNormalQuasiMixtureModel",
"matplotlib.pyplot.ioff",
"sklearn.linear_model.LogisticRegression",
"ma... | [((116, 137), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (130, 137), False, 'import matplotlib\n'), ((398, 411), 'matplotlib.pyplot.ioff', 'pyplot.ioff', ([], {}), '()\n', (409, 411), False, 'from matplotlib import pyplot\n'), ((415, 547), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(filename)s:%(lineno)s - %(funcName)15s() %(asctime)-15s ] %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '[%(filename)s:%(lineno)s - %(funcName)15s() %(asctime)-15s ] %(message)s',\n level=logging.DEBUG)\n", (434, 547), False, 'import logging\n'), ((542, 559), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (556, 559), True, 'import numpy as np\n'), ((825, 865), 'numpy.concatenate', 'np.concatenate', (['(true_data1, true_data2)'], {}), '((true_data1, true_data2))\n', (839, 865), True, 'import numpy as np\n'), ((3116, 3230), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (["competition1.generatives[-1].params['mu']", "competition1.generatives[-1].params['sigma']"], {}), "(competition1.generatives[-1].params['mu'],\n competition1.generatives[-1].params['sigma'])\n", (3137, 3230), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((3241, 3355), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (["competition2.generatives[-1].params['mu']", "competition2.generatives[-1].params['sigma']"], {}), "(competition2.generatives[-1].params['mu'],\n competition2.generatives[-1].params['sigma'])\n", (3262, 3355), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((3362, 3486), 'models.GenerativeNormalQuasiMixtureModel', 'GenerativeNormalQuasiMixtureModel', (["competition.generatives[-1].params['mu']", "competition.generatives[-1].params['sigma']"], {}), "(competition.generatives[-1].params['mu'],\n competition.generatives[-1].params['sigma'])\n", (3395, 3486), 
False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((3609, 3665), 'models.GenerativeNormalQuasiMixtureModel', 'GenerativeNormalQuasiMixtureModel', (['mu_param', 'sigma_param'], {}), '(mu_param, sigma_param)\n', (3642, 3665), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((3668, 3680), 'pylab.plt.figure', 'plt.figure', ([], {}), '()\n', (3678, 3680), False, 'from pylab import plt\n'), ((4029, 4059), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""Combined.png"""'], {}), "('Combined.png')\n", (4043, 4059), False, 'from matplotlib import pyplot\n'), ((4061, 4075), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (4073, 4075), False, 'from matplotlib import pyplot\n'), ((951, 995), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['mu_param', 'sigma_param'], {}), '(mu_param, sigma_param)\n', (972, 995), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((1148, 1204), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['(2.5)', '(2.1)'], {'updates': "['mu', 'sigma']"}), "(2.5, 2.1, updates=['mu', 'sigma'])\n", (1169, 1204), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((1236, 1285), 'gradient_descent.GradientDescent', 'GradientDescent', (['(0.03)'], {'inertia': '(0.0)', 'annealing': '(100)'}), '(0.03, inertia=0.0, annealing=100)\n', (1251, 1285), False, 'from gradient_descent import GradientDescent\n'), ((1414, 1459), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['(-mu_param)', 'sigma_param'], {}), '(-mu_param, sigma_param)\n', (1435, 1459), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((1612, 1669), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['(-2.5)', 
'(2.1)'], {'updates': "['mu', 'sigma']"}), "(-2.5, 2.1, updates=['mu', 'sigma'])\n", (1633, 1669), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((1701, 1750), 'gradient_descent.GradientDescent', 'GradientDescent', (['(0.03)'], {'inertia': '(0.0)', 'annealing': '(100)'}), '(0.03, inertia=0.0, annealing=100)\n', (1716, 1750), False, 'from gradient_descent import GradientDescent\n'), ((1880, 1936), 'models.GenerativeNormalQuasiMixtureModel', 'GenerativeNormalQuasiMixtureModel', (['mu_param', 'sigma_param'], {}), '(mu_param, sigma_param)\n', (1913, 1936), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((2089, 2157), 'models.GenerativeNormalQuasiMixtureModel', 'GenerativeNormalQuasiMixtureModel', (['(2.5)', '(2.1)'], {'updates': "['mu', 'sigma']"}), "(2.5, 2.1, updates=['mu', 'sigma'])\n", (2122, 2157), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((2189, 2238), 'gradient_descent.GradientDescent', 'GradientDescent', (['(0.03)'], {'inertia': '(0.0)', 'annealing': '(100)'}), '(0.03, inertia=0.0, annealing=100)\n', (2204, 2238), False, 'from gradient_descent import GradientDescent\n'), ((2357, 2369), 'pylab.plt.figure', 'plt.figure', ([], {}), '()\n', (2367, 2369), False, 'from pylab import plt\n'), ((2410, 2439), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""Pooling.png"""'], {}), "('Pooling.png')\n", (2424, 2439), False, 'from matplotlib import pyplot\n'), ((2449, 2463), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (2461, 2463), False, 'from matplotlib import pyplot\n'), ((2473, 2485), 'pylab.plt.figure', 'plt.figure', ([], {}), '()\n', (2483, 2485), False, 'from pylab import plt\n'), ((2553, 2585), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""Separating.png"""'], {}), "('Separating.png')\n", (2567, 2585), False, 'from 
matplotlib import pyplot\n'), ((2595, 2609), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (2607, 2609), False, 'from matplotlib import pyplot\n'), ((3690, 3713), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (3699, 3713), True, 'import numpy as np\n'), ((1050, 1085), 'sklearn.preprocessing.PolynomialFeatures', 'preprocessing.PolynomialFeatures', (['(4)'], {}), '(4)\n', (1082, 1085), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((1096, 1129), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (1127, 1129), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((1514, 1549), 'sklearn.preprocessing.PolynomialFeatures', 'preprocessing.PolynomialFeatures', (['(4)'], {}), '(4)\n', (1546, 1549), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((1560, 1593), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (1591, 1593), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((1991, 2026), 'sklearn.preprocessing.PolynomialFeatures', 'preprocessing.PolynomialFeatures', (['(4)'], {}), '(4)\n', (2023, 2026), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((2037, 2070), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (2068, 2070), False, 'from sklearn import linear_model, preprocessing, pipeline\n'), ((645, 689), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['mu_param', 'sigma_param'], {}), '(mu_param, sigma_param)\n', (666, 689), False, 'from models import GenerativeNormalModel, GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n'), ((735, 780), 'models.GenerativeNormalModel', 'GenerativeNormalModel', (['(-mu_param)', 'sigma_param'], {}), '(-mu_param, sigma_param)\n', (756, 780), False, 'from models import GenerativeNormalModel, 
GenerativeNormalMixtureModel, GenerativeNormalQuasiMixtureModel\n')] |
# coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.losses.tuple_losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import tensorflow_gan as tfgan
from tensorflow_gan.python.losses.tuple_losses import __all__ as tuple_all
from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model
class ArgsToGanModelTest(tf.test.TestCase):
  """Unit tests for `args_to_gan_model`."""

  def testargs_to_gan_model(self):
    """`args_to_gan_model` routes tuple fields and keyword args correctly."""
    fake_tuple = collections.namedtuple('fake_type', ['arg1', 'arg3'])

    def loss_from_args(arg1, arg2, arg3=3, arg4=4):
      return arg1 + arg2 + arg3 + arg4

    tuple_loss = args_to_gan_model(loss_from_args)

    # All four values supplied explicitly.
    self.assertEqual(1 + 2 + 5 + 6,
                     tuple_loss(fake_tuple(1, 2), arg2=5, arg4=6))

    # A `None` tuple field falls back to the function's default.
    self.assertEqual(1 + 5 + 3 + 7,
                     tuple_loss(fake_tuple(1, None), arg2=5, arg4=7))

    # A non-tuple keyword that is omitted uses its default.
    self.assertEqual(1 + 5 + 2 + 4, tuple_loss(fake_tuple(1, 2), arg2=5))

    # A required non-tuple argument must be supplied by the caller.
    with self.assertRaisesRegexp(ValueError, '`arg2` must be supplied'):
      tuple_loss(fake_tuple(1, 2))

    # A tuple field may not also be passed as a keyword argument.
    with self.assertRaisesRegexp(ValueError,
                                 'present in both the tuple and keyword args'):
      tuple_loss(fake_tuple(1, 2), arg2=1, arg3=5)

  def testargs_to_gan_model_name(self):
    """The wrapped function keeps the original name and records the wrapping."""

    def loss_fn(x):
      return x

    wrapped = args_to_gan_model(loss_fn)
    self.assertEqual('loss_fn', wrapped.__name__)
    # NOTE: tuple_losses stores the generated docstring under `__docstring__`.
    self.assertTrue('The gan_model version of' in wrapped.__docstring__)

  def test_tuple_respects_optional_args(self):
    """Optional (defaulted) args can still be overridden via keywords."""
    fake_tuple = collections.namedtuple('fake_type', ['arg1', 'arg2'])

    def loss_from_args(arg1, arg2, arg3=3):
      return arg1 + 2 * arg2 + 3 * arg3

    tuple_loss = args_to_gan_model(loss_from_args)
    loss = tuple_loss(fake_tuple(arg1=-1, arg2=2), arg3=4)
    # A wrong `arg3` would change this sum.
    self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)

  def test_works_with_child_classes(self):
    """Subclasses of a namedtuple are handled like the namedtuple itself."""
    base_tuple = collections.namedtuple('fake_type', ['arg1', 'arg2'])

    class InheritedType(base_tuple):
      pass

    def loss_from_args(arg1, arg2, arg3=3):
      return arg1 + 2 * arg2 + 3 * arg3

    tuple_loss = args_to_gan_model(loss_from_args)
    loss = tuple_loss(InheritedType(arg1=-1, arg2=2), arg3=4)
    # A wrong `arg3` would change this sum.
    self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)
class ConsistentLossesTest(tf.test.TestCase):
  """Empty container; consistency test methods are attached at import time
  by `add_loss_consistency_test` in the `__main__` block below."""

  pass
def _tuple_from_dict(args_dict):
return collections.namedtuple('Tuple', args_dict.keys())(**args_dict)
def add_loss_consistency_test(test_class, loss_name_str, loss_args):
  """Attach a consistency test for one loss to `test_class`.

  The generated method checks that the tuple-based loss in `tfgan.losses`
  keeps the name of — and computes the same value as — its keyword-argument
  twin in `tfgan.losses.wargs`. It is installed on `test_class` under the
  name `test_loss_consistency_<loss_name_str>`.
  """
  loss_from_tuple = getattr(tfgan.losses, loss_name_str)
  loss_from_kwargs = getattr(tfgan.losses.wargs, loss_name_str)

  def _check_consistency(self):
    self.assertEqual(loss_from_kwargs.__name__, loss_from_tuple.__name__)
    with self.cached_session() as sess:
      expected = sess.run(loss_from_kwargs(**loss_args))
      actual = sess.run(loss_from_tuple(_tuple_from_dict(loss_args)))
      self.assertEqual(expected, actual)

  setattr(test_class, 'test_loss_consistency_%s' % loss_name_str,
          _check_consistency)
# A list of consistency tests which need to be manually written.
manual_tests = [
    'acgan_discriminator_loss', 'acgan_generator_loss',
    'combine_adversarial_loss', 'mutual_information_penalty',
    'wasserstein_gradient_penalty', 'cycle_consistency_loss',
    'stargan_generator_loss_wrapper', 'stargan_discriminator_loss_wrapper',
    'stargan_gradient_penalty_wrapper'
]

# Canned discriminator logits (batch of 2, three logits each) fed to every
# auto-generated discriminator-loss consistency test.
discriminator_keyword_args = {
    'discriminator_real_outputs':
        np.array([[3.4, 2.3, -2.3], [6.3, -2.1, 0.2]]),
    'discriminator_gen_outputs':
        np.array([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]),
}

# Generator losses only consume the discriminator's output on generated data.
generator_keyword_args = {
    'discriminator_gen_outputs':
        np.array([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]),
}
class CycleConsistencyLossTest(tf.test.TestCase):
  """Tests for `tfgan.losses.cycle_consistency_loss`."""

  def setUp(self):
    super(CycleConsistencyLossTest, self).setUp()

    def _partial_model(generator_inputs_np):
      # Build a GANModel stub with every field empty except
      # `generator_inputs` — the only field the cycle loss reads here.
      model = tfgan.GANModel(*[None] * 11)
      return model._replace(
          generator_inputs=tf.constant(generator_inputs_np, dtype=tf.float32))

    self._model_x2y = _partial_model([1, 2])
    self._model_y2x = _partial_model([5, 6])

  def test_model_type(self):
    """Test the input model type for `cycle_consistency_loss`."""
    # A plain GANModel (not a CycleGANModel) must be rejected.
    with self.assertRaises(ValueError):
      tfgan.losses.cycle_consistency_loss(self._model_x2y)

  def test_correct_loss(self):
    """Test the output of `cycle_consistency_loss`."""
    loss = tfgan.losses.cycle_consistency_loss(
        tfgan.CycleGANModel(
            model_x2y=self._model_x2y,
            model_y2x=self._model_y2x,
            reconstructed_x=tf.constant([9, 8], dtype=tf.float32),
            reconstructed_y=tf.constant([7, 2], dtype=tf.float32)))
    with self.cached_session(use_gpu=True) as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      # Expected 5.0: consistent with averaging the x-side mean abs error
      # (|9-1|+|8-2|)/2 = 7 and the y-side (|7-5|+|2-6|)/2 = 3 — presumably
      # the loss averages both directions; value confirmed empirically.
      self.assertNear(5.0, sess.run(loss), 1e-5)
class StarGANLossWrapperTest(tf.test.TestCase):
  """Tests for the StarGAN loss wrappers in `tfgan.losses`.

  Each test checks that a wrapped loss, called with a `StarGANModel`,
  produces the same value as the underlying keyword-argument loss in
  `tfgan.losses.wargs` called with the corresponding tensors.
  """

  def setUp(self):
    super(StarGANLossWrapperTest, self).setUp()
    # Minimal constant tensors standing in for a real StarGAN pipeline.
    self.input_data = tf.ones([1, 2, 2, 3])
    self.input_data_domain_label = tf.constant([[0, 1]])
    self.generated_data = tf.ones([1, 2, 2, 3])
    self.discriminator_input_data_source_predication = tf.ones([1])
    self.discriminator_generated_data_source_predication = tf.ones([1])

    def _discriminator_fn(inputs, num_domains):
      """Differentiable dummy discriminator for StarGAN."""
      hidden = tf.compat.v1.layers.flatten(inputs)
      output_src = tf.reduce_mean(input_tensor=hidden, axis=1)
      output_cls = tf.compat.v1.layers.dense(hidden, num_domains)
      return output_src, output_cls

    # Create an (empty) variable scope so the model exposes a discriminator
    # scope that the gradient-penalty wrapper can re-enter.
    with tf.compat.v1.variable_scope('discriminator') as dis_scope:
      pass

    self.model = tfgan.StarGANModel(
        input_data=self.input_data,
        input_data_domain_label=self.input_data_domain_label,
        generated_data=self.generated_data,
        generated_data_domain_target=None,
        reconstructed_data=None,
        discriminator_input_data_source_predication=self
        .discriminator_input_data_source_predication,
        discriminator_generated_data_source_predication=self
        .discriminator_generated_data_source_predication,
        discriminator_input_data_domain_predication=None,
        discriminator_generated_data_domain_predication=None,
        generator_variables=None,
        generator_scope=None,
        generator_fn=None,
        discriminator_variables=None,
        discriminator_scope=dis_scope,
        discriminator_fn=_discriminator_fn)

    self.discriminator_fn = _discriminator_fn
    self.discriminator_scope = dis_scope

  def test_stargan_generator_loss_wrapper(self):
    """Test StarGAN generator loss wrapper."""
    loss_fn = tfgan.losses.wargs.wasserstein_generator_loss
    wrapped_loss_fn = tfgan.losses.stargan_generator_loss_wrapper(loss_fn)

    loss_result_tensor = loss_fn(
        self.discriminator_generated_data_source_predication)
    wrapped_loss_result_tensor = wrapped_loss_fn(self.model)

    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      loss_result, wrapped_loss_result = sess.run(
          [loss_result_tensor, wrapped_loss_result_tensor])
      self.assertAlmostEqual(loss_result, wrapped_loss_result)

  def test_stargan_discriminator_loss_wrapper(self):
    """Test StarGAN discriminator loss wrapper."""
    loss_fn = tfgan.losses.wargs.wasserstein_discriminator_loss
    wrapped_loss_fn = tfgan.losses.stargan_discriminator_loss_wrapper(loss_fn)

    loss_result_tensor = loss_fn(
        self.discriminator_generated_data_source_predication,
        self.discriminator_generated_data_source_predication)
    wrapped_loss_result_tensor = wrapped_loss_fn(self.model)

    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      loss_result, wrapped_loss_result = sess.run(
          [loss_result_tensor, wrapped_loss_result_tensor])
      self.assertAlmostEqual(loss_result, wrapped_loss_result)

  def test_stargan_gradient_penalty_wrapper(self):
    """Test StarGAN gradient penalty wrapper.

    Notes:
      The random interpolates are handled by setting the reconstruction to
      be the same as the input.
    """
    if tf.executing_eagerly():
      # Can't use `tf.gradient` when executing eagerly
      return
    loss_fn = tfgan.losses.wargs.wasserstein_gradient_penalty
    # Fix: the original called the wrapper twice and discarded the first
    # result; a single call is sufficient.
    wrapped_loss_fn = tfgan.losses.stargan_gradient_penalty_wrapper(loss_fn)

    loss_result_tensor = loss_fn(
        real_data=self.input_data,
        generated_data=self.generated_data,
        generator_inputs=self.input_data_domain_label.shape.as_list()[-1],
        discriminator_fn=self.discriminator_fn,
        discriminator_scope=self.discriminator_scope)
    wrapped_loss_result_tensor = wrapped_loss_fn(self.model)

    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      loss_result, wrapped_loss_result = sess.run(
          [loss_result_tensor, wrapped_loss_result_tensor])
      self.assertAlmostEqual(loss_result, wrapped_loss_result)
if __name__ == '__main__':
  # Auto-generate a consistency test for every tuple loss except those that
  # need hand-written coverage (listed in `manual_tests`).
  for loss_name in tuple_all:
    if loss_name in manual_tests:
      continue
    # Generator losses take only the generated-data logits; discriminator
    # losses need both real and generated logits.
    if 'generator' in loss_name:
      keyword_args = generator_keyword_args
    else:
      keyword_args = discriminator_keyword_args
    add_loss_consistency_test(ConsistentLossesTest, loss_name, keyword_args)

  tf.test.main()
| [
"collections.namedtuple",
"tensorflow.compat.v1.variable_scope",
"tensorflow.ones",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.executing_eagerly",
"tensorflow.compat.v1.layers.dense",
"tensorflow_gan.losses.stargan_gradient_penalty_wrapper",
"tensorflow_gan.StarGANModel",
"tens... | [((4655, 4701), 'numpy.array', 'np.array', (['[[3.4, 2.3, -2.3], [6.3, -2.1, 0.2]]'], {}), '([[3.4, 2.3, -2.3], [6.3, -2.1, 0.2]])\n', (4663, 4701), True, 'import numpy as np\n'), ((4744, 4791), 'numpy.array', 'np.array', (['[[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]'], {}), '([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]])\n', (4752, 4791), True, 'import numpy as np\n'), ((4863, 4910), 'numpy.array', 'np.array', (['[[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]'], {}), '([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]])\n', (4871, 4910), True, 'import numpy as np\n'), ((10663, 10677), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (10675, 10677), True, 'import tensorflow as tf\n'), ((1139, 1192), 'collections.namedtuple', 'collections.namedtuple', (['"""fake_type"""', "['arg1', 'arg3']"], {}), "('fake_type', ['arg1', 'arg3'])\n", (1161, 1192), False, 'import collections\n'), ((1302, 1330), 'tensorflow_gan.python.losses.tuple_losses.args_to_gan_model', 'args_to_gan_model', (['args_loss'], {}), '(args_loss)\n', (1319, 1330), False, 'from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model\n'), ((2301, 2327), 'tensorflow_gan.python.losses.tuple_losses.args_to_gan_model', 'args_to_gan_model', (['loss_fn'], {}), '(loss_fn)\n', (2318, 2327), False, 'from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model\n'), ((2592, 2645), 'collections.namedtuple', 'collections.namedtuple', (['"""fake_type"""', "['arg1', 'arg2']"], {}), "('fake_type', ['arg1', 'arg2'])\n", (2614, 2645), False, 'import collections\n'), ((2741, 2769), 'tensorflow_gan.python.losses.tuple_losses.args_to_gan_model', 'args_to_gan_model', (['args_loss'], {}), '(args_loss)\n', (2758, 2769), False, 'from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model\n'), ((3085, 3138), 'collections.namedtuple', 'collections.namedtuple', (['"""fake_type"""', "['arg1', 'arg2']"], {}), "('fake_type', ['arg1', 'arg2'])\n", (3107, 3138), False, 'import collections\n'), ((3283, 3311), 
'tensorflow_gan.python.losses.tuple_losses.args_to_gan_model', 'args_to_gan_model', (['args_loss'], {}), '(args_loss)\n', (3300, 3311), False, 'from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model\n'), ((6199, 6220), 'tensorflow.ones', 'tf.ones', (['[1, 2, 2, 3]'], {}), '([1, 2, 2, 3])\n', (6206, 6220), True, 'import tensorflow as tf\n'), ((6256, 6277), 'tensorflow.constant', 'tf.constant', (['[[0, 1]]'], {}), '([[0, 1]])\n', (6267, 6277), True, 'import tensorflow as tf\n'), ((6304, 6325), 'tensorflow.ones', 'tf.ones', (['[1, 2, 2, 3]'], {}), '([1, 2, 2, 3])\n', (6311, 6325), True, 'import tensorflow as tf\n'), ((6381, 6393), 'tensorflow.ones', 'tf.ones', (['[1]'], {}), '([1])\n', (6388, 6393), True, 'import tensorflow as tf\n'), ((6453, 6465), 'tensorflow.ones', 'tf.ones', (['[1]'], {}), '([1])\n', (6460, 6465), True, 'import tensorflow as tf\n'), ((6889, 7596), 'tensorflow_gan.StarGANModel', 'tfgan.StarGANModel', ([], {'input_data': 'self.input_data', 'input_data_domain_label': 'self.input_data_domain_label', 'generated_data': 'self.generated_data', 'generated_data_domain_target': 'None', 'reconstructed_data': 'None', 'discriminator_input_data_source_predication': 'self.discriminator_input_data_source_predication', 'discriminator_generated_data_source_predication': 'self.discriminator_generated_data_source_predication', 'discriminator_input_data_domain_predication': 'None', 'discriminator_generated_data_domain_predication': 'None', 'generator_variables': 'None', 'generator_scope': 'None', 'generator_fn': 'None', 'discriminator_variables': 'None', 'discriminator_scope': 'dis_scope', 'discriminator_fn': '_discriminator_fn'}), '(input_data=self.input_data, input_data_domain_label=self\n .input_data_domain_label, generated_data=self.generated_data,\n generated_data_domain_target=None, reconstructed_data=None,\n discriminator_input_data_source_predication=self.\n discriminator_input_data_source_predication,\n 
discriminator_generated_data_source_predication=self.\n discriminator_generated_data_source_predication,\n discriminator_input_data_domain_predication=None,\n discriminator_generated_data_domain_predication=None,\n generator_variables=None, generator_scope=None, generator_fn=None,\n discriminator_variables=None, discriminator_scope=dis_scope,\n discriminator_fn=_discriminator_fn)\n', (6907, 7596), True, 'import tensorflow_gan as tfgan\n'), ((7956, 8008), 'tensorflow_gan.losses.stargan_generator_loss_wrapper', 'tfgan.losses.stargan_generator_loss_wrapper', (['loss_fn'], {}), '(loss_fn)\n', (7999, 8008), True, 'import tensorflow_gan as tfgan\n'), ((8633, 8689), 'tensorflow_gan.losses.stargan_discriminator_loss_wrapper', 'tfgan.losses.stargan_discriminator_loss_wrapper', (['loss_fn'], {}), '(loss_fn)\n', (8680, 8689), True, 'import tensorflow_gan as tfgan\n'), ((9423, 9445), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (9443, 9445), True, 'import tensorflow as tf\n'), ((9581, 9635), 'tensorflow_gan.losses.stargan_gradient_penalty_wrapper', 'tfgan.losses.stargan_gradient_penalty_wrapper', (['loss_fn'], {}), '(loss_fn)\n', (9626, 9635), True, 'import tensorflow_gan as tfgan\n'), ((9658, 9712), 'tensorflow_gan.losses.stargan_gradient_penalty_wrapper', 'tfgan.losses.stargan_gradient_penalty_wrapper', (['loss_fn'], {}), '(loss_fn)\n', (9703, 9712), True, 'import tensorflow_gan as tfgan\n'), ((5096, 5126), 'tensorflow_gan.GANModel', 'tfgan.GANModel', (['*([None] * 11)'], {}), '(*([None] * 11))\n', (5110, 5126), True, 'import tensorflow_gan as tfgan\n'), ((5466, 5518), 'tensorflow_gan.losses.cycle_consistency_loss', 'tfgan.losses.cycle_consistency_loss', (['self._model_x2y'], {}), '(self._model_x2y)\n', (5501, 5518), True, 'import tensorflow_gan as tfgan\n'), ((6590, 6625), 'tensorflow.compat.v1.layers.flatten', 'tf.compat.v1.layers.flatten', (['inputs'], {}), '(inputs)\n', (6617, 6625), True, 'import tensorflow as tf\n'), ((6645, 6688), 
'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'hidden', 'axis': '(1)'}), '(input_tensor=hidden, axis=1)\n', (6659, 6688), True, 'import tensorflow as tf\n'), ((6708, 6754), 'tensorflow.compat.v1.layers.dense', 'tf.compat.v1.layers.dense', (['hidden', 'num_domains'], {}), '(hidden, num_domains)\n', (6733, 6754), True, 'import tensorflow as tf\n'), ((6801, 6845), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""discriminator"""'], {}), "('discriminator')\n", (6828, 6845), True, 'import tensorflow as tf\n'), ((5963, 6006), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (6004, 6006), True, 'import tensorflow as tf\n'), ((8223, 8266), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (8264, 8266), True, 'import tensorflow as tf\n'), ((8966, 9009), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (9007, 9009), True, 'import tensorflow as tf\n'), ((10121, 10164), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (10162, 10164), True, 'import tensorflow as tf\n'), ((5181, 5231), 'tensorflow.constant', 'tf.constant', (['generator_inputs_np'], {'dtype': 'tf.float32'}), '(generator_inputs_np, dtype=tf.float32)\n', (5192, 5231), True, 'import tensorflow as tf\n'), ((5789, 5826), 'tensorflow.constant', 'tf.constant', (['[9, 8]'], {'dtype': 'tf.float32'}), '([9, 8], dtype=tf.float32)\n', (5800, 5826), True, 'import tensorflow as tf\n'), ((5856, 5893), 'tensorflow.constant', 'tf.constant', (['[7, 2]'], {'dtype': 'tf.float32'}), '([7, 2], dtype=tf.float32)\n', (5867, 5893), True, 'import tensorflow as tf\n')] |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
import os
import cv2
import random
from PIL import Image
from bbox.bbox_transform import clip_boxes
from math import floor
# TODO: This two functions should be merged with individual data loader
def get_image(roidb, config):
    """
    preprocess image and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
    roidb add new item['im_info']
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Bug fix: '%s does not exist'.format(...) never interpolated the path
        # ('%s' is a printf-style placeholder); use '{}' so the offending file
        # name actually appears in the assertion message.
        assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
        im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        if roidb[i]['flipped']:
            # Horizontal flip for augmentation.
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        # Pick a random (target, max) scale pair from the config.
        scale_ind = random.randrange(len(config.SCALES))
        target_size = config.SCALES[scale_ind][0]
        max_size = config.SCALES[scale_ind][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        # Rescale ground-truth boxes to the resized image and clip to it.
        new_rec['boxes'] = clip_boxes(np.round(roi_rec['boxes'].copy() * im_scale), im_info[:2])
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
def compute_iou(rec1, rec2):
    """
    Overlap ratio of two axis-aligned rectangles.
    :param rec1: (y0, x0, y1, x1), which reflects (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: intersection area divided by the area of rec1 (note: NOT the
             symmetric IoU — the denominator is area(rec1) only)
    """
    # Intersection rectangle bounds.
    top = max(rec1[0], rec2[0])
    left = max(rec1[1], rec2[1])
    bottom = min(rec1[2], rec2[2])
    right = min(rec1[3], rec2[3])
    # Disjoint (or degenerate) rectangles share no area.
    if left >= right or top >= bottom:
        return 0
    overlap = float(right - left) * float(bottom - top)
    rec1_area = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    return overlap / float(rec1_area)
def crop_image(img, n):
    """
    Cut an image into an n x n grid of overlapping crops stacked on channels.
    Each crop spans 1/(n-1) of the image per axis, and consecutive crops are
    offset by (n-2)/(n-1)^2 of the image, so neighbours overlap.
    :param img: [height, width, channel] array
    :param n: number of crops per axis
    :return: [crop_h, crop_w, n*n*channel] int array; crop (row, col) occupies
             channels [(row*n+col)*channel, (row*n+col+1)*channel)
    """
    height, width, channel = img.shape
    crop_h = floor(height * 1.0 / (n - 1))
    crop_w = floor(width * 1.0 / (n - 1))
    stride_h = floor(height * float(n - 2) / float((n - 1) ** 2))
    stride_w = floor(width * float(n - 2) / float((n - 1) ** 2))
    stacked = np.zeros((int(crop_h), int(crop_w), n * n * channel), dtype=int)
    for row in range(n):
        for col in range(n):
            y0, x0 = int(row * stride_h), int(col * stride_w)
            patch = img[y0:int(row * stride_h + crop_h), x0:int(col * stride_w + crop_w), :]
            offset = (row * n + col) * channel
            stacked[:, :, offset:offset + channel] = patch
    return stacked
def filtBox(croped_rect, box):
    """
    Clamp a box to a crop window and express it in crop-local coordinates.
    :param croped_rect: [x1, y1, x2, y2] crop window in image coordinates
    :param box: [x1, y1, x2, y2] box in image coordinates
    :return: a new list — the box clipped to the window and translated so the
             window's top-left corner is the origin
    """
    clipped = box[:]
    # Clip each edge to the window.
    clipped[0] = max(clipped[0], croped_rect[0])
    clipped[1] = max(clipped[1], croped_rect[1])
    clipped[2] = min(clipped[2], croped_rect[2])
    clipped[3] = min(clipped[3], croped_rect[3])
    # Shift into the window's local frame.
    clipped[0] -= croped_rect[0]
    clipped[2] -= croped_rect[0]
    clipped[1] -= croped_rect[1]
    clipped[3] -= croped_rect[1]
    return clipped
def remap_boxes(temp_new_rec, n, im_size):
    """
    Remap full-image ground-truth boxes onto the n x n overlapping crops
    produced by `crop_image`, updating `temp_new_rec` in place.

    A box is assigned to a crop when the overlap covers more than 80% of the
    box's own area (`compute_iou` divides by area of its first argument); it
    is then clipped to the crop window (`filtBox`) and stored together with
    the index of the crop it belongs to.

    :param temp_new_rec: roidb record with 'boxes' [x1, y1, x2, y2],
        'gt_classes', 'gt_overlaps', 'max_classes' and 'max_overlaps'
    :param n: number of crops per axis; must match the `crop_image` call
    :param im_size: (height, width, ...) shape of the original image
    :return: None; `temp_new_rec` is modified in place
    """
    boxes = []
    box_channels = []
    gt_classes = []
    gt_overlaps = []
    max_classes = []
    max_overlaps = []
    height = im_size[0]
    width = im_size[1]
    # Same crop geometry as `crop_image`.
    grid_h = floor(height * 1.0 / (n - 1))
    grid_w = floor(width * 1.0 / (n - 1))
    step_h = floor(height * float(n - 2) / float(pow((n - 1), 2)))
    step_w = floor(width * float(n - 2) / float(pow((n - 1), 2)))
    for i in range(temp_new_rec['boxes'].shape[0]):
        for j in range(n):
            for k in range(n):
                # Crop window at grid row j, column k, as [x1, y1, x2, y2].
                region = [step_w * k, step_h * j, step_w * k + grid_w, step_h * j + grid_h]
                box = temp_new_rec['boxes'][i].tolist()
                iou = compute_iou(box, region)
                if iou > 0.8:
                    t_box = filtBox(region, box)
                    boxes.append(t_box)
                    # Bug fix: the crop index is determined by the grid cell
                    # (row j, column k), not the box index i. The original
                    # `i * n + k` mixed in the box index and could even
                    # exceed the number of crops (n * n).
                    box_channels.append(j * n + k)
                    gt_classes.append(temp_new_rec['gt_classes'][i])
                    gt_overlaps.append(temp_new_rec['gt_overlaps'][i].tolist())
                    max_classes.append(temp_new_rec['max_classes'][i])
                    max_overlaps.append(temp_new_rec['max_overlaps'][i])
    temp_new_rec['boxes'] = np.asarray(boxes, dtype=np.uint16)
    temp_new_rec['box_channels'] = np.asarray(box_channels, dtype=np.uint16)
    temp_new_rec['gt_classes'] = np.asarray(gt_classes)
    temp_new_rec['gt_overlaps'] = np.asarray(gt_overlaps, dtype=np.float32)
    temp_new_rec['max_classes'] = np.asarray(max_classes)
    temp_new_rec['max_overlaps'] = np.asarray(max_overlaps)
    return
def get_crop_image(roidb, config):
    """
    preprocess images into stacks of overlapping crops and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
    roidb add new item['im_info']
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Bug fix: '%s does not exist'.format(...) never interpolated the path
        # ('%s' is a printf-style placeholder); use '{}' so the offending file
        # name actually appears in the assertion message.
        assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
        im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        ori_shape = im.shape
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        scale_ind = random.randrange(len(config.SCALES))
        target_size = config.SCALES[scale_ind][0]
        max_size = config.SCALES[scale_ind][1]
        # Stack CROP_NUM x CROP_NUM overlapping crops along the channel axis.
        croped_im = crop_image(im, config.CROP_NUM)
        im, im_scale = resize_crop(croped_im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform_crop(im, config.network.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        # Reassign ground-truth boxes to the crops they fall into (in place).
        remap_boxes(roi_rec, config.CROP_NUM, ori_shape)
        new_rec = roi_rec.copy()
        new_rec['boxes'] = clip_boxes(np.round(roi_rec['boxes'].copy() * im_scale), im_info[:2])
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
def get_segmentation_image(segdb, config):
    """
    propocess image and return segdb
    :param segdb: a list of segdb
    :return: list of img as mxnet format
    """
    num_images = len(segdb)
    assert num_images > 0, 'No images'
    processed_ims = []
    processed_segdb = []
    processed_seg_cls_gt = []
    for i in range(num_images):
        seg_rec = segdb[i]
        # Bug fix: '%s does not exist'.format(...) never interpolated the path
        # ('%s' is a printf-style placeholder); use '{}' so the offending file
        # name actually appears in the assertion message.
        assert os.path.exists(seg_rec['image']), '{} does not exist'.format(seg_rec['image'])
        im = np.array(cv2.imread(seg_rec['image']))
        new_rec = seg_rec.copy()
        scale_ind = random.randrange(len(config.SCALES))
        target_size = config.SCALES[scale_ind][0]
        max_size = config.SCALES[scale_ind][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        # The class-label map is resized with nearest-neighbour interpolation
        # so label ids are never blended into invalid intermediate values.
        seg_cls_gt = np.array(Image.open(seg_rec['seg_cls_path']))
        seg_cls_gt, seg_cls_gt_scale = resize(
            seg_cls_gt, target_size, max_size, stride=config.network.IMAGE_STRIDE, interpolation=cv2.INTER_NEAREST)
        seg_cls_gt_tensor = transform_seg_gt(seg_cls_gt)
        processed_ims.append(im_tensor)
        processed_segdb.append(new_rec)
        processed_seg_cls_gt.append(seg_cls_gt_tensor)
    return processed_ims, processed_seg_cls_gt, processed_segdb
def resize_crop(im, target_size, max_size, stride=0, interpolation = cv2.INTER_LINEAR):
    """
    only resize input image to target size and return scale
    :param im: stacked-crop image [height, width, channel]; channel is a
        multiple of 3 (groups of BGR crops from `crop_image`)
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :param stride: if given, pad the image to a multiple of the stride
    :param interpolation: if given, using given interpolation method to resize image
    :return: (resized image, scale factor)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    channel = im.shape[2]
    # Resize one plane first only to learn the output spatial size.
    t_im = cv2.resize(im[:, :, 0].astype(np.float32), None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
    n_im = np.zeros((t_im.shape[0], t_im.shape[1], channel), dtype=int)
    # Bug fix: `channel / 3` is a float on Python 3 and range() rejects it;
    # floor division works identically on Python 2 and 3.
    for i in range(channel // 3):
        # cv2.resize only handles up to 4 channels, so resize each BGR
        # triple separately.
        n_im[:, :, i * 3:(i + 1) * 3] = cv2.resize(im[:, :, i * 3:(i + 1) * 3].astype(np.float32), None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
    im = n_im
    if stride == 0:
        return im, im_scale
    else:
        # pad to product of stride
        im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
        im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
        im_channel = im.shape[2]
        padded_im = np.zeros((im_height, im_width, im_channel))
        padded_im[:im.shape[0], :im.shape[1], :] = im
        return padded_im, im_scale
def transform_crop(im, pixel_means):
    """
    transform a stacked-crop image into an mxnet tensor
    substract pixel means and convert each BGR triple to RGB
    :param im: [height, width, channel] with channel a multiple of 3
        (groups of BGR crops, as produced by `crop_image`)
    :param pixel_means: [B, G, R pixel means]
    :return: [batch=1, channel, height, width]
    """
    channel = im.shape[2]
    im_tensor = np.zeros((1, channel, im.shape[0], im.shape[1]))
    # Bug fix: `channel / 3` is a float on Python 3 and range() rejects it;
    # floor division works identically on Python 2 and 3.
    for i in range(channel // 3):
        for j in range(3):
            # Reverse each BGR triple to RGB and subtract the matching mean.
            im_tensor[0, i * 3 + j, :, :] = im[:, :, i * 3 + 2 - j] - pixel_means[2 - j]
    return im_tensor
def resize(im, target_size, max_size, stride=0, interpolation = cv2.INTER_LINEAR):
    """
    Scale an image so its short side equals target_size, capping the long side.
    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :param stride: if given, zero-pad both spatial dims to a multiple of it
    :param interpolation: interpolation method passed to cv2.resize
    :return: (resized [and padded] image, scale factor)
    """
    short_side = np.min(im.shape[0:2])
    long_side = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(short_side)
    # Shrink further if the scaled long side would exceed max_size.
    if np.round(im_scale * long_side) > max_size:
        im_scale = float(max_size) / float(long_side)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
    if stride == 0:
        return im, im_scale
    # Zero-pad so height and width are multiples of `stride`.
    padded_h = int(np.ceil(im.shape[0] / float(stride)) * stride)
    padded_w = int(np.ceil(im.shape[1] / float(stride)) * stride)
    padded_im = np.zeros((padded_h, padded_w, im.shape[2]))
    padded_im[:im.shape[0], :im.shape[1], :] = im
    return padded_im, im_scale
def transform(im, pixel_means):
    """
    Convert a BGR image into a 1 x 3 x H x W RGB tensor with means removed.
    :param im: [height, width, 3] in BGR order
    :param pixel_means: [B, G, R pixel means]
    :return: [batch=1, channel=3, height, width]
    """
    out = np.zeros((1, 3, im.shape[0], im.shape[1]))
    for rgb_idx in range(3):
        bgr_idx = 2 - rgb_idx
        # Flip BGR -> RGB while subtracting the matching channel mean.
        out[0, rgb_idx, :, :] = im[:, :, bgr_idx] - pixel_means[bgr_idx]
    return out
def transform_seg_gt(gt):
    """
    Wrap a 2-D segmentation label map as a 1 x 1 x H x W tensor.
    :param gt: [height, width] label map
    :return: [batch = 1, channel = 1, height, width]
    """
    height, width = gt.shape[0], gt.shape[1]
    gt_tensor = np.zeros((1, 1, height, width))
    gt_tensor[0, 0] = gt[:, :]
    return gt_tensor
def transform_inverse(im_tensor, pixel_means):
    """
    Undo `transform`: recover an RGB uint8 image from a 1 x C x H x W tensor.
    im_tensor is limited to one image
    :param im_tensor: [batch = 1, channel, height, width]
    :param pixel_means: [B, G, R pixel means] (numpy array)
    :return: im [height, width, channel(RGB)]
    """
    assert im_tensor.shape[0] == 1
    # Move channels last and take the single batch element.
    im = im_tensor.copy().transpose((0, 2, 3, 1))[0]
    assert im.shape[2] == 3
    # `transform` subtracted the means in RGB order, so add them back
    # with the BGR means reordered to [R, G, B].
    im += pixel_means[[2, 1, 0]]
    return im.astype(np.uint8)
def tensor_vstack(tensor_list, pad=0):
    """
    vertically stack tensors, padding trailing dimensions to the largest size
    All tensors must share the same first-dimension size; the result's first
    dimension is the sum of the inputs' first dimensions, every other
    dimension is the maximum over the inputs, and uncovered slots are filled
    with `pad`.
    :param tensor_list: list of numpy arrays with equal ndim and shape[0]
    :param pad: label to pad with
    :return: tensor with max shape
    """
    dtype = tensor_list[0].dtype
    islice = tensor_list[0].shape[0]
    dimensions = [sum(tensor.shape[0] for tensor in tensor_list)]
    for dim in range(1, tensor_list[0].ndim):
        dimensions.append(max(tensor.shape[dim] for tensor in tensor_list))
    # np.full covers pad == 0 and pad == 1 too, replacing the original
    # zeros/ones/full branch with a single call.
    all_tensor = np.full(tuple(dimensions), pad, dtype=dtype)
    # Generic tuple-of-slices assignment works for any rank, generalizing the
    # original hard-coded 1- to 4-dimensional branches (which raised for
    # higher ranks) while producing identical results for ndim <= 4.
    for ind, tensor in enumerate(tensor_list):
        index = (slice(ind * islice, (ind + 1) * islice),)
        index += tuple(slice(0, size) for size in tensor.shape[1:])
        all_tensor[index] = tensor
    return all_tensor
| [
"os.path.exists",
"PIL.Image.open",
"math.floor",
"numpy.asarray",
"numpy.max",
"numpy.zeros",
"numpy.min",
"cv2.resize",
"cv2.imread",
"numpy.round"
] | [((2736, 2765), 'math.floor', 'floor', (['(height * 1.0 / (n - 1))'], {}), '(height * 1.0 / (n - 1))\n', (2741, 2765), False, 'from math import floor\n'), ((2773, 2801), 'math.floor', 'floor', (['(width * 1.0 / (n - 1))'], {}), '(width * 1.0 / (n - 1))\n', (2778, 2801), False, 'from math import floor\n'), ((4017, 4046), 'math.floor', 'floor', (['(height * 1.0 / (n - 1))'], {}), '(height * 1.0 / (n - 1))\n', (4022, 4046), False, 'from math import floor\n'), ((4054, 4082), 'math.floor', 'floor', (['(width * 1.0 / (n - 1))'], {}), '(width * 1.0 / (n - 1))\n', (4059, 4082), False, 'from math import floor\n'), ((4986, 5020), 'numpy.asarray', 'np.asarray', (['boxes'], {'dtype': 'np.uint16'}), '(boxes, dtype=np.uint16)\n', (4996, 5020), True, 'import numpy as np\n'), ((5055, 5096), 'numpy.asarray', 'np.asarray', (['box_channels'], {'dtype': 'np.uint16'}), '(box_channels, dtype=np.uint16)\n', (5065, 5096), True, 'import numpy as np\n'), ((5129, 5151), 'numpy.asarray', 'np.asarray', (['gt_classes'], {}), '(gt_classes)\n', (5139, 5151), True, 'import numpy as np\n'), ((5186, 5227), 'numpy.asarray', 'np.asarray', (['gt_overlaps'], {'dtype': 'np.float32'}), '(gt_overlaps, dtype=np.float32)\n', (5196, 5227), True, 'import numpy as np\n'), ((5261, 5284), 'numpy.asarray', 'np.asarray', (['max_classes'], {}), '(max_classes)\n', (5271, 5284), True, 'import numpy as np\n'), ((5320, 5344), 'numpy.asarray', 'np.asarray', (['max_overlaps'], {}), '(max_overlaps)\n', (5330, 5344), True, 'import numpy as np\n'), ((8908, 8929), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (8914, 8929), True, 'import numpy as np\n'), ((8948, 8969), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (8954, 8969), True, 'import numpy as np\n'), ((9348, 9408), 'numpy.zeros', 'np.zeros', (['(t_im.shape[0], t_im.shape[1], channel)'], {'dtype': 'int'}), '((t_im.shape[0], t_im.shape[1], channel), dtype=int)\n', (9356, 9408), True, 'import numpy as np\n'), ((10374, 
10422), 'numpy.zeros', 'np.zeros', (['(1, channel, im.shape[0], im.shape[1])'], {}), '((1, channel, im.shape[0], im.shape[1]))\n', (10382, 10422), True, 'import numpy as np\n'), ((11112, 11133), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (11118, 11133), True, 'import numpy as np\n'), ((11152, 11173), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (11158, 11173), True, 'import numpy as np\n'), ((11403, 11489), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'interpolation'}), '(im, None, None, fx=im_scale, fy=im_scale, interpolation=\n interpolation)\n', (11413, 11489), False, 'import cv2\n'), ((12198, 12240), 'numpy.zeros', 'np.zeros', (['(1, 3, im.shape[0], im.shape[1])'], {}), '((1, 3, im.shape[0], im.shape[1]))\n', (12206, 12240), True, 'import numpy as np\n'), ((12560, 12602), 'numpy.zeros', 'np.zeros', (['(1, 1, gt.shape[0], gt.shape[1])'], {}), '((1, 1, gt.shape[0], gt.shape[1]))\n', (12568, 12602), True, 'import numpy as np\n'), ((912, 944), 'os.path.exists', 'os.path.exists', (["roi_rec['image']"], {}), "(roi_rec['image'])\n", (926, 944), False, 'import os\n'), ((1004, 1082), 'cv2.imread', 'cv2.imread', (["roi_rec['image']", '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), "(roi_rec['image'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n", (1014, 1082), False, 'import cv2\n'), ((5794, 5826), 'os.path.exists', 'os.path.exists', (["roi_rec['image']"], {}), "(roi_rec['image'])\n", (5808, 5826), False, 'import os\n'), ((5886, 5964), 'cv2.imread', 'cv2.imread', (["roi_rec['image']", '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), "(roi_rec['image'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n", (5896, 5964), False, 'import cv2\n'), ((7304, 7336), 'os.path.exists', 'os.path.exists', (["seg_rec['image']"], {}), "(seg_rec['image'])\n", (7318, 7336), False, 'import os\n'), ((9089, 9121), 'numpy.round', 'np.round', 
(['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (9097, 9121), True, 'import numpy as np\n'), ((9903, 9946), 'numpy.zeros', 'np.zeros', (['(im_height, im_width, im_channel)'], {}), '((im_height, im_width, im_channel))\n', (9911, 9946), True, 'import numpy as np\n'), ((11293, 11325), 'numpy.round', 'np.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (11301, 11325), True, 'import numpy as np\n'), ((11773, 11816), 'numpy.zeros', 'np.zeros', (['(im_height, im_width, im_channel)'], {}), '((im_height, im_width, im_channel))\n', (11781, 11816), True, 'import numpy as np\n'), ((7405, 7433), 'cv2.imread', 'cv2.imread', (["seg_rec['image']"], {}), "(seg_rec['image'])\n", (7415, 7433), False, 'import cv2\n'), ((7916, 7951), 'PIL.Image.open', 'Image.open', (["seg_rec['seg_cls_path']"], {}), "(seg_rec['seg_cls_path'])\n", (7926, 7951), False, 'from PIL import Image\n')] |
import numpy as np
import torch, os, shutil, pickle
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from kme.data.utils import get_loaders
from interpret.glassbox import ExplainableBoostingClassifier
from xgboost import XGBClassifier
from kme.models.utils import build_kme_net
from kme.tools.training import train_routine, test_routine
from copy import deepcopy
from kme.extern.senn.trainer import init_trainer
from sklearn.metrics import accuracy_score, roc_auc_score
N_EPOCHS = 10
def empty_folder(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
BENCHMARK_DATASETS = ['heart', 'compas', 'adult', 'mammo']
# BENCHMARK_DATASETS = ['credit', ]
# BENCHMARK_DATASETS = ['bank', 'spambase', 'mammo'] #
# BENCHMARK_DATASETS = ['mushroom',]
SKLEARN_MODELS = {
'logistic': lambda : LogisticRegression(),
'tree_small': lambda : DecisionTreeClassifier(max_depth=10),
'tree_big': lambda : DecisionTreeClassifier(),
'ebm': lambda : ExplainableBoostingClassifier(),
'random_forest': lambda : RandomForestClassifier(n_estimators=100),
'xgboost': lambda : XGBClassifier(n_estimators=100, use_label_encoder=False)
}
DEEP_MODELS_ARGS = {
'mlp': {
"feature_net": "BaselineMLP1",
"classifier": "Linear",
"feature_net_args": {
"latent_dim": 16,
"n_feats": 11
},
"classifier_args": {
"latent_dim": 16,
"n_classes": 2
}
},
'flan': {
"feature_net": "SmallTabFeatNet2",
"classifier": "SmallClassifier2",
"feature_net_args": {
"latent_dim": 16,
"n_feats": 11
},
"classifier_args": {
"latent_dim": 16,
"n_classes": 2
}
}
}
SENN_CONFIG_BASE = {
"train": True,
"conceptizer": "IdentityConceptizer",
"parameterizer": "LinearParameterizer",
"hidden_sizes": [64, 128, 64, 32],
"num_concepts": 0,
"concept_names": [],
"num_classes": 2,
"dropout": 0.1,
"aggregator": "SumAggregator",
"lr": 5e-4,
"epochs": N_EPOCHS,
"robustness_loss": "compas_robustness_loss",
"robust_reg": 0.0,
"concept_reg": 0,
"print_freq": 100,
"exp_name": "benchmark",
"batch_size" : 200,
"sparsity_reg": 2e-5,
"eval_freq" : 30,
"manual_seed": 111
}
def update_model_params(n_feats, model):
args = deepcopy(DEEP_MODELS_ARGS[model])
args['feature_net_args']['n_feats'] = n_feats
return args
DEEP_MODELS = {
'mlp': lambda n_feats, device: build_kme_net(update_model_params(n_feats, 'mlp'), device=device),
'flan': lambda n_feats, device: build_kme_net(update_model_params(n_feats, 'flan'), device=device)
}
def loader2array(loader):
X = []
Y = []
for x, y in loader:
X.append(x.detach().numpy())
Y.append(y.detach().numpy())
X = np.concatenate(X, axis=0)
Y = np.concatenate(Y, axis=0)
return X, Y
def train_sklearn(x, y, x_test, y_test):
results = dict()
for k, constructor in SKLEARN_MODELS.items():
print('... ... Benchmarking {}'.format(k))
if k == 'logistic':
scaler = StandardScaler()
xx = scaler.fit_transform(x)
xx_test = scaler.transform(x_test)
else:
xx = x
xx_test = x_test
model = constructor()
model.fit(xx, y)
y_hat = model.predict_proba(xx_test)
results[k] = roc_auc_score(y_test, y_hat[:, 1])
print('{}] AUC = {}'.format(k, results[k]))
return results
def train_senn(train_loader, val_loader, test_loader, n_feats, random_seed, device):
print('... ... Benchmarking SENN')
configs = deepcopy(SENN_CONFIG_BASE)
configs['num_concepts'] = n_feats
configs['concept_names'] = [str(i) for i in range(n_feats)]
configs['exp_name'] = 'senn_benchmark_{}'.format(random_seed)
configs['device'] = device
configs['hidden_sizes'] = [n_feats, 64, 128, 128, 128, 128, 2*n_feats]
configs['manual_seed'] = random_seed
trainer = init_trainer(configs, train_loader, val_loader, test_loader)
trainer._save = False
empty_folder(trainer.checkpoint_dir)
empty_folder(trainer.log_dir)
trainer.run()
return trainer.test()
def train_dnns(train_loader, val_loader, test_loader, n_feats, device):
results = dict()
for k, dnn in DEEP_MODELS.items():
print('... ... Benchmarking {}'.format(k))
model = dnn(n_feats, device)
model.train()
optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=5e-4)
for e in range(N_EPOCHS):
train_routine(e, model, optimizer, train_loader, device, if_tqdm=False, if_print=False, norm_reg=0.)
model.eval()
#_, test_accuracy = test_routine(N_EPOCHS, model, test_loader, device)
true_labels = []
preds = []
for x, y in test_loader:
x = x.to(device)
y = y.detach().cpu().numpy()
true_labels.append(y)
preds.append(torch.softmax(model(x), dim=1).detach().cpu().numpy())
true_labels = np.concatenate(true_labels, axis=0)
preds = np.concatenate(preds, axis=0)
results[k] = roc_auc_score(true_labels, preds[:, 1])
print('{}] AUC = {}'.format(k, results[k]))
return results
def run_experiment(root, dataset, random_seed, device):
results = dict()
train_loader, valid_loader, test_loader = get_loaders(dataset, valid_split=0.1,
test_split=0.2, random_seed=random_seed,
dataroot=root)
train_X, train_Y = loader2array(train_loader)
test_X, test_Y = loader2array(test_loader)
results.update(train_dnns(train_loader, valid_loader, test_loader, n_feats=train_X.shape[1], device=device))
results.update(train_sklearn(train_X, train_Y, test_X, test_Y))
results['senn'] = train_senn(train_loader, valid_loader, test_loader, n_feats=train_X.shape[1],
random_seed=random_seed, device=device)
print('{}] AUC = {}'.format('senn', results['senn']))
return results
if __name__ == '__main__':
ROOT = '/home/phineas/Documents/repos/kme_net/results/benchmarking'
now = datetime.now()
RESULT_PATH = os.path.join(ROOT, 'tabular_benchmarking_{}.pkl'.format(datetime.timestamp(now)))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
RANDOM_SEEDS = [1234, 156, 125, 2834, 542, 438, 683, 255, 986, 776]
all_results = dict()
for dataset in BENCHMARK_DATASETS:
print('Running experiment for ...')
print('... dataset {}'.format(dataset))
curr_results = []
for seed in RANDOM_SEEDS:
print('... seed {}'.format(seed))
res = run_experiment('/home/phineas/Documents/repos/kme_net/data', dataset, seed, device)
curr_results.append(res)
curr_dataset_results = {k: [dic[k] for dic in curr_results] for k in curr_results[0]}
all_results[dataset] = curr_dataset_results
with open(RESULT_PATH, 'wb') as out_file:
pickle.dump(all_results, out_file)
with open(RESULT_PATH + '.backup', 'wb') as out_file:
pickle.dump(all_results, out_file)
| [
"kme.tools.training.train_routine",
"interpret.glassbox.ExplainableBoostingClassifier",
"sklearn.metrics.roc_auc_score",
"torch.cuda.is_available",
"copy.deepcopy",
"kme.extern.senn.trainer.init_trainer",
"os.path.islink",
"os.listdir",
"sklearn.tree.DecisionTreeClassifier",
"os.path.isdir",
"os... | [((716, 734), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (726, 734), False, 'import torch, os, shutil, pickle\n'), ((2906, 2939), 'copy.deepcopy', 'deepcopy', (['DEEP_MODELS_ARGS[model]'], {}), '(DEEP_MODELS_ARGS[model])\n', (2914, 2939), False, 'from copy import deepcopy\n'), ((3387, 3412), 'numpy.concatenate', 'np.concatenate', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3401, 3412), True, 'import numpy as np\n'), ((3421, 3446), 'numpy.concatenate', 'np.concatenate', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (3435, 3446), True, 'import numpy as np\n'), ((4212, 4238), 'copy.deepcopy', 'deepcopy', (['SENN_CONFIG_BASE'], {}), '(SENN_CONFIG_BASE)\n', (4220, 4238), False, 'from copy import deepcopy\n'), ((4569, 4629), 'kme.extern.senn.trainer.init_trainer', 'init_trainer', (['configs', 'train_loader', 'val_loader', 'test_loader'], {}), '(configs, train_loader, val_loader, test_loader)\n', (4581, 4629), False, 'from kme.extern.senn.trainer import init_trainer\n'), ((5982, 6080), 'kme.data.utils.get_loaders', 'get_loaders', (['dataset'], {'valid_split': '(0.1)', 'test_split': '(0.2)', 'random_seed': 'random_seed', 'dataroot': 'root'}), '(dataset, valid_split=0.1, test_split=0.2, random_seed=\n random_seed, dataroot=root)\n', (5993, 6080), False, 'from kme.data.utils import get_loaders\n'), ((6834, 6848), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6846, 6848), False, 'from datetime import datetime\n'), ((756, 786), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (768, 786), False, 'import torch, os, shutil, pickle\n'), ((1328, 1348), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1346, 1348), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1377, 1413), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(10)'}), '(max_depth=10)\n', (1399, 1413), False, 'from sklearn.tree import 
DecisionTreeClassifier\n'), ((1440, 1464), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1462, 1464), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1486, 1517), 'interpret.glassbox.ExplainableBoostingClassifier', 'ExplainableBoostingClassifier', ([], {}), '()\n', (1515, 1517), False, 'from interpret.glassbox import ExplainableBoostingClassifier\n'), ((1549, 1589), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (1571, 1589), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1615, 1671), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'n_estimators': '(100)', 'use_label_encoder': '(False)'}), '(n_estimators=100, use_label_encoder=False)\n', (1628, 1671), False, 'from xgboost import XGBClassifier\n'), ((3966, 4000), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_hat[:, 1]'], {}), '(y_test, y_hat[:, 1])\n', (3979, 4000), False, 'from sklearn.metrics import accuracy_score, roc_auc_score\n'), ((5641, 5676), 'numpy.concatenate', 'np.concatenate', (['true_labels'], {'axis': '(0)'}), '(true_labels, axis=0)\n', (5655, 5676), True, 'import numpy as np\n'), ((5693, 5722), 'numpy.concatenate', 'np.concatenate', (['preds'], {'axis': '(0)'}), '(preds, axis=0)\n', (5707, 5722), True, 'import numpy as np\n'), ((5745, 5784), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_labels', 'preds[:, 1]'], {}), '(true_labels, preds[:, 1])\n', (5758, 5784), False, 'from sklearn.metrics import accuracy_score, roc_auc_score\n'), ((7705, 7739), 'pickle.dump', 'pickle.dump', (['all_results', 'out_file'], {}), '(all_results, out_file)\n', (7716, 7739), False, 'import torch, os, shutil, pickle\n'), ((7806, 7840), 'pickle.dump', 'pickle.dump', (['all_results', 'out_file'], {}), '(all_results, out_file)\n', (7817, 7840), False, 'import torch, os, shutil, pickle\n'), ((3677, 3693), 'sklearn.preprocessing.StandardScaler', 
'StandardScaler', ([], {}), '()\n', (3691, 3693), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5153, 5258), 'kme.tools.training.train_routine', 'train_routine', (['e', 'model', 'optimizer', 'train_loader', 'device'], {'if_tqdm': '(False)', 'if_print': '(False)', 'norm_reg': '(0.0)'}), '(e, model, optimizer, train_loader, device, if_tqdm=False,\n if_print=False, norm_reg=0.0)\n', (5166, 5258), False, 'from kme.tools.training import train_routine, test_routine\n'), ((6923, 6946), 'datetime.datetime.timestamp', 'datetime.timestamp', (['now'], {}), '(now)\n', (6941, 6946), False, 'from datetime import datetime\n'), ((6987, 7012), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7010, 7012), False, 'import torch, os, shutil, pickle\n'), ((815, 840), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (829, 840), False, 'import torch, os, shutil, pickle\n'), ((844, 869), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (858, 869), False, 'import torch, os, shutil, pickle\n'), ((887, 907), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (896, 907), False, 'import torch, os, shutil, pickle\n'), ((925, 949), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (938, 949), False, 'import torch, os, shutil, pickle\n'), ((967, 991), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (980, 991), False, 'import torch, os, shutil, pickle\n')] |
#!/usr/bin/env python
import rospy
import numpy as np
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
lower_red = np.array([150,100,50])
upper_red = np.array([180,255,255])
lower_yellow = np.array([20,130,130])
upper_yellow = np.array([40,255,255])
class SegmentImage():
def __init__(self):
#Subscribirce al topico "/duckiebot/camera_node/image/raw"
self.image_subscriber = rospy.Subscriber("/duckiebot/camera_node/image/raw",Image,self._process_image)
#Clase necesaria para transformar el tipo de imagen
self.bridge = CvBridge()
#Ultima imagen adquirida
self.cv_image = Image()
#publisher
self.pub = rospy.Publisher("/duckiebot/patofiltrado",Image,queue_size=1)
print("explotando en 3, 2, 1...")
def _process_image(self,img):
#Se cambiar mensage tipo ros a imagen opencv
try:
self.cv_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
except CvBridgeError as e:
print(e)
#Se deja en frame la imagen actual
frame = self.cv_image
#Cambiar tipo de color de BGR a HSV
color_space = cv2.COLOR_BGR2HSV
image_out = cv2.cvtColor(frame, color_space)
# Filtrar colores de la imagen en el rango utilizando
mask = cv2.inRange(image_out, lower_yellow, upper_yellow)
# Bitwise-AND mask and original image
segment_image = cv2.bitwise_and(frame,frame, mask= mask)
imga= self.bridge.cv2_to_imgmsg(segment_image, "bgr8")
self.pub.publish(imga)
def main():
rospy.init_node('SegmentImage')
SegmentImage()
rospy.spin()
if __name__ == '__main__':
main()
| [
"rospy.Publisher",
"rospy.init_node",
"cv2.inRange",
"sensor_msgs.msg.Image",
"cv2.bitwise_and",
"cv_bridge.CvBridge",
"numpy.array",
"rospy.spin",
"cv2.cvtColor",
"rospy.Subscriber"
] | [((162, 185), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (170, 185), True, 'import numpy as np\n'), ((197, 222), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (205, 222), True, 'import numpy as np\n'), ((233, 257), 'numpy.array', 'np.array', (['[150, 100, 50]'], {}), '([150, 100, 50])\n', (241, 257), True, 'import numpy as np\n'), ((268, 293), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (276, 293), True, 'import numpy as np\n'), ((307, 331), 'numpy.array', 'np.array', (['[20, 130, 130]'], {}), '([20, 130, 130])\n', (315, 331), True, 'import numpy as np\n'), ((345, 369), 'numpy.array', 'np.array', (['[40, 255, 255]'], {}), '([40, 255, 255])\n', (353, 369), True, 'import numpy as np\n'), ((1709, 1740), 'rospy.init_node', 'rospy.init_node', (['"""SegmentImage"""'], {}), "('SegmentImage')\n", (1724, 1740), False, 'import rospy\n'), ((1766, 1778), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1776, 1778), False, 'import rospy\n'), ((518, 603), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/duckiebot/camera_node/image/raw"""', 'Image', 'self._process_image'], {}), "('/duckiebot/camera_node/image/raw', Image, self._process_image\n )\n", (534, 603), False, 'import rospy\n'), ((680, 690), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (688, 690), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((749, 756), 'sensor_msgs.msg.Image', 'Image', ([], {}), '()\n', (754, 756), False, 'from sensor_msgs.msg import Image\n'), ((804, 867), 'rospy.Publisher', 'rospy.Publisher', (['"""/duckiebot/patofiltrado"""', 'Image'], {'queue_size': '(1)'}), "('/duckiebot/patofiltrado', Image, queue_size=1)\n", (819, 867), False, 'import rospy\n'), ((1321, 1353), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'color_space'], {}), '(frame, color_space)\n', (1333, 1353), False, 'import cv2\n'), ((1433, 1483), 'cv2.inRange', 'cv2.inRange', (['image_out', 'lower_yellow', 'upper_yellow'], {}), 
'(image_out, lower_yellow, upper_yellow)\n', (1444, 1483), False, 'import cv2\n'), ((1555, 1595), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1570, 1595), False, 'import cv2\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
from enum import Enum
import math
import time
from typing import Any, List, Optional, cast
import numpy as np
import torch
import torch.autograd.profiler as profiler
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
from torchvision.models import resnet101
from torchvision.transforms import ToTensor
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim.oss import OSS
OPTIM = torch.optim.RMSprop
def dist_init(rank, world_size, backend):
print(f"Using backend: {backend}")
dist.init_process_group(backend=backend, init_method="tcp://localhost:29501", rank=rank, world_size=world_size)
def get_problem(rank, data_size, batch_size):
# Standard RN101
model = resnet101(pretrained=False, progress=True).to(rank)
# Data setup, dummy data
def collate(inputs: List[Any]):
return {
"inputs": torch.stack([i[0] for i in inputs]).to(torch.device(rank)),
"label": torch.stack([i[1] for i in inputs]).to(torch.device(rank)),
}
dataloader = DataLoader(
dataset=FakeData(transform=ToTensor(), size=data_size, random_offset=rank),
batch_size=batch_size,
collate_fn=collate,
)
loss_fn = nn.CrossEntropyLoss()
return model, dataloader, loss_fn
class OptimType(str, Enum):
vanilla = "pytorch"
oss = "oss"
oss_sdp = "oss_sdp"
everyone = "everyone"
def train(
rank: int,
world_size: int,
num_epochs: int = 10,
batch_size: int = 32,
data_size: int = 200,
backend: str = "gloo",
optim_type: OptimType = OptimType.vanilla,
profile: bool = False,
check_regression: bool = True,
reference_speed: float = -1.0,
reference_memory: float = -1.0,
reference_loss: float = -1.0,
):
# DDP
dist_init(rank=rank, world_size=world_size, backend=backend)
# Setup
torch.cuda.set_device(rank)
torch.cuda.manual_seed(0)
torch.manual_seed(0) # also sets the cuda seed
np.random.seed(0)
if backend == "nccl":
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
model, dataloader, loss_fn = get_problem(rank, data_size, batch_size)
# Shard the optimizer
optimizer: Optional[torch.optim.Optimizer] = None
if optim_type == OptimType.oss_sdp:
ddp = ShardedDataParallel(
module=model,
optimizer=OPTIM,
optimizer_params={"lr": 1e-4, "momentum": 0.9},
world_size=world_size,
broadcast_buffers=True,
)
ddp.train()
optimizer = ddp.optimizer
model = ddp
else:
model = DDP(model, device_ids=[rank], find_unused_parameters=True) # type: ignore
optimizer = (
OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)
if optim_type == OptimType.oss
else OPTIM(model.parameters(), lr=1e-4, momentum=0.9)
)
# Reset the memory use counter
torch.cuda.reset_peak_memory_stats(rank)
# Dummy training loop
torch.cuda.synchronize(rank)
training_start = time.monotonic()
model.train()
measurements = []
final_loss: Optional[float] = -1.0
need_profiling = profile
for epoch in range(num_epochs):
epoch_start = time.monotonic()
for batch in dataloader:
def closure():
model.zero_grad()
outputs = model(batch["inputs"])
loss = loss_fn(outputs, batch["label"])
loss /= world_size
loss.backward()
if optim_type == OptimType.oss_sdp:
ddp.reduce() # Send the gradients to the appropriate shards
return loss
if need_profiling:
print("Profiling the run")
with profiler.profile(use_cuda=True) as prof: # type: ignore
with profiler.record_function("batch"):
final_loss = optimizer.step(closure)
print("profiling done, final loss ", cast(float, final_loss))
if rank == 0:
prof.export_chrome_trace(f"{optim_type}_trace.json")
need_profiling = False # only profile once
else:
final_loss = optimizer.step(closure)
epoch_end = time.monotonic()
if optim_type == OptimType.oss:
# Check the checkpointing in the case of the OSS optimizer
# Memory usage could spill over from there
optimizer = cast(OSS, optimizer)
optimizer.consolidate_state_dict()
if dist.get_rank() == 0:
_ = optimizer.state_dict()
print("... State dict collected")
measurements.append(data_size / (epoch_end - epoch_start))
if dist.get_rank() == 0:
print(f"Epoch {epoch} - processed {measurements[-1]:.2f} img per sec. Loss {final_loss:.3f}")
torch.cuda.synchronize(rank)
training_stop = time.monotonic()
img_per_sec = data_size / (training_stop - training_start) * num_epochs
max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20
print(f"[{dist.get_rank()}] : Training done. {img_per_sec:.2f} img per sec overall")
print(f"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB")
# Compute the mean and average img per second
mean = sum(measurements) / len(measurements)
diff = map(lambda x: pow(x - mean, 2.0), measurements)
std = math.sqrt(sum(diff) / (len(measurements) - 1))
print(f"[{dist.get_rank()}] : Mean speed: {mean:.2f} +/- {std:.2f}")
if check_regression and dist.get_rank() == 0:
assert (mean + 3.0 * std) > reference_speed, "Speed regression detected"
assert max_memory < 1.05 * reference_memory, "Memory use regression detected"
assert abs(cast(float, final_loss) - reference_loss) < 1e-3, "Loss regression detected"
print("[Regression Test] VALID")
dist.destroy_process_group() # type: ignore
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Benchmark the optimizer state sharding, on a typical computer vision workload"
)
parser.add_argument("--world_size", action="store", default=2, type=int)
parser.add_argument("--epochs", action="store", default=10, type=int)
parser.add_argument("--batch_size", action="store", default=32, type=int)
parser.add_argument("--data_size", action="store", default=512, type=int)
parser.add_argument("--check_regression", action="store_true", default=False)
parser.add_argument("--reference_speed", action="store", default=29.7, type=float)
parser.add_argument("--reference_memory", action="store", default=4475, type=float)
parser.add_argument("--reference_loss", action="store", default=0.866, type=float)
parser.add_argument(
"--optim_type", type=OptimType, choices=[o.value for o in OptimType], default=OptimType.everyone
)
parser.add_argument("--gloo", action="store_true", default=False)
parser.add_argument("--profile", action="store_true", default=False)
args = parser.parse_args()
print(f"Benchmark arguments: {args}")
backend = "nccl" if not args.gloo or not torch.cuda.is_available() else "gloo"
if args.optim_type == OptimType.vanilla or args.optim_type == OptimType.everyone:
print("\nBenchmark vanilla optimizer")
mp.spawn(
train,
args=(
args.world_size,
args.epochs,
args.batch_size,
args.data_size,
backend,
OptimType.vanilla,
args.profile,
False, # no regression check
),
nprocs=args.world_size,
join=True,
)
if args.optim_type == OptimType.oss or args.optim_type == OptimType.everyone:
print("\nBenchmark OSS with DDP")
mp.spawn(
train,
args=(
args.world_size,
args.epochs,
args.batch_size,
args.data_size,
backend,
OptimType.oss,
args.profile,
args.check_regression,
args.reference_speed,
args.reference_memory,
args.reference_loss,
),
nprocs=args.world_size,
join=True,
)
if args.optim_type == OptimType.oss_sdp or args.optim_type == OptimType.everyone:
print("\nBenchmark OSS with SDP")
mp.spawn(
train,
args=(
args.world_size,
args.epochs,
args.batch_size,
args.data_size,
backend,
OptimType.oss_sdp,
args.profile,
False, # FIXME: @lefaudeux - SDP should give the same results
-1, # Not checking SDP for speed regression for now, still slower than OSS
args.reference_memory,
args.reference_loss,
),
nprocs=args.world_size,
join=True,
)
| [
"torch.nn.CrossEntropyLoss",
"torch.distributed.destroy_process_group",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.autograd.profiler.record_function",
"torch.distributed.get_rank",
"argparse.ArgumentParser",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"fairscale.nn.dat... | [((787, 903), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'backend', 'init_method': '"""tcp://localhost:29501"""', 'rank': 'rank', 'world_size': 'world_size'}), "(backend=backend, init_method=\n 'tcp://localhost:29501', rank=rank, world_size=world_size)\n", (810, 903), True, 'import torch.distributed as dist\n'), ((1481, 1502), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1500, 1502), True, 'import torch.nn as nn\n'), ((2124, 2151), 'torch.cuda.set_device', 'torch.cuda.set_device', (['rank'], {}), '(rank)\n', (2145, 2151), False, 'import torch\n'), ((2156, 2181), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (2178, 2181), False, 'import torch\n'), ((2186, 2206), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2203, 2206), False, 'import torch\n'), ((2238, 2255), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2252, 2255), True, 'import numpy as np\n'), ((3243, 3283), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', (['rank'], {}), '(rank)\n', (3277, 3283), False, 'import torch\n'), ((3315, 3343), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['rank'], {}), '(rank)\n', (3337, 3343), False, 'import torch\n'), ((3365, 3381), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3379, 3381), False, 'import time\n'), ((5234, 5262), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['rank'], {}), '(rank)\n', (5256, 5262), False, 'import torch\n'), ((5283, 5299), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5297, 5299), False, 'import time\n'), ((6249, 6277), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (6275, 6277), True, 'import torch.distributed as dist\n'), ((6336, 6462), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Benchmark the optimizer state sharding, on a typical computer vision 
workload"""'}), "(description=\n 'Benchmark the optimizer state sharding, on a typical computer vision workload'\n )\n", (6359, 6462), False, 'import argparse\n'), ((2591, 2743), 'fairscale.nn.data_parallel.ShardedDataParallel', 'ShardedDataParallel', ([], {'module': 'model', 'optimizer': 'OPTIM', 'optimizer_params': "{'lr': 0.0001, 'momentum': 0.9}", 'world_size': 'world_size', 'broadcast_buffers': '(True)'}), "(module=model, optimizer=OPTIM, optimizer_params={'lr': \n 0.0001, 'momentum': 0.9}, world_size=world_size, broadcast_buffers=True)\n", (2610, 2743), False, 'from fairscale.nn.data_parallel import ShardedDataParallel\n'), ((2908, 2966), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {'device_ids': '[rank]', 'find_unused_parameters': '(True)'}), '(model, device_ids=[rank], find_unused_parameters=True)\n', (2911, 2966), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((3550, 3566), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3564, 3566), False, 'import time\n'), ((4616, 4632), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (4630, 4632), False, 'import time\n'), ((5393, 5430), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', (['rank'], {}), '(rank)\n', (5424, 5430), False, 'import torch\n'), ((7697, 7876), 'torch.multiprocessing.spawn', 'mp.spawn', (['train'], {'args': '(args.world_size, args.epochs, args.batch_size, args.data_size, backend,\n OptimType.vanilla, args.profile, False)', 'nprocs': 'args.world_size', 'join': '(True)'}), '(train, args=(args.world_size, args.epochs, args.batch_size, args.\n data_size, backend, OptimType.vanilla, args.profile, False), nprocs=\n args.world_size, join=True)\n', (7705, 7876), True, 'import torch.multiprocessing as mp\n'), ((8225, 8485), 'torch.multiprocessing.spawn', 'mp.spawn', (['train'], {'args': '(args.world_size, args.epochs, args.batch_size, args.data_size, backend,\n OptimType.oss, args.profile, args.check_regression, args.\n 
reference_speed, args.reference_memory, args.reference_loss)', 'nprocs': 'args.world_size', 'join': '(True)'}), '(train, args=(args.world_size, args.epochs, args.batch_size, args.\n data_size, backend, OptimType.oss, args.profile, args.check_regression,\n args.reference_speed, args.reference_memory, args.reference_loss),\n nprocs=args.world_size, join=True)\n', (8233, 8485), True, 'import torch.multiprocessing as mp\n'), ((8860, 9087), 'torch.multiprocessing.spawn', 'mp.spawn', (['train'], {'args': '(args.world_size, args.epochs, args.batch_size, args.data_size, backend,\n OptimType.oss_sdp, args.profile, False, -1, args.reference_memory, args\n .reference_loss)', 'nprocs': 'args.world_size', 'join': '(True)'}), '(train, args=(args.world_size, args.epochs, args.batch_size, args.\n data_size, backend, OptimType.oss_sdp, args.profile, False, -1, args.\n reference_memory, args.reference_loss), nprocs=args.world_size, join=True)\n', (8868, 9087), True, 'import torch.multiprocessing as mp\n'), ((980, 1022), 'torchvision.models.resnet101', 'resnet101', ([], {'pretrained': '(False)', 'progress': '(True)'}), '(pretrained=False, progress=True)\n', (989, 1022), False, 'from torchvision.models import resnet101\n'), ((4824, 4844), 'typing.cast', 'cast', (['OSS', 'optimizer'], {}), '(OSS, optimizer)\n', (4828, 4844), False, 'from typing import Any, List, Optional, cast\n'), ((5101, 5116), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5114, 5116), True, 'import torch.distributed as dist\n'), ((5917, 5932), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5930, 5932), True, 'import torch.distributed as dist\n'), ((1176, 1194), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (1188, 1194), False, 'import torch\n'), ((1257, 1275), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (1269, 1275), False, 'import torch\n'), ((4907, 4922), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4920, 4922), True, 'import 
torch.distributed as dist\n'), ((5456, 5471), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5469, 5471), True, 'import torch.distributed as dist\n'), ((5545, 5560), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5558, 5560), True, 'import torch.distributed as dist\n'), ((5829, 5844), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5842, 5844), True, 'import torch.distributed as dist\n'), ((7517, 7542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7540, 7542), False, 'import torch\n'), ((1137, 1172), 'torch.stack', 'torch.stack', (['[i[0] for i in inputs]'], {}), '([i[0] for i in inputs])\n', (1148, 1172), False, 'import torch\n'), ((1218, 1253), 'torch.stack', 'torch.stack', (['[i[1] for i in inputs]'], {}), '([i[1] for i in inputs])\n', (1229, 1253), False, 'import torch\n'), ((1353, 1363), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1361, 1363), False, 'from torchvision.transforms import ToTensor\n'), ((4094, 4125), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {'use_cuda': '(True)'}), '(use_cuda=True)\n', (4110, 4125), True, 'import torch.autograd.profiler as profiler\n'), ((6125, 6148), 'typing.cast', 'cast', (['float', 'final_loss'], {}), '(float, final_loss)\n', (6129, 6148), False, 'from typing import Any, List, Optional, cast\n'), ((4176, 4209), 'torch.autograd.profiler.record_function', 'profiler.record_function', (['"""batch"""'], {}), "('batch')\n", (4200, 4209), True, 'import torch.autograd.profiler as profiler\n'), ((4333, 4356), 'typing.cast', 'cast', (['float', 'final_loss'], {}), '(float, final_loss)\n', (4337, 4356), False, 'from typing import Any, List, Optional, cast\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import glob, re, itertools, inspect
import gzip
from datetime import datetime
from collections import OrderedDict
from os import path
import numpy as np
import six
encoding = 'utf-8'


def _stripped_text(x):
    """Decode raw element bytes as text and trim surrounding whitespace."""
    return str.strip(x.decode(encoding))


def _numeric_string(x, cast):
    """Parse a backslash-delimited numeric string; scalar for a single value."""
    return cast(x.decode(encoding).split('\\')).squeeze()[()]


def _fixed_width(dtype):
    """Build a parser for little-endian fixed-width binary values of `dtype`."""
    return lambda x: np.frombuffer(x, dtype=dtype).squeeze()[()]


# Parsers for the DICOM Value Representations (VRs) this module understands.
# Each entry maps the raw value bytes of a data element to a Python value.
vr_parsers = {
    'CS': _stripped_text,  # Code String
    'SH': _stripped_text,  # Short String
    'LO': _stripped_text,  # Long String
    'ST': _stripped_text,  # Short Text
    'LT': _stripped_text,  # Long Text
    'UT': _stripped_text,  # Unlimited Text
    'PN': _stripped_text,  # Person Name
    'AS': _stripped_text,  # Age String ??? e.g., 039Y
    'DA': lambda x: datetime.strptime(x.decode(encoding), '%Y%m%d').date(),  # Date
    'TM': lambda x: datetime.strptime(x.decode(encoding).strip(), '%H%M%S.%f').time(),  # Time
    'IS': lambda x: _numeric_string(x, np.int_),    # Integer String
    'DS': lambda x: _numeric_string(x, np.float_),  # Decimal String
    'SS': _fixed_width(np.int16),    # Signed Short
    'US': _fixed_width(np.uint16),   # Unsigned Short
    'SL': _fixed_width(np.int32),    # Signed Long
    'UL': _fixed_width(np.uint32),   # Unsigned Long
    'FL': _fixed_width(np.float32),  # Floating Point Single
    'FD': _fixed_width(np.float64),  # Floating Point Double
}
# `gdcmdump --csa input.dcm`
# `dcmstack` in Python
# https://nipy.org/nibabel/dicom/siemens_csa.html
# https://neurostars.org/t/determining-bids-phaseencodingdirection-from-dicom/612 # For PhaseEncodingDirection
def parse_Siemens_CSA(b):
    """Parse the Siemens CSA Image Header Info blob (tag 0029,1010).

    Currently a stub: the binary CSA1 format is not decoded, so an empty
    dict is always returned.
    """
    return {}
def parse_Siemens_CSA2(b):
    """Extract selected protocol parameters from the Siemens CSA2 blob.

    `b` is the raw bytes of private tag (0029,1020), which embeds the
    ASCCONV protocol text; values are pulled out with byte regexes.
    """
    def first_group(pattern, default=None):
        # First capture group of `pattern` in `b`, or `default` when absent.
        hit = re.search(pattern, b)
        return hit.group(1) if hit else default

    def required_str(pattern):
        # Like first_group, but the field must be present; decoded to text.
        # Raises AttributeError if the pattern is not found (as the original did).
        return re.search(pattern, b).group(1).decode(encoding)

    return {
        'ReferenceAmplitude': float(first_group(rb'sTXSPEC\.asNucleusInfo\[0\]\.flReferenceAmplitude\s+=\s+(\S+)\s', default=np.nan)),
        'PhasePartialFourier': required_str(rb'sKSpace\.ucPhasePartialFourier\s+=\s+(\S+)\s'),
        'SlicePartialFourier': required_str(rb'sKSpace\.ucSlicePartialFourier\s+=\s+(\S+)\s'),
        'RefLinesPE': int(first_group(rb'sPat\.lRefLinesPE\s+=\s+(\S+)\s', default=0)),
        'PATMode': required_str(rb'sPat.ucPATMode\s+=\s+(\S+)\s'),
        'RefScanMode': required_str(rb'sPat\.ucRefScanMode\s+=\s+(\S+)\s'),
        'TotalScanTimeSec': float(re.search(rb'lTotalScanTimeSec\s+=\s+(\S+)\s', b).group(1)),
    }
# Lookup tables translating Siemens hex flag codes (as found in the CSA2
# ASCCONV protocol text) into human-readable settings.  Codes whose meaning
# is unconfirmed are marked '??'.
Siemens = {
    # Partial Fourier factor; '0x10' appears to mean "off" (full k-space).
    'PartialFourier': {
        '0x10': None,
        '0x8': '7/8',
        '0x4': '6/8',
        '0x2': '5/8',
    },
    # Parallel acquisition technique mode.
    'PATMode': {
        '0x1': None, #??
        '0x2': 'GRAPPA',
    },
    # Reference scan mode used for parallel-imaging calibration.
    'RefScanMode': {
        '0x1': None, #??
        '0x4': 'GRE',
    },
}
# http://dicom.nema.org/medical/dicom/current/output/chtml/part06/chapter_6.html
# https://dicom.innolitics.com/ciods
# Note that the hex codes must be in upper case.
# Map of DICOM tags ('GGGG,EEEE', hex digits upper-case) to
# (header_field_name, parser) pairs.  parse_dicom_header() stores
# parser(raw_value_bytes) under header_field_name for each tag it meets.
tag_parsers = {
    '0010,0010': ('PatientName', vr_parsers['PN']),
    '0010,0030': ('PatientBirthDate', vr_parsers['DA']),
    '0010,0040': ('PatientSex', vr_parsers['CS']),
    '0010,1010': ('PatientAge', vr_parsers['AS']),
    '0010,1030': ('PatientWeight', vr_parsers['DS']),
    '0002,0013': ('ImplementationVersionName', vr_parsers['SH']),
    '0008,0022': ('AcquisitionDate', vr_parsers['DA']),
    '0008,0032': ('AcquisitionTime', vr_parsers['TM']),
    '0008,103E': ('SeriesDescription', vr_parsers['LO']),
    '0018,0020': ('ScanningSequence', vr_parsers['CS']), # SE Spin Echo, IR Inversion Recovery, GR Gradient Recalled, EP Echo Planar, RM Research Mode
    '0018,0021': ('SequenceVariant', vr_parsers['CS']), # SK segmented k-space, MTC magnetization transfer contrast, SS steady state, TRSS time reversed steady state, SP spoiled, MP MAG prepared, OSP oversampling phase, NONE no sequence variant
    '0018,0023': ('MRAcquisitionType', vr_parsers['CS']), # 2D frequency x phase, 3D frequency x phase x phase
    '0018,0024': ('SequenceName', vr_parsers['SH']), # User defined name for the combination of Scanning Sequence (0018,0020) and Sequence Variant (0018,0021)
    '0018,0050': ('SliceThickness', vr_parsers['DS']),
    '0018,0080': ('RepetitionTime', vr_parsers['DS']),
    '0018,0081': ('EchoTime', vr_parsers['DS']),
    '0018,0082': ('InversionTime', vr_parsers['DS']),
    '0018,0084': ('ImagingFrequency', vr_parsers['DS']),
    # '0018,0086': ('EchoNumber', vr_parsers['IS']), # The echo number used in generating this image. In the case of segmented k-space, it is the effective Echo Number. (However, could be 1 for MB GE EPI)
    '0018,0087': ('MagneticFieldStrength', vr_parsers['DS']),
    '0018,0088': ('SpacingBetweenSlices', vr_parsers['DS']), # Thickness plus gap
    # '0018,0091': ('EchoTrainLength', vr_parsers['IS']), # Number of lines in k-space acquired per excitation per image. (However, could be 1 for MB GE EPI)
    '0018,0095': ('PixelBandwidth', vr_parsers['DS']),
    '0018,1020': ('SoftwareVersion', vr_parsers['LO']),
    '0018,1030': ('ProtocolName', vr_parsers['LO']),
    '0018,1251': ('TransmittingCoil', vr_parsers['SH']),
    '0018,1310': ('AcquisitionMatrix', lambda x : np.array([a for a in vr_parsers['US'](x) if a != 0])), # Dimensions of the acquired frequency/phase data before reconstruction: frequency rows\frequency columns\phase rows\phase columns
    '0018,1312': ('PhaseEncodingDirection', vr_parsers['CS']),
    '0018,1314': ('FlipAngle', vr_parsers['DS']),
    '0018,1316': ('SAR', vr_parsers['DS']),
    '0019,100A': ('n_slices', vr_parsers['US']),
    # https://wiki.humanconnectome.org/download/attachments/40534057/CMRR_MB_Slice_Order.pdf
    # https://wiki.humanconnectome.org/download/attachments/40534057/CMRR_MB_Slice_Order.pdf?version=2&modificationDate=1386950067494&api=v2
    # About Siemens slice timing: "For odd, the most inferior slice is acquired first. For even, the most inferior slice is acquired second."
    # However, "slice excitation always starts with slice0 in CMRR multiband C2P sequences" (i.e., different from Siemens default behavior).
    # "Slice cross-talk effects are minimized with interleaved slice series, hence this is the selected option in the default protocol."
    # "The most convenient and practical way to determine slice timing is by referencing the timing information for each slice under "MosaicRefAcqTimes"
    # ([ms], ordered corresponding to the slice numbering) in vendor private field of the DICOM header.
    # This slice-by-slice timing information is generic (transparent to the multiband factor) for any protocol."
    '0019,1029': ('MosaicRefAcqTimes', vr_parsers['FD']),
    '0020,0010': ('StudyID', lambda x : int(vr_parsers['SH'](x))),
    '0020,0011': ('SeriesNumber', vr_parsers['IS']),
    '0020,0012': ('AcquisitionNumber', vr_parsers['IS']),
    '0020,0013': ('InstanceNumber', vr_parsers['IS']),
    '0020,4000': ('ImageComments', vr_parsers['LT']),
    '0028,0030': ('PixelSpacing', vr_parsers['DS']),
    '0029,1010': ('CSA', parse_Siemens_CSA), # Siemens private element
    '0029,1020': ('CSA2', parse_Siemens_CSA2), # Siemens private element
}
# Siemens private tags (group 0051) parsed like ordinary tags; merged into
# tag_parsers by parse_dicom_header() on each call.
Siemens_parsers = {
    '0051,100E': ('slice_orientation', vr_parsers['SH']),
    '0051,1011': ('acceleration_factor', vr_parsers['SH']),
    '0051,1016': ('reconstruction', vr_parsers['SH']),
}
# Derived header fields computed from tags that were already parsed.  Each
# parser receives the (partially filled) header dict; a parser that touches
# a missing key raises KeyError, which parse_dicom_header() silently skips.
# Fix: regexes are now raw strings ('MB(\d+)' previously relied on the
# deprecated invalid-escape behavior of '\d' in a normal string literal).
custom_parsers = {
    'resolution': lambda header: np.r_[header['PixelSpacing'], header['SliceThickness']],
    'FOV': lambda header: header['resolution'][:2] * header['AcquisitionMatrix'], # Bug: Is "PixelSpacing" also ordered as frequency/phase like "AcquisitionMatrix" does??
    'orientation': lambda header: ('oblique-' if len(header['slice_orientation'])>3 else '') + {'Sag': 'sagittal', 'Cor': 'coronal', 'Tra': 'transversal'}[header['slice_orientation'][:3]],
    'GRAPPA': lambda header: int(header['acceleration_factor'].split()[0][1:]) if 'acceleration_factor' in header and header['acceleration_factor'].startswith('p') else 0, # TODO: The text can be "p2" or "p2 s4". What does "s4" mean?
    'PhasePartialFourier': lambda header: Siemens['PartialFourier'][header['CSA2']['PhasePartialFourier']],
    'SlicePartialFourier': lambda header: Siemens['PartialFourier'][header['CSA2']['SlicePartialFourier']],
    'MultiBand': lambda header: int(re.search(r'MB(\d+)', header['ImageComments']).group(1)) if 'MB' in header['ImageComments'] else None, # https://github.com/CMRR-C2P/MB/issues/223
    'distortion_correction': lambda header: re.search(r'(ND|DIS2D|DIS3D)', header['reconstruction']).group(1),
    'ReferenceAmplitude': lambda header: header['CSA2']['ReferenceAmplitude'],
    'PATMode': lambda header: Siemens['PATMode'][header['CSA2']['PATMode']],
    'RefLinesPE': lambda header: header['CSA2']['RefLinesPE'],
    'RefScanMode': lambda header: Siemens['RefScanMode'][header['CSA2']['RefScanMode']],
    'TotalScanTime': lambda header: header['CSA2']['TotalScanTimeSec'],
    'timestamp': lambda header: datetime.combine(header['AcquisitionDate'], header['AcquisitionTime']).timestamp(),
}
def parse_SQ_data_element(fi):
    '''
    Skip over the items of an SQ (sequence) data element of undefined length.

    The file object `fi` must be positioned right after the SQ element's
    length field; on return it is positioned after the Sequence Delimitation
    Item ("FFFE,E0DD").  Item payloads are not parsed, only skipped.

    We only support Data Element with Explicit VR at present (Table 7.1-2).
    We don't support nested Item at present (2018-09-26).

    References
    ----------
    [1] http://dicom.nema.org/Dicom/2013/output/chtml/part05/chapter_7.html
    '''
    while True:
        # Parse an item: tag is two little-endian uint16 (group, element).
        item_tag = '{0:04X},{1:04X}'.format(*np.frombuffer(fi.read(4), dtype=np.uint16, count=2))
        if item_tag == 'FFFE,E000': # Item (Mark the start of an item)
            item_length = int.from_bytes(fi.read(4), byteorder='little', signed=False)
            if item_length == 4294967295: # 0xFFFFFFFF: Undefined Length
                # Bruteforce scan the byte stream until we hit "FFFE,E00D"
                # Bug: However, this may fail if the payload also contains item with undefined length!
                while True:
                    # Until we hit "FFFE,E00D" (little-endian byte order on disk),
                    # followed by its mandatory zero length field.
                    if fi.read(2) == b'\xfe\xff':
                        if fi.read(2) == b'\x0d\xe0':
                            if fi.read(4) == b'\x00\x00\x00\x00':
                                break
            else:
                # Known length: skip the item payload in one seek.
                fi.seek(item_length, 1)
        elif item_tag == 'FFFE,E00D': # Item Delimitation Item (Mark the end of an item with undefined length)
            item_length = int.from_bytes(fi.read(4), byteorder='little', signed=False)
            assert(item_length == 0)
        elif item_tag == 'FFFE,E0DD': # Sequence Delimitation Item (Mark the end of an SQ with undefined length)
            item_length = int.from_bytes(fi.read(4), byteorder='little', signed=False)
            assert(item_length == 0)
            break
def parse_dicom_header(fname, search_for_tags=None, **kwargs):
    '''
    Parse the header of one DICOM file (optionally gzip-compressed).

    Parameters
    ----------
    fname : str
        Path to a .IMA/.dcm file, or a .gz compressed one.
    search_for_tags : set
        Search for specific dicom tags, and stop file scanning early if all tags of interest are seen.
        e.g., search_for_tags={'0020,0011', '0020,0013'} will search for SeriesNumber and InstanceNumber.
        This will save you some time, esp. when the remote file is accessed via slow data link.
    **kwargs :
        This is only for backward compatibility.

    Returns
    -------
    header : OrderedDict
        Parsed fields named per tag_parsers, plus the derived fields from
        custom_parsers and 'filename' (the real path of `fname`).

    Notes
    -----
    Only Explicit VR little-endian data sets are supported.
    "Implicit and Explicit VR Data Elements shall not coexist in a Data Set and Data Sets nested within it
    (see Section 7.5). Whether a Data Set uses Explicit or Implicit VR, among other characteristics,
    is determined by the negotiated Transfer Syntax (see Section 10 and Annex A)." [1]

    References
    ----------
    [1] http://dicom.nema.org/Dicom/2013/output/chtml/part05/chapter_7.html
    [2] https://stackoverflow.com/questions/119684/parse-dicom-files-in-native-python
    '''
    elements = []
    header = OrderedDict()
    n_tags_seen = 0
    # NOTE: mutates the module-level tag_parsers dict (idempotent, but shared
    # across calls) so Siemens private tags are recognized below.
    tag_parsers.update(Siemens_parsers)
    if fname.endswith('.gz'):
        opener = gzip.open
    else:
        opener = open
    with opener(fname, 'rb') as fi:
        # The preamble
        fi.seek(128) # The first 128 bytes are 0x00
        assert(fi.read(4).decode(encoding) == 'DICM') # The next 4 bytes are "DICM"
        # Data Elements: tag (2+2 bytes), VR (2 bytes), length, then value.
        while True:
            element = {}
            group = fi.read(2)
            if not group:
                break
            element['group'] = '{0:04X}'.format(int.from_bytes(group, byteorder='little', signed=False))
            element['element'] = '{0:04X}'.format(int.from_bytes(fi.read(2), byteorder='little', signed=False))
            tag = ','.join((element['group'], element['element']))
            # print(tag, end='')
            element['VR'] = fi.read(2).decode(encoding)
            # print(':', element['VR'])
            # These VRs use a 2-byte reserved field followed by a 4-byte length;
            # all others use a 2-byte length (Explicit VR encoding rules).
            if element['VR'] in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
                fi.seek(2, 1)
                element['length'] = int.from_bytes(fi.read(4), byteorder='little', signed=False)
            else:
                element['length'] = int.from_bytes(fi.read(2), byteorder='little', signed=False)
            if element['length'] == 4294967295: # 0xFFFFFFFF: Undefined Length
                if element['VR'] == 'SQ':
                    parse_SQ_data_element(fi)
                else:
                    # print(element['VR'])
                    raise NotImplementedError('** Undefined Length')
            elif tag in tag_parsers:
                header[tag_parsers[tag][0]] = tag_parsers[tag][1](fi.read(element['length']))
            else:
                # Unrecognized tag: skip its value without reading it.
                fi.seek(element['length'], 1)
            elements.append(element)
            if search_for_tags is not None and tag in search_for_tags:
                n_tags_seen += 1
                if n_tags_seen == len(search_for_tags):
                    break
    # elements = pd.DataFrame(elements, columns=['group', 'element', 'VR', 'length'])
    # Custom header fields: parsers needing tags we didn't see raise KeyError
    # and are silently skipped.
    for field, parser in custom_parsers.items():
        try:
            header[field] = parser(header)
        except KeyError:
            pass
    header['filename'] = path.realpath(fname)
    return header
def sort_dicom_series(folder):
    '''
    Group the dicom files in a folder by study and series.

    Parameters
    ----------
    folder : string
        Path to the folder containing all the dicom files.

    Returns
    -------
    studies : list of dicts
        [{'0001': [file0, file1, ...], '0002': [files], ...}, {study1}, ...]
        Within each series, file names (basenames) are ordered by
        InstanceNumber.
    '''
    patterns = [path.join(folder, '*' + ext) for ext in ['.IMA', '.dcm', '.dcm.gz']]
    files = sorted(itertools.chain.from_iterable(glob.glob(p) for p in patterns))
    # Only the tags needed for grouping/sorting are parsed, to keep scanning fast:
    # StudyID (0020,0010), SeriesNumber (0020,0011), InstanceNumber (0020,0013).
    headers = [parse_dicom_header(f, search_for_tags={'0020,0010', '0020,0011', '0020,0013'}) for f in files]
    studies = []
    for study_id in np.unique([h['StudyID'] for h in headers]):
        in_study = [h for h in headers if h['StudyID'] == study_id]
        study = OrderedDict()
        for series_id in np.unique([h['SeriesNumber'] for h in in_study]):
            in_series = [h for h in in_study if h['SeriesNumber'] == series_id]
            in_series.sort(key=lambda h: h['InstanceNumber'])  # stable sort
            study['{0:04d}'.format(series_id)] = [path.basename(h['filename']) for h in in_series]
        studies.append(study)
    return studies
def parse_series_info(dicom_files, dicom_ext=None, parser=None, return_headers=False):
    '''
    Summarize one dicom series: volume/slice counts, timing and TR.

    Parameters
    ----------
    dicom_files : list or str
        A list of dicom files (e.g., as provided by sort_dicom_series), or
        a folder that contains a single series (e.g., "../raw_fmri/func01"), or
        a single dicom file.
    dicom_ext : str
        File extension used when globbing a folder (default '.IMA').
    parser : callable
        Maps a file name to a header dict (default parse_dicom_header).
    return_headers : bool
        If True, the per-file header dicts are included as info['headers'].

    Returns
    -------
    info : OrderedDict
        A copy of the first file's header, extended with n_volumes,
        n_slices, first/last/start/stop timestamps, TR (None for a single
        volume) and per-file times relative to the first timestamp.
    '''
    if dicom_ext is None:
        dicom_ext = '.IMA'
    if parser is None:
        parser = parse_dicom_header
    if isinstance(dicom_files, six.string_types): # A single file or a folder
        if path.isdir(dicom_files):
            # Assume there is only one series in the folder
            dicom_files = sorted(glob.glob(path.join(dicom_files, '*'+dicom_ext)))
        else:
            dicom_files = [dicom_files]
    # Parse dicom headers
    headers = [parser(f) for f in dicom_files]
    info = OrderedDict(headers[0])
    # Sanity check: all files must belong to the same study and series.
    assert(np.all(np.array([header['StudyID'] for header in headers])==info['StudyID']))
    assert(np.all(np.array([header['SeriesNumber'] for header in headers])==info['SeriesNumber']))
    # n_volumes, n_slices, TR
    # NOTE(review): assumes AcquisitionNumber increases by 1 per volume and
    # files are sorted -- verify for mosaics vs multi-file volumes.
    info['n_volumes'] = headers[-1]['AcquisitionNumber'] - headers[0]['AcquisitionNumber'] + 1
    if 'n_slices' not in info:
        info['n_slices'] = int(len(headers) / info['n_volumes'])
    info['first'] = headers[0]['timestamp']
    info['last'] = headers[-1]['timestamp']
    # TR estimated from acquisition timestamps (seconds between volumes).
    info['TR'] = (info['last']-info['first'])/(info['n_volumes']-1) if info['n_volumes'] > 1 else None
    # Slice timing
    # if shift_time == 'CMRR':
    #     shift_time = 0
    #     if info['TR'] is not None and 'n_slices' in info and np.mod(info['n_slices'], 2)==0:
    #         slice_order = pares_slice_order(files)[0]
    #         if slice_order == 'interleaved':
    #             shift_time = -info['TR']/2
    # elif shift_time is None:
    #     shift_time = 0
    # info['first'] += shift_time
    # info['last'] += shift_time
    info['start'] = info['first']
    # 'stop' extends one TR past the last volume's start, when TR is known.
    info['stop'] = (info['last'] + info['TR']) if info['TR'] is not None else info['last']
    info['time'] = np.array([header['timestamp'] for header in headers]) - info['timestamp']
    if return_headers:
        info['headers'] = headers
    return info
if __name__ == '__main__':
    # Ad-hoc smoke test: dump the parsed headers of two sample Siemens files
    # (paths are only valid on the original author's machine).
    print(parse_dicom_header('20180626_S18_EP2DBR_S07.MR.S18_APPROVED.0010.0001.2018.06.26.12.47.59.31250.120791562.IMA'))
    print(parse_dicom_header('20180918_S18_FACEID_VIS_S01.MR.S18_APPROVED.0007.0001.2018.09.18.15.52.59.828125.140479.IMA'))
pass | [
"collections.OrderedDict",
"numpy.unique",
"os.path.join",
"os.path.realpath",
"numpy.array",
"os.path.isdir",
"os.path.basename",
"numpy.frombuffer",
"datetime.datetime.combine",
"re.search"
] | [((12433, 12446), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12444, 12446), False, 'from collections import OrderedDict\n'), ((14668, 14688), 'os.path.realpath', 'path.realpath', (['fname'], {}), '(fname)\n', (14681, 14688), False, 'from os import path\n'), ((15286, 15338), 'numpy.unique', 'np.unique', (["[header['StudyID'] for header in headers]"], {}), "([header['StudyID'] for header in headers])\n", (15295, 15338), True, 'import numpy as np\n'), ((16687, 16710), 'collections.OrderedDict', 'OrderedDict', (['headers[0]'], {}), '(headers[0])\n', (16698, 16710), False, 'from collections import OrderedDict\n'), ((2103, 2124), 're.search', 're.search', (['pattern', 'b'], {}), '(pattern, b)\n', (2112, 2124), False, 'import glob, re, itertools, inspect\n'), ((15356, 15369), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15367, 15369), False, 'from collections import OrderedDict\n'), ((15395, 15490), 'numpy.unique', 'np.unique', (["[header['SeriesNumber'] for header in headers if header['StudyID'] == study_id]"], {}), "([header['SeriesNumber'] for header in headers if header['StudyID'\n ] == study_id])\n", (15404, 15490), True, 'import numpy as np\n'), ((16381, 16404), 'os.path.isdir', 'path.isdir', (['dicom_files'], {}), '(dicom_files)\n', (16391, 16404), False, 'from os import path\n'), ((17896, 17949), 'numpy.array', 'np.array', (["[header['timestamp'] for header in headers]"], {}), "([header['timestamp'] for header in headers])\n", (17904, 17949), True, 'import numpy as np\n'), ((16729, 16780), 'numpy.array', 'np.array', (["[header['StudyID'] for header in headers]"], {}), "([header['StudyID'] for header in headers])\n", (16737, 16780), True, 'import numpy as np\n'), ((16818, 16874), 'numpy.array', 'np.array', (["[header['SeriesNumber'] for header in headers]"], {}), "([header['SeriesNumber'] for header in headers])\n", (16826, 16874), True, 'import numpy as np\n'), ((9135, 9190), 're.search', 're.search', 
(['"""(ND|DIS2D|DIS3D)"""', "header['reconstruction']"], {}), "('(ND|DIS2D|DIS3D)', header['reconstruction'])\n", (9144, 9190), False, 'import glob, re, itertools, inspect\n'), ((9613, 9683), 'datetime.datetime.combine', 'datetime.combine', (["header['AcquisitionDate']", "header['AcquisitionTime']"], {}), "(header['AcquisitionDate'], header['AcquisitionTime'])\n", (9629, 9683), False, 'from datetime import datetime\n'), ((1252, 1284), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.int16'}), '(x, dtype=np.int16)\n', (1265, 1284), True, 'import numpy as np\n'), ((1335, 1368), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.uint16'}), '(x, dtype=np.uint16)\n', (1348, 1368), True, 'import numpy as np\n'), ((1421, 1453), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.int32'}), '(x, dtype=np.int32)\n', (1434, 1453), True, 'import numpy as np\n'), ((1503, 1536), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.uint32'}), '(x, dtype=np.uint32)\n', (1516, 1536), True, 'import numpy as np\n'), ((1588, 1622), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1601, 1622), True, 'import numpy as np\n'), ((1682, 1716), 'numpy.frombuffer', 'np.frombuffer', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (1695, 1716), True, 'import numpy as np\n'), ((2898, 2950), 're.search', 're.search', (["b'lTotalScanTimeSec\\\\s+=\\\\s+(\\\\S+)\\\\s'", 'b'], {}), "(b'lTotalScanTimeSec\\\\s+=\\\\s+(\\\\S+)\\\\s', b)\n", (2907, 2950), False, 'import glob, re, itertools, inspect\n'), ((15093, 15121), 'os.path.join', 'path.join', (['folder', "('*' + ext)"], {}), "(folder, '*' + ext)\n", (15102, 15121), False, 'from os import path\n'), ((16509, 16548), 'os.path.join', 'path.join', (['dicom_files', "('*' + dicom_ext)"], {}), "(dicom_files, '*' + dicom_ext)\n", (16518, 16548), False, 'from os import path\n'), ((2367, 2433), 're.search', 're.search', 
(["b'sKSpace\\\\.ucPhasePartialFourier\\\\s+=\\\\s+(\\\\S+)\\\\s'", 'b'], {}), "(b'sKSpace\\\\.ucPhasePartialFourier\\\\s+=\\\\s+(\\\\S+)\\\\s', b)\n", (2376, 2433), False, 'import glob, re, itertools, inspect\n'), ((2488, 2554), 're.search', 're.search', (["b'sKSpace\\\\.ucSlicePartialFourier\\\\s+=\\\\s+(\\\\S+)\\\\s'", 'b'], {}), "(b'sKSpace\\\\.ucSlicePartialFourier\\\\s+=\\\\s+(\\\\S+)\\\\s', b)\n", (2497, 2554), False, 'import glob, re, itertools, inspect\n'), ((2688, 2737), 're.search', 're.search', (["b'sPat.ucPATMode\\\\s+=\\\\s+(\\\\S+)\\\\s'", 'b'], {}), "(b'sPat.ucPATMode\\\\s+=\\\\s+(\\\\S+)\\\\s', b)\n", (2697, 2737), False, 'import glob, re, itertools, inspect\n'), ((2785, 2840), 're.search', 're.search', (["b'sPat\\\\.ucRefScanMode\\\\s+=\\\\s+(\\\\S+)\\\\s'", 'b'], {}), "(b'sPat\\\\.ucRefScanMode\\\\s+=\\\\s+(\\\\S+)\\\\s', b)\n", (2794, 2840), False, 'import glob, re, itertools, inspect\n'), ((8945, 8991), 're.search', 're.search', (['"""MB(\\\\d+)"""', "header['ImageComments']"], {}), "('MB(\\\\d+)', header['ImageComments'])\n", (8954, 8991), False, 'import glob, re, itertools, inspect\n'), ((15517, 15550), 'os.path.basename', 'path.basename', (["header['filename']"], {}), "(header['filename'])\n", (15530, 15550), False, 'from os import path\n')] |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import droidlet.base_util
from droidlet.perception.craftassist import rotation, heuristic_perception
from droidlet.base_util import to_block_center, to_block_pos
from droidlet.shared_data_struct.craftassist_shared_utils import arrange
from droidlet.shared_data_structs import ErrorWithResponse
DEFAULT_NUM_STEPS = 5
def post_process_loc(loc, interpreter):
    """Snap a (possibly fractional) location to integer block coordinates.

    `interpreter` is unused here but kept so the call signature matches the
    places that pass it.
    """
    return to_block_pos(loc)
class ComputeLocations:
    """Resolve a reference location and per-copy offsets in block coordinates.

    Fix: the `objects` parameter previously defaulted to a mutable list
    (`objects=[]`); it now defaults to an immutable tuple, which is
    behaviorally identical for all callers.
    """

    def __call__(
        self,
        interpreter,
        speaker,
        mems,
        steps,
        reldir,
        repeat_num=1,
        repeat_dir=None,
        objects=(),
        padding=(1, 1, 1),
    ):
        """Return (origin, offsets): origin is the computed base block
        position; offsets is one block offset per copy to be placed."""
        # Place at least one copy per requested object.
        repeat_num = max(repeat_num, len(objects))
        player_mem = interpreter.memory.get_player_by_name(speaker)
        get_locs_from_entity = interpreter.get_locs_from_entity
        origin = compute_location_heuristic(player_mem, mems, steps, reldir, get_locs_from_entity)
        if repeat_num > 1:
            # Use the first object's schematic (if any) to size the arrangement.
            schematic = None if len(objects) == 0 else objects[0][0]
            offsets = get_repeat_arrangement(
                player_mem, repeat_num, repeat_dir, mems, schematic, padding
            )
        else:
            offsets = [(0, 0, 0)]
        origin = post_process_loc(origin, interpreter)
        offsets = [post_process_loc(o, interpreter) for o in offsets]
        return origin, offsets
# There will be at least one mem in mems
def compute_location_heuristic(player_mem, mems, steps, reldir, get_locs_from_entity):
    """Compute a target location from reference memories and a relative direction.

    Args:
        player_mem: the speaker's player memory node (yaw used for LEFT/RIGHT etc.)
        mems: non-empty list of reference memory objects; "BETWEEN" requires two
        steps: number of blocks to move along reldir (defaults to DEFAULT_NUM_STEPS)
        reldir: one of "BETWEEN", "INSIDE", "NEAR", "AROUND", a key of
            rotation.DIRECTIONS, or None
        get_locs_from_entity: callback forwarded to heuristic_perception.find_inside

    Returns:
        integer block position (via to_block_pos)

    Raises:
        ErrorWithResponse: if reldir is "INSIDE" and no interior location is found
    """
    loc = mems[0].get_pos()
    if reldir is not None:
        steps = steps or DEFAULT_NUM_STEPS
        if reldir == "BETWEEN":
            # Midpoint of the first two references (np.add returns an array;
            # converted back to a plain tuple).
            loc = (np.add(mems[0].get_pos(), mems[1].get_pos())) / 2
            loc = (loc[0], loc[1], loc[2])
        elif reldir == "INSIDE":
            # Use the first reference with a findable interior location.
            for i in range(len(mems)):
                mem = mems[i]
                # FIXME
                locs = heuristic_perception.find_inside(mem, get_locs_from_entity)
                if len(locs) > 0:
                    break
            if len(locs) == 0:
                raise ErrorWithResponse("I don't know how to go inside there")
            else:
                loc = locs[0]
        elif reldir == "NEAR":
            pass
        elif reldir == "AROUND":
            pass
        else:  # LEFT, RIGHT, etc...
            reldir_vec = rotation.DIRECTIONS[reldir]
            # this should be an inverse transform so we set inverted=True
            yaw, _ = player_mem.get_yaw_pitch()
            dir_vec = rotation.transform(reldir_vec, yaw, 0, inverted=True)
            loc = steps * np.array(dir_vec) + to_block_center(loc)
    elif steps is not None:
        # No direction: offset the block center by [0, 0, steps].
        # NOTE(review): assumes the third coordinate is the intended axis
        # for a bare "N steps" -- confirm against the coordinate convention.
        loc = to_block_center(loc) + [0, 0, steps]
    return to_block_pos(loc)
def get_repeat_arrangement(
    player_mem, repeat_num, repeat_dir, ref_mems, schematic=None, padding=(1, 1, 1)
):
    """Compute one block offset per copy for a repeated placement.

    "AROUND" arranges the copies in a circle around the first reference
    memory; any other direction lays them out on a line along that
    direction (rotated into the speaker's frame).  Defaults to "LEFT".
    """
    if not repeat_dir:
        repeat_dir = "LEFT"
    # eventually fix this to allow number based on shape
    shapeparams = {"N": repeat_num}
    if repeat_dir == "AROUND":
        # TODO vertical "around"
        shapeparams["orient"] = "xy"
        shapeparams["extra_space"] = max(padding)
        # Circle radius is set from the largest extent of the central object.
        bounds = ref_mems[0].get_bounds()
        extents = [bounds[i + 1] - bounds[i] for i in (0, 2, 4)]
        shapeparams["encircled_object_radius"] = max(extents)
        offsets = arrange("circle", schematic, shapeparams)
    else:
        reldir_vec = rotation.DIRECTIONS[repeat_dir]
        # this should be an inverse transform so we set inverted=True
        yaw, _ = player_mem.get_yaw_pitch()
        dir_vec = rotation.transform(reldir_vec, yaw, 0, inverted=True)
        shapeparams["extra_space"] = padding[np.argmax(dir_vec)]
        shapeparams["orient"] = dir_vec
        offsets = arrange("line", schematic, shapeparams)
    return [tuple(to_block_pos(o)) for o in offsets]
| [
"droidlet.shared_data_structs.ErrorWithResponse",
"droidlet.perception.craftassist.rotation.transform",
"numpy.argmax",
"droidlet.base_util.to_block_center",
"numpy.array",
"droidlet.base_util.to_block_pos",
"droidlet.perception.craftassist.heuristic_perception.find_inside",
"droidlet.shared_data_stru... | [((448, 465), 'droidlet.base_util.to_block_pos', 'to_block_pos', (['loc'], {}), '(loc)\n', (460, 465), False, 'from droidlet.base_util import to_block_center, to_block_pos\n'), ((2769, 2786), 'droidlet.base_util.to_block_pos', 'to_block_pos', (['loc'], {}), '(loc)\n', (2781, 2786), False, 'from droidlet.base_util import to_block_center, to_block_pos\n'), ((3489, 3530), 'droidlet.shared_data_struct.craftassist_shared_utils.arrange', 'arrange', (['"""circle"""', 'schematic', 'shapeparams'], {}), "('circle', schematic, shapeparams)\n", (3496, 3530), False, 'from droidlet.shared_data_struct.craftassist_shared_utils import arrange\n'), ((3726, 3779), 'droidlet.perception.craftassist.rotation.transform', 'rotation.transform', (['reldir_vec', 'yaw', '(0)'], {'inverted': '(True)'}), '(reldir_vec, yaw, 0, inverted=True)\n', (3744, 3779), False, 'from droidlet.perception.craftassist import rotation, heuristic_perception\n'), ((3798, 3816), 'numpy.argmax', 'np.argmax', (['dir_vec'], {}), '(dir_vec)\n', (3807, 3816), True, 'import numpy as np\n'), ((3929, 3968), 'droidlet.shared_data_struct.craftassist_shared_utils.arrange', 'arrange', (['"""line"""', 'schematic', 'shapeparams'], {}), "('line', schematic, shapeparams)\n", (3936, 3968), False, 'from droidlet.shared_data_struct.craftassist_shared_utils import arrange\n'), ((3990, 4005), 'droidlet.base_util.to_block_pos', 'to_block_pos', (['o'], {}), '(o)\n', (4002, 4005), False, 'from droidlet.base_util import to_block_center, to_block_pos\n'), ((2721, 2741), 'droidlet.base_util.to_block_center', 'to_block_center', (['loc'], {}), '(loc)\n', (2736, 2741), False, 'from droidlet.base_util import to_block_center, to_block_pos\n'), ((1948, 2007), 'droidlet.perception.craftassist.heuristic_perception.find_inside', 'heuristic_perception.find_inside', (['mem', 'get_locs_from_entity'], {}), '(mem, get_locs_from_entity)\n', (1980, 2007), False, 'from droidlet.perception.craftassist import rotation, 
heuristic_perception\n'), ((2121, 2177), 'droidlet.shared_data_structs.ErrorWithResponse', 'ErrorWithResponse', (['"""I don\'t know how to go inside there"""'], {}), '("I don\'t know how to go inside there")\n', (2138, 2177), False, 'from droidlet.shared_data_structs import ErrorWithResponse\n'), ((2558, 2611), 'droidlet.perception.craftassist.rotation.transform', 'rotation.transform', (['reldir_vec', 'yaw', '(0)'], {'inverted': '(True)'}), '(reldir_vec, yaw, 0, inverted=True)\n', (2576, 2611), False, 'from droidlet.perception.craftassist import rotation, heuristic_perception\n'), ((2658, 2678), 'droidlet.base_util.to_block_center', 'to_block_center', (['loc'], {}), '(loc)\n', (2673, 2678), False, 'from droidlet.base_util import to_block_center, to_block_pos\n'), ((2638, 2655), 'numpy.array', 'np.array', (['dir_vec'], {}), '(dir_vec)\n', (2646, 2655), True, 'import numpy as np\n')] |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for resegmentation.
Resegmentation is local segmentation targeted to specific points in an already
segmented volume. The results of resegmentation can be compared to the original
segments in order to perform object agglomeration.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import os
import numpy as np
from scipy import ndimage
from scipy.special import expit
from tensorflow import gfile
from . import storage
from .inference_utils import timer_counter
from ..utils import bounding_box
def get_starting_location(dists, exclusion_radius):
  """Pick the voxel with the largest distance value and exclude its vicinity.

  Args:
    dists: 3d (z, y, x) ndarray of distances; modified in place -- the
      neighborhood of the returned point is zeroed so subsequent calls
      pick a different location.
    exclusion_radius: object with integer .z, .y, .x attributes giving the
      half-size of the region to clear around the chosen point.

  Returns:
    (z, y, x) index of the maximum of `dists`.
  """
  z, y, x = np.unravel_index(np.argmax(dists), tuple(dists.shape))

  # Mark the area around the new point as 'excluded' by clearing the
  # distance map around it (clamped to the array bounds at the low end).
  er = exclusion_radius
  zs = slice(max(z - er.z, 0), z + er.z + 1)
  ys = slice(max(y - er.y, 0), y + er.y + 1)
  xs = slice(max(x - er.x, 0), x + er.x + 1)
  dists[zs, ys, xs] = 0
  return z, y, x
def get_target_path(request, point_num):
  """Computes the output path for a specific point.

  Args:
    request: ResegmentationRequest proto
    point_num: index of the point of interest within the proto

  Returns:
    path to the output file where resegmentation results will be saved, or
    None if that file already exists
  """
  # Prepare the output directory.
  output_dir = request.output_directory

  id_a = request.points[point_num].id_a
  id_b = request.points[point_num].id_b

  if request.subdir_digits > 1:
    m = hashlib.md5()
    # Fix: hashlib.update() requires bytes; passing str raises TypeError on
    # Python 3, so encode the decimal ids first.
    m.update(str(id_a).encode('utf-8'))
    m.update(str(id_b).encode('utf-8'))
    output_dir = os.path.join(output_dir, m.hexdigest()[:request.subdir_digits])
  gfile.MakeDirs(output_dir)

  # Terminate early if the output already exists.
  dp = request.points[point_num].point
  target_path = os.path.join(output_dir, '%d-%d_at_%d_%d_%d.npz' % (
      id_a, id_b, dp.x, dp.y, dp.z))
  if gfile.Exists(target_path):
    logging.info('Output already exists: %s', target_path)
    return

  return target_path
def get_canvas(point, radius, runner):
  """Creates an FFN Canvas centered on a decision point.

  Args:
    point: decision point as (z, y, x)
    radius: radius around decision point as (z, y, x)
  
    runner: inference Runner object

  Returns:
    inference Canvas object (with alignment), or (None, None) when the
    requested subvolume does not fit inside the segmentation volume.
  """
  center = np.array(point)
  half = np.array(radius)
  corner = center - half
  subvol_size = 2 * half + 1
  end = corner + subvol_size

  vol_size = runner.init_seg_volstore.size
  out_of_bounds = (np.any(corner < 0) or
                   vol_size.z <= end[0] or
                   vol_size.y <= end[1] or
                   vol_size.x <= end[2])
  if out_of_bounds:
    logging.error('Not enough context for: %d, %d, %d; corner: %r; end: %r',
                  point[2], point[1], point[0], corner, end)
    return None, None

  return runner.make_canvas(corner, subvol_size, keep_history=True)
def process_point(request, runner, point_num):
  """Runs resegmentation for a specific point.

  Side effect: saves the resegmentation result (probability maps, seed
  histories, start points, the serialized request) as a compressed .npz
  file at the path produced by get_target_path.

  Args:
    request: ResegmentationRequest proto
    runner: inference Runner object
    point_num: index of the point of interest within the proto
  """
  with timer_counter(runner.counters, 'resegmentation'):
    # get_target_path returns None when the output already exists, in which
    # case there is nothing to do.
    target_path = get_target_path(request, point_num)
    if target_path is None:
      return
    curr = request.points[point_num]
    point = curr.point
    point = point.z, point.y, point.x
    radius = (request.radius.z, request.radius.y, request.radius.x)
    canvas, alignment = get_canvas(point, radius, runner)
    if canvas is None:
      logging.warning('Could not get a canvas object.')
      return

    def unalign_prob(prob):
      # Maps a probability map from the aligned space back to the original
      # (unaligned) coordinate system of the canvas.
      return alignment.align_and_crop(
          canvas.corner_zyx,
          prob,
          alignment.corner,
          alignment.size,
          forward=False)

    is_shift = (canvas.restrictor is not None and
                np.any(canvas.restrictor.shift_mask))
    # An "endpoint" request has only a single segment (no id_b partner).
    is_endpoint = not curr.HasField('id_b')
    seg_a = canvas.segmentation == canvas.local_id(curr.id_a)
    size_a = np.sum(seg_a)
    if is_endpoint:
      size_b = -1
      todo = [seg_a]
    else:
      seg_b = canvas.segmentation == canvas.local_id(curr.id_b)
      size_b = np.sum(seg_b)
      todo = [seg_a, seg_b]

    if size_a == 0 or size_b == 0:
      logging.warning('Segments (%d, %d) local ids (%d, %d) not found in input '
                      'at %r. Current values are: %r.',
                      curr.id_a, curr.id_b, canvas.local_id(curr.id_a),
                      canvas.local_id(curr.id_b), point,
                      np.unique(canvas.segmentation))
      canvas._deregister_client()  # pylint:disable=protected-access
      return

    if is_endpoint:
      # Endpoint mode: resegment from scratch, with no context at all.
      canvas.seg_prob[:] = 0.0
      canvas.segmentation[:] = 0
    else:
      # Clear the two segments in question, but keep everything else as
      # context.
      canvas.segmentation[seg_a] = 0
      canvas.segmentation[seg_b] = 0
      canvas.seg_prob[seg_a] = 0.0
      canvas.seg_prob[seg_b] = 0.0

    # Decision point in canvas-local (aligned) coordinates.
    transformed_point = alignment.transform(np.array([point]).T)
    tz, ty, tx = transformed_point[:, 0]
    oz, oy, ox = canvas.corner_zyx
    tz -= oz
    ty -= oy
    tx -= ox

    # First index enumerates the original segments. Second index,
    # when present, enumerates segmentation attempts.
    raw_probs = []
    probs = []
    deletes = []
    histories = []
    start_points = [[], []]

    # Restrict recovery analysis to a box around the decision point when an
    # analysis_radius is given; otherwise analyze the whole canvas.
    if request.HasField('analysis_radius'):
      ar = request.analysis_radius
      analysis_box = bounding_box.BoundingBox(
          start=(radius[2] - ar.x,
                 radius[1] - ar.y,
                 radius[0] - ar.z),
          size=(2 * ar.x + 1, 2 * ar.y + 1, 2 * ar.z + 1))
    else:
      analysis_box = bounding_box.BoundingBox(
          (0, 0, 0), canvas.image.shape[::-1])

    options = request.inference.inference_options
    for i, seg in enumerate(todo):
      logging.info('processing object %d', i)
      with timer_counter(canvas.counters, 'edt'):
        # Distance transform is used to pick seed locations deep inside
        # the original segment (anisotropy handled via pixelsize sampling).
        ps = runner.init_seg_volstore.info.pixelsize
        dists = ndimage.distance_transform_edt(seg, sampling=(ps.z, ps.y, ps.x))

      # Do not seed where not enough context is available.
      dists[:canvas.margin[0], :, :] = 0
      dists[:, :canvas.margin[1], :] = 0
      dists[:, :, :canvas.margin[2]] = 0
      dists[-canvas.margin[0]:, :, :] = 0
      dists[:, -canvas.margin[1]:, :] = 0
      dists[:, :, -canvas.margin[2]:] = 0
      canvas.log_info('EDT computation done')

      # Optionally exclude a region around the decision point from seeding.
      if request.HasField('init_exclusion_radius'):
        ier = request.init_exclusion_radius
        dists[tz - ier.z:tz + ier.z + 1,
              ty - ier.y:ty + ier.y + 1,
              tx - ier.x:tx + ier.x + 1] = 0

      seg_prob = None
      recovered = False
      # Retry segmentation from different seed points until an acceptable
      # fraction of the original segment is recovered.
      for _ in range(request.max_retry_iters):
        z0, y0, x0 = get_starting_location(dists, request.exclusion_radius)
        # Seed must lie within the original segment.
        if not seg[z0, y0, x0]:
          continue
        canvas.log_info('.. starting segmentation at (xyz): %d %d %d',
                        x0, y0, z0)
        canvas.segment_at((z0, y0, x0))
        seg_prob = expit(canvas.seed)
        start_points[i].append((x0, y0, z0))

        # Check if we recovered an acceptable fraction of the initial segment
        # in which the seed was located.
        recovered = True
        crop_seg = seg[analysis_box.to_slice()]
        crop_prob = seg_prob[analysis_box.to_slice()]
        start_size = np.sum(crop_seg)
        segmented_voxels = np.sum((crop_prob >= options.segment_threshold) &
                                  crop_seg)
        if request.segment_recovery_fraction > 0:
          if segmented_voxels / start_size >= request.segment_recovery_fraction:
            break
        elif segmented_voxels >= options.min_segment_size:
          break
        recovered = False

      # Store resegmentation results.
      if seg_prob is not None:
        qprob = storage.quantize_probability(seg_prob)
        raw_probs.append(qprob)
        probs.append(unalign_prob(qprob))
        deletes.append(np.array(canvas.history_deleted))
        histories.append(np.array(canvas.history))

      if request.terminate_early:
        # Skip the second object if the first one was not recovered, or if
        # the first resegmentation failed to recover enough of the second
        # segment (they should overlap when the merge decision is positive
        # -- NOTE(review): inferred; confirm against the request semantics).
        if not recovered:
          break
        if (request.segment_recovery_fraction > 0 and i == 0 and
            len(todo) > 1):
          seg2 = todo[1]
          crop_seg = seg2[analysis_box.to_slice()]
          size2 = np.sum(crop_seg)
          segmented_voxels2 = np.sum(
              (crop_prob >= options.segment_threshold) & crop_seg)
          if segmented_voxels2 / size2 < request.segment_recovery_fraction:
            break

    canvas.log_info('saving results to %s', target_path)
    with storage.atomic_file(target_path) as fd:
      np.savez_compressed(fd,
                          probs=np.array(probs),
                          raw_probs=np.array(raw_probs),
                          deletes=np.array(deletes),
                          histories=np.array(histories),
                          start_points=start_points,
                          request=request.SerializeToString(),
                          counters=canvas.counters.dumps(),
                          corner_zyx=canvas.corner_zyx,
                          is_shift=is_shift)
    canvas.log_info('.. save complete')

  # Cannot `del canvas` here in Python 2 -- deleting an object referenced
  # in a nested scope is a syntax error.
  canvas._deregister_client()  # pylint:disable=protected-access
def process(request, runner):
  """Runs resegmentation for every point in the request, in order.

  Args:
    request: ResegmentationRequest proto
    runner: inference Runner object
  """
  total = len(request.points)
  for idx in range(total):
    logging.info('processing %d/%d', idx, total)
    process_point(request, runner, idx)
| [
"scipy.ndimage.distance_transform_edt",
"tensorflow.gfile.Exists",
"hashlib.md5",
"numpy.unique",
"os.path.join",
"numpy.argmax",
"numpy.any",
"logging.warning",
"scipy.special.expit",
"numpy.array",
"numpy.sum",
"tensorflow.gfile.MakeDirs",
"logging.info",
"logging.error"
] | [((2284, 2310), 'tensorflow.gfile.MakeDirs', 'gfile.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (2298, 2310), False, 'from tensorflow import gfile\n'), ((2417, 2503), 'os.path.join', 'os.path.join', (['output_dir', "('%d-%d_at_%d_%d_%d.npz' % (id_a, id_b, dp.x, dp.y, dp.z))"], {}), "(output_dir, '%d-%d_at_%d_%d_%d.npz' % (id_a, id_b, dp.x, dp.y,\n dp.z))\n", (2429, 2503), False, 'import os\n'), ((2512, 2537), 'tensorflow.gfile.Exists', 'gfile.Exists', (['target_path'], {}), '(target_path)\n', (2524, 2537), False, 'from tensorflow import gfile\n'), ((2896, 2911), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (2904, 2911), True, 'import numpy as np\n'), ((2923, 2939), 'numpy.array', 'np.array', (['radius'], {}), '(radius)\n', (2931, 2939), True, 'import numpy as np\n'), ((1353, 1369), 'numpy.argmax', 'np.argmax', (['dists'], {}), '(dists)\n', (1362, 1369), True, 'import numpy as np\n'), ((2139, 2152), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (2150, 2152), False, 'import hashlib\n'), ((2543, 2597), 'logging.info', 'logging.info', (['"""Output already exists: %s"""', 'target_path'], {}), "('Output already exists: %s', target_path)\n", (2555, 2597), False, 'import logging\n'), ((3034, 3052), 'numpy.any', 'np.any', (['(corner < 0)'], {}), '(corner < 0)\n', (3040, 3052), True, 'import numpy as np\n'), ((3212, 3331), 'logging.error', 'logging.error', (['"""Not enough context for: %d, %d, %d; corner: %r; end: %r"""', 'point[2]', 'point[1]', 'point[0]', 'corner', 'end'], {}), "('Not enough context for: %d, %d, %d; corner: %r; end: %r',\n point[2], point[1], point[0], corner, end)\n", (3225, 3331), False, 'import logging\n'), ((4574, 4587), 'numpy.sum', 'np.sum', (['seg_a'], {}), '(seg_a)\n', (4580, 4587), True, 'import numpy as np\n'), ((10153, 10200), 'logging.info', 'logging.info', (['"""processing %d/%d"""', 'i', 'num_points'], {}), "('processing %d/%d', i, num_points)\n", (10165, 10200), False, 'import logging\n'), ((4094, 4143), 
'logging.warning', 'logging.warning', (['"""Could not get a canvas object."""'], {}), "('Could not get a canvas object.')\n", (4109, 4143), False, 'import logging\n'), ((4416, 4452), 'numpy.any', 'np.any', (['canvas.restrictor.shift_mask'], {}), '(canvas.restrictor.shift_mask)\n', (4422, 4452), True, 'import numpy as np\n'), ((4737, 4750), 'numpy.sum', 'np.sum', (['seg_b'], {}), '(seg_b)\n', (4743, 4750), True, 'import numpy as np\n'), ((6434, 6473), 'logging.info', 'logging.info', (['"""processing object %d"""', 'i'], {}), "('processing object %d', i)\n", (6446, 6473), False, 'import logging\n'), ((5104, 5134), 'numpy.unique', 'np.unique', (['canvas.segmentation'], {}), '(canvas.segmentation)\n', (5113, 5134), True, 'import numpy as np\n'), ((5591, 5608), 'numpy.array', 'np.array', (['[point]'], {}), '([point])\n', (5599, 5608), True, 'import numpy as np\n'), ((6594, 6658), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['seg'], {'sampling': '(ps.z, ps.y, ps.x)'}), '(seg, sampling=(ps.z, ps.y, ps.x))\n', (6624, 6658), False, 'from scipy import ndimage\n'), ((7718, 7736), 'scipy.special.expit', 'expit', (['canvas.seed'], {}), '(canvas.seed)\n', (7723, 7736), False, 'from scipy.special import expit\n'), ((8051, 8067), 'numpy.sum', 'np.sum', (['crop_seg'], {}), '(crop_seg)\n', (8057, 8067), True, 'import numpy as np\n'), ((8095, 8154), 'numpy.sum', 'np.sum', (['((crop_prob >= options.segment_threshold) & crop_seg)'], {}), '((crop_prob >= options.segment_threshold) & crop_seg)\n', (8101, 8154), True, 'import numpy as np\n'), ((9389, 9404), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (9397, 9404), True, 'import numpy as np\n'), ((9440, 9459), 'numpy.array', 'np.array', (['raw_probs'], {}), '(raw_probs)\n', (9448, 9459), True, 'import numpy as np\n'), ((9493, 9510), 'numpy.array', 'np.array', (['deletes'], {}), '(deletes)\n', (9501, 9510), True, 'import numpy as np\n'), ((9546, 9565), 'numpy.array', 'np.array', (['histories'], 
{}), '(histories)\n', (9554, 9565), True, 'import numpy as np\n'), ((8662, 8694), 'numpy.array', 'np.array', (['canvas.history_deleted'], {}), '(canvas.history_deleted)\n', (8670, 8694), True, 'import numpy as np\n'), ((8721, 8745), 'numpy.array', 'np.array', (['canvas.history'], {}), '(canvas.history)\n', (8729, 8745), True, 'import numpy as np\n'), ((9011, 9027), 'numpy.sum', 'np.sum', (['crop_seg'], {}), '(crop_seg)\n', (9017, 9027), True, 'import numpy as np\n'), ((9058, 9117), 'numpy.sum', 'np.sum', (['((crop_prob >= options.segment_threshold) & crop_seg)'], {}), '((crop_prob >= options.segment_threshold) & crop_seg)\n', (9064, 9117), True, 'import numpy as np\n')] |
from model import MusicVAE
from loader import load_noteseqs
import numpy as np
import tensorflow as tf
import argparse
# Clear any TF1 global graph state left over from a previous run.
tf.reset_default_graph()

# Command-line options: training setup, encoder/decoder RNN hyper-parameters,
# and latent-space / KL-regularization settings for the MusicVAE model.
ap = argparse.ArgumentParser()
ap.add_argument("-bs", "--batch_size", default=32, type=int)
ap.add_argument("-s", "--save_path", default="vae/", type=str)
ap.add_argument("-e", "--epochs", default=100, type=int)
# Datasets are given as whitespace-separated lists of pickle files.
ap.add_argument("--train_set", default="data/Jsbtr.pkl data/Nmdtr.pkl", type=str)
ap.add_argument("--test_set", default="data/Jsbte.pkl data/Nmdte.pkl", type=str)
# Per-step feature depths, space-separated (parsed into a list of ints below).
ap.add_argument("--x_depth", default="89 33 33", type=str)
# Encoder RNN settings.
ap.add_argument("--enc_rnn", default="hyperlstm", type=str)
ap.add_argument("--enc_rnn_dim", default=512, type=int)
ap.add_argument("--enc_hyper_unit", default=256, type=int)
ap.add_argument("--enc_dropout", default=0.25, type=float)
ap.add_argument("--enc_rnn_layer", default=1, type=int)
# Decoder RNN settings.
ap.add_argument("--dec_rnn", default="hyperlstm", type=str)
ap.add_argument("--dec_rnn_dim", default=512, type=int)
ap.add_argument("--dec_hyper_unit", default=256, type=int)
ap.add_argument("--dec_dropout", default=0.25, type=float)
ap.add_argument("--dec_rnn_layer", default=1, type=int)
ap.add_argument("--attention", default=128, type=int)
# Latent space: continuous dimension plus a categorical (style) dimension.
ap.add_argument("--cont_dim", default=100, type=int)
ap.add_argument("--cat_dim", default=2, type=int)
ap.add_argument("--style_embed_dim", default=100, type=int)
ap.add_argument("--mu_force", default=2.0, type=float)
ap.add_argument("--gumbel", default=0.67, type=float)  # presumably a Gumbel-softmax temperature -- confirm in model.py
ap.add_argument("--kl_reg", default=1.0, type=float)
ap.add_argument("--kl_anneal", default=1000, type=int)
# Optional checkpoint to resume training from.
ap.add_argument("--restore_path", default=None, type=str)
args = ap.parse_args()

# x_depth / dataset paths arrive as single strings; split them into lists.
x_depth = args.x_depth.split()
x_depth = [int(i) for i in x_depth]
train_set = args.train_set.split()
test_set = args.test_set.split()
# Build separate TF1 graphs for training and validation so the two MusicVAE
# instances (dropout on vs. off) do not share graph state.
train_graph = tf.Graph()
val_graph = tf.Graph()

with train_graph.as_default():
    # Training input pipeline: augmented batches from the training set.
    t_it, t_x, t_s, t_l = load_noteseqs(train_set, x_depth,
                                        batch_size=args.batch_size, augment=True).get_iterator()
    # Training model: dropout enabled, training=True.
    m = MusicVAE(x_depth=x_depth,
                 enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args.enc_hyper_unit, enc_dropout=args.enc_dropout,
                 dec_rnn_dim=args.dec_rnn_dim, dec_hyper_unit=args.dec_hyper_unit, dec_dropout=args.dec_dropout,
                 enc_rnn_layer=args.enc_rnn_layer, dec_rnn_layer=args.dec_rnn_layer,
                 enc_rnn=args.enc_rnn, dec_rnn=args.dec_rnn,
                 attention=args.attention,
                 cont_dim=args.cont_dim, cat_dim=args.cat_dim, mu_force=args.mu_force,
                 gumbel=args.gumbel, style_embed_dim=args.style_embed_dim,
                 kl_reg=args.kl_reg,
                 training=True, beta_anneal_steps=args.kl_anneal)
    m.build(t_x, t_s, t_l, None)

with val_graph.as_default():
    # Validation input pipeline: fixed batch size of 20, no augmentation.
    v_it, v_x, v_s, v_l = load_noteseqs(test_set, x_depth,
                                        batch_size=20).get_iterator()
    # Validation model: identical architecture but dropout disabled and
    # training=False; its weights are loaded from training checkpoints.
    n = MusicVAE(x_depth=x_depth,
                 enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args.enc_hyper_unit, enc_dropout=0.0,
                 dec_rnn_dim=args.dec_rnn_dim, dec_hyper_unit=args.dec_hyper_unit, dec_dropout=0.0,
                 enc_rnn_layer=args.enc_rnn_layer, dec_rnn_layer=args.dec_rnn_layer,
                 enc_rnn=args.enc_rnn, dec_rnn=args.dec_rnn,
                 attention=args.attention,
                 cont_dim=args.cont_dim, cat_dim=args.cat_dim, mu_force=args.mu_force,
                 gumbel=args.gumbel, style_embed_dim=args.style_embed_dim,
                 kl_reg=args.kl_reg,
                 training=False, beta_anneal_steps=args.kl_anneal)
    n.build(v_x, v_s, v_l, None)
# Allow GPU memory to grow on demand and fall back to CPU for ops without a
# GPU kernel.
tf_config = tf.ConfigProto()
tf_config.allow_soft_placement = True
tf_config.gpu_options.allow_growth = True
# One session per graph: `sess` drives training, `ss` drives validation.
sess = tf.Session(config=tf_config, graph=train_graph)
ss = tf.Session(config=tf_config, graph=val_graph)

# Either resume from a checkpoint or start from fresh variable init.
if args.restore_path:
    print("[INFO] Restoring from checkpoint {}".format(args.restore_path))
    m.saver.restore(sess, args.restore_path)
else:
    sess.run(m.init)

step = 0
# Separate TensorBoard writers for the train and validation curves.
tw = tf.summary.FileWriter(args.save_path+"train", sess.graph)
vw = tf.summary.FileWriter(args.save_path+"val", ss.graph)

print("[INFO] Start training...")
for epoch in range(args.epochs):
    sess.run(t_it.initializer)
    train_loss = []
    train_kl = []
    # One pass over the training set; the iterator signals the end of the
    # epoch by raising OutOfRangeError.
    while True:
        try:
            # Emit a TensorBoard summary on the first step and every 20th step.
            if (step+1)%20 == 0 or step == 0:
                _, tmp_loss, tmp_kl, step, summ = sess.run([m.op, m.recon_loss, m.kl_loss, m.step, m.summ_op])
                tw.add_summary(summ, step)
            else:
                _, tmp_loss, tmp_kl, step = sess.run([m.op, m.recon_loss, m.kl_loss, m.step])
            train_loss.append(tmp_loss)
            train_kl.append(tmp_kl)
        except tf.errors.OutOfRangeError:
            break
    # Checkpoint the training model, then load the same weights into the
    # validation model (which lives in a different graph/session).
    m.saver.save(sess, args.save_path + "vae-epoch{}".format(epoch+1))
    n.saver.restore(ss, args.save_path + "vae-epoch{}".format(epoch+1))
    val_loss = []
    val_kl = []
    ss.run(v_it.initializer)
    while True:
        try:
            tmp_loss, tmp_kl, summ = ss.run([n.recon_loss, n.kl_loss, n.summ_op])
            val_loss.append(tmp_loss)
            val_kl.append(tmp_kl)
        except tf.errors.OutOfRangeError:
            # Only the last validation summary of the epoch is written.
            vw.add_summary(summ, step)
            break
    train_loss = np.mean(train_loss)
    train_kl = np.mean(train_kl)
    val_loss = np.mean(val_loss)
    val_kl = np.mean(val_kl)
    print("{} Train Loss: {:.4f} Train KL: {:.2f} Val Loss: {:.4f} Val KL: {:.2f}".format(epoch+1, train_loss, train_kl, val_loss, val_kl))
| [
"tensorflow.Graph",
"numpy.mean",
"loader.load_noteseqs",
"tensorflow.reset_default_graph",
"argparse.ArgumentParser",
"tensorflow.Session",
"model.MusicVAE",
"tensorflow.ConfigProto",
"tensorflow.summary.FileWriter"
] | [((120, 144), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (142, 144), True, 'import tensorflow as tf\n'), ((151, 176), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (174, 176), False, 'import argparse\n'), ((1834, 1844), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1842, 1844), True, 'import tensorflow as tf\n'), ((1857, 1867), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1865, 1867), True, 'import tensorflow as tf\n'), ((3707, 3723), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3721, 3723), True, 'import tensorflow as tf\n'), ((3812, 3859), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config', 'graph': 'train_graph'}), '(config=tf_config, graph=train_graph)\n', (3822, 3859), True, 'import tensorflow as tf\n'), ((3865, 3910), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config', 'graph': 'val_graph'}), '(config=tf_config, graph=val_graph)\n', (3875, 3910), True, 'import tensorflow as tf\n'), ((4100, 4159), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(args.save_path + 'train')", 'sess.graph'], {}), "(args.save_path + 'train', sess.graph)\n", (4121, 4159), True, 'import tensorflow as tf\n'), ((4163, 4218), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(args.save_path + 'val')", 'ss.graph'], {}), "(args.save_path + 'val', ss.graph)\n", (4184, 4218), True, 'import tensorflow as tf\n'), ((2067, 2653), 'model.MusicVAE', 'MusicVAE', ([], {'x_depth': 'x_depth', 'enc_rnn_dim': 'args.enc_rnn_dim', 'enc_hyper_unit': 'args.enc_hyper_unit', 'enc_dropout': 'args.enc_dropout', 'dec_rnn_dim': 'args.dec_rnn_dim', 'dec_hyper_unit': 'args.dec_hyper_unit', 'dec_dropout': 'args.dec_dropout', 'enc_rnn_layer': 'args.enc_rnn_layer', 'dec_rnn_layer': 'args.dec_rnn_layer', 'enc_rnn': 'args.enc_rnn', 'dec_rnn': 'args.dec_rnn', 'attention': 'args.attention', 'cont_dim': 'args.cont_dim', 'cat_dim': 'args.cat_dim', 'mu_force': 'args.mu_force', 
'gumbel': 'args.gumbel', 'style_embed_dim': 'args.style_embed_dim', 'kl_reg': 'args.kl_reg', 'training': '(True)', 'beta_anneal_steps': 'args.kl_anneal'}), '(x_depth=x_depth, enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args\n .enc_hyper_unit, enc_dropout=args.enc_dropout, dec_rnn_dim=args.\n dec_rnn_dim, dec_hyper_unit=args.dec_hyper_unit, dec_dropout=args.\n dec_dropout, enc_rnn_layer=args.enc_rnn_layer, dec_rnn_layer=args.\n dec_rnn_layer, enc_rnn=args.enc_rnn, dec_rnn=args.dec_rnn, attention=\n args.attention, cont_dim=args.cont_dim, cat_dim=args.cat_dim, mu_force=\n args.mu_force, gumbel=args.gumbel, style_embed_dim=args.style_embed_dim,\n kl_reg=args.kl_reg, training=True, beta_anneal_steps=args.kl_anneal)\n', (2075, 2653), False, 'from model import MusicVAE\n'), ((2979, 3537), 'model.MusicVAE', 'MusicVAE', ([], {'x_depth': 'x_depth', 'enc_rnn_dim': 'args.enc_rnn_dim', 'enc_hyper_unit': 'args.enc_hyper_unit', 'enc_dropout': '(0.0)', 'dec_rnn_dim': 'args.dec_rnn_dim', 'dec_hyper_unit': 'args.dec_hyper_unit', 'dec_dropout': '(0.0)', 'enc_rnn_layer': 'args.enc_rnn_layer', 'dec_rnn_layer': 'args.dec_rnn_layer', 'enc_rnn': 'args.enc_rnn', 'dec_rnn': 'args.dec_rnn', 'attention': 'args.attention', 'cont_dim': 'args.cont_dim', 'cat_dim': 'args.cat_dim', 'mu_force': 'args.mu_force', 'gumbel': 'args.gumbel', 'style_embed_dim': 'args.style_embed_dim', 'kl_reg': 'args.kl_reg', 'training': '(False)', 'beta_anneal_steps': 'args.kl_anneal'}), '(x_depth=x_depth, enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args\n .enc_hyper_unit, enc_dropout=0.0, dec_rnn_dim=args.dec_rnn_dim,\n dec_hyper_unit=args.dec_hyper_unit, dec_dropout=0.0, enc_rnn_layer=args\n .enc_rnn_layer, dec_rnn_layer=args.dec_rnn_layer, enc_rnn=args.enc_rnn,\n dec_rnn=args.dec_rnn, attention=args.attention, cont_dim=args.cont_dim,\n cat_dim=args.cat_dim, mu_force=args.mu_force, gumbel=args.gumbel,\n style_embed_dim=args.style_embed_dim, kl_reg=args.kl_reg, training=\n False, beta_anneal_steps=args.kl_anneal)\n', 
(2987, 3537), False, 'from model import MusicVAE\n'), ((5423, 5442), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (5430, 5442), True, 'import numpy as np\n'), ((5458, 5475), 'numpy.mean', 'np.mean', (['train_kl'], {}), '(train_kl)\n', (5465, 5475), True, 'import numpy as np\n'), ((5491, 5508), 'numpy.mean', 'np.mean', (['val_loss'], {}), '(val_loss)\n', (5498, 5508), True, 'import numpy as np\n'), ((5522, 5537), 'numpy.mean', 'np.mean', (['val_kl'], {}), '(val_kl)\n', (5529, 5537), True, 'import numpy as np\n'), ((1927, 2002), 'loader.load_noteseqs', 'load_noteseqs', (['train_set', 'x_depth'], {'batch_size': 'args.batch_size', 'augment': '(True)'}), '(train_set, x_depth, batch_size=args.batch_size, augment=True)\n', (1940, 2002), False, 'from loader import load_noteseqs\n'), ((2867, 2914), 'loader.load_noteseqs', 'load_noteseqs', (['test_set', 'x_depth'], {'batch_size': '(20)'}), '(test_set, x_depth, batch_size=20)\n', (2880, 2914), False, 'from loader import load_noteseqs\n')] |
import numpy as np
import cv2
import serial
import pygame
from pygame.locals import *
import socket
import time
import os
# Open a TCP server and block until a single video-streaming client connects.
server_socket = socket.socket()
server_socket.bind(('192.168.1.106', 12349))
server_socket.listen(0)
# accept a single connection
connection = server_socket.accept()[0].makefile('rb')
#self.ser = serial.Serial('/dev/tty.usbmodem1421', 115200, timeout=1)
send_inst = True  # main-loop flag; cleared when the operator enters 'x'
# create labels
# k is a 4x4 identity matrix: row i is the one-hot label for direction i
# (0=left, 1=right, 2=forward, 3=reverse per the key handling in the loop).
k = np.zeros((4, 4), 'float')
for i in range(4):
    k[i, i] = 1
temp_label = np.zeros((1, 4), 'float')
pygame.init()
saved_frame = 0  # frames the operator actually labelled
total_frame = 0  # all frames decoded from the stream
# collect images for training
print ('Start collecting images...')
e1 = cv2.getTickCount()  # start tick for the duration report at the end
# Row 0 of both arrays is a zero placeholder; labelled samples are stacked
# below it via np.vstack and the placeholder is stripped after the loop.
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 4), 'float')
stream_bytes = b' '
frame = 1
# Main capture loop: pull MJPEG frames off the socket, save each frame to
# disk, and record (image, direction-label) pairs for training.
while send_inst:
    stream_bytes += connection.read(1024)
    # JPEG frames are delimited by the SOI (0xFFD8) and EOI (0xFFD9) markers.
    first = stream_bytes.find(b'\xff\xd8')
    last = stream_bytes.find(b'\xff\xd9')
    if first != -1 and last != -1:
        jpg = stream_bytes[first:last + 2]
        stream_bytes = stream_bytes[last + 2:]
        # np.frombuffer replaces np.fromstring, which is deprecated for
        # binary data and emits DeprecationWarning on modern NumPy.
        image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
        # Keep rows 120-239 as the region of interest (38400 = 120x320 px).
        roi = image[120:240, :]
        cv2.imwrite('training_img/frame{:>05}.jpg'.format(frame), image)
        #cv2.imshow('image', image)
        #print(image.shape)
        temp_array = roi.reshape(1, 38400).astype(np.float32)
        #temp_array = roi.reshape(1, 76800).astype(np.float32)
        frame += 1
        total_frame += 1
        time.sleep(0.5)
        # Ask the operator for the driving command for this frame; only
        # labelled frames are appended to the training arrays.
        a = input("enter: ")
        if a == 'w':
            print("Forward")
            saved_frame += 1
            # print(roi.shape)
            # print(temp_array.shape)
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[2]))
            #self.ser.write(chr(1))
        elif a == 's':
            print("Reverse")
            saved_frame += 1
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[3]))
            #self.ser.write(chr(2))
        elif a == 'd':
            print("Right")
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[1]))
            saved_frame += 1
            #self.ser.write(chr(3))
        elif a == 'a':
            print("Left")
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[0]))
            saved_frame += 1
            #self.ser.write(chr(4))
        elif a == 'wd':
            # Diagonal commands reuse the plain left/right labels.
            print("Forward Right")
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[1]))
            saved_frame += 1
            #self.ser.write(chr(6))
        elif a == 'wa':
            print("Forward Left")
            image_array = np.vstack((image_array, temp_array))
            label_array = np.vstack((label_array, k[0]))
            saved_frame += 1
            #self.ser.write(chr(7))
        elif a == 'x':
            print ('exit')
            send_inst = False
            #ser.write(chr(0))
            break
        #elif event.type == pygame.KEYUP:
        #self.ser.write(chr(0))
        # print("extra")
# Strip the zero-filled placeholder row (row 0 of each array only existed to
# seed np.vstack).
train = image_array[1:, :]
train_labels = label_array[1:, :]
#print(train)
#print(train_labels)
# save training data as a numpy file
file_name = str(int(time.time()))  # timestamp-based file name
directory = "training_data"
if not os.path.exists(directory):
    os.makedirs(directory)
try:
    #np.save(directory + '/' + file_name + '.jpg')
    #cv2.imwrite(directory+'/'+file_name+ '.jpg',train)
    np.savez(directory + '/' + file_name + '.npz', train=train, train_labels=train_labels)
except IOError as e:
    print(e)
# Report how long the capture session lasted.
e2 = cv2.getTickCount()
time0 = (e2 - e1) / cv2.getTickFrequency()
print ('Streaming duration:', time0)
#print(train.shape)
#print(train_labels.shape)
#print ('Total frame:', total_frame)
print ('Saved frame:', saved_frame)
#print ('Dropped frame', total_frame - saved_frame)
# connection.close()
# NOTE(review): the makefile() wrapper `connection` is never closed -- verify
# whether it should be closed before the listening socket.
server_socket.close()
| [
"os.path.exists",
"numpy.savez",
"pygame.init",
"socket.socket",
"os.makedirs",
"time.sleep",
"numpy.zeros",
"cv2.getTickCount",
"numpy.vstack",
"numpy.fromstring",
"time.time",
"cv2.getTickFrequency"
] | [((148, 163), 'socket.socket', 'socket.socket', ([], {}), '()\n', (161, 163), False, 'import socket\n'), ((433, 458), 'numpy.zeros', 'np.zeros', (['(4, 4)', '"""float"""'], {}), "((4, 4), 'float')\n", (441, 458), True, 'import numpy as np\n'), ((510, 535), 'numpy.zeros', 'np.zeros', (['(1, 4)', '"""float"""'], {}), "((1, 4), 'float')\n", (518, 535), True, 'import numpy as np\n'), ((537, 550), 'pygame.init', 'pygame.init', ([], {}), '()\n', (548, 550), False, 'import pygame\n'), ((660, 678), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (676, 678), False, 'import cv2\n'), ((694, 714), 'numpy.zeros', 'np.zeros', (['(1, 38400)'], {}), '((1, 38400))\n', (702, 714), True, 'import numpy as np\n'), ((730, 755), 'numpy.zeros', 'np.zeros', (['(1, 4)', '"""float"""'], {}), "((1, 4), 'float')\n", (738, 755), True, 'import numpy as np\n'), ((1517, 1532), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1527, 1532), False, 'import time\n'), ((4087, 4105), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (4103, 4105), False, 'import cv2\n'), ((1104, 1138), 'numpy.fromstring', 'np.fromstring', (['jpg'], {'dtype': 'np.uint8'}), '(jpg, dtype=np.uint8)\n', (1117, 1138), True, 'import numpy as np\n'), ((1761, 1797), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (1770, 1797), True, 'import numpy as np\n'), ((1825, 1855), 'numpy.vstack', 'np.vstack', (['(label_array, k[2])'], {}), '((label_array, k[2]))\n', (1834, 1855), True, 'import numpy as np\n'), ((3715, 3740), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3729, 3740), False, 'import os\n'), ((3755, 3777), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (3766, 3777), False, 'import os\n'), ((3930, 4021), 'numpy.savez', 'np.savez', (["(directory + '/' + file_name + '.npz')"], {'train': 'train', 'train_labels': 'train_labels'}), "(directory + '/' + file_name + '.npz', train=train, train_labels=\n 
train_labels)\n", (3938, 4021), True, 'import numpy as np\n'), ((4135, 4157), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (4155, 4157), False, 'import cv2\n'), ((2008, 2044), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (2017, 2044), True, 'import numpy as np\n'), ((2072, 2102), 'numpy.vstack', 'np.vstack', (['(label_array, k[3])'], {}), '((label_array, k[3]))\n', (2081, 2102), True, 'import numpy as np\n'), ((3648, 3659), 'time.time', 'time.time', ([], {}), '()\n', (3657, 3659), False, 'import time\n'), ((2255, 2291), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (2264, 2291), True, 'import numpy as np\n'), ((2319, 2349), 'numpy.vstack', 'np.vstack', (['(label_array, k[1])'], {}), '((label_array, k[1]))\n', (2328, 2349), True, 'import numpy as np\n'), ((2503, 2539), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (2512, 2539), True, 'import numpy as np\n'), ((2567, 2597), 'numpy.vstack', 'np.vstack', (['(label_array, k[0])'], {}), '((label_array, k[0]))\n', (2576, 2597), True, 'import numpy as np\n'), ((2761, 2797), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (2770, 2797), True, 'import numpy as np\n'), ((2825, 2855), 'numpy.vstack', 'np.vstack', (['(label_array, k[1])'], {}), '((label_array, k[1]))\n', (2834, 2855), True, 'import numpy as np\n'), ((3010, 3046), 'numpy.vstack', 'np.vstack', (['(image_array, temp_array)'], {}), '((image_array, temp_array))\n', (3019, 3046), True, 'import numpy as np\n'), ((3074, 3104), 'numpy.vstack', 'np.vstack', (['(label_array, k[0])'], {}), '((label_array, k[0]))\n', (3083, 3104), True, 'import numpy as np\n')] |
# Generates data from adult ICUs including demographics, lab results and vital measurements
import argparse
import os
import random
import numpy as np
import pandas as pd
import psycopg2
def replace(group):
    """Mean-impute the missing values of a pandas group.

    Null entries are overwritten in place with the mean of the group's
    non-null entries; the (mutated) group is returned so the function can
    be applied group-wise.
    """
    missing = group.isnull()
    present = ~missing
    group[missing] = group[present].mean()
    return group
def main(sqluser, sqlpass):
random.seed(22891)
# Output directory to generate the files
mimicdir = "./data/mimic/"
if not os.path.exists(mimicdir):
os.mkdir(mimicdir)
# create a database connection and connect to local postgres version of mimic
dbname = "mimic"
schema_name = "mimiciii"
con = psycopg2.connect(dbname=dbname, user=sqluser, host="127.0.0.1", password=sqlpass)
cur = con.cursor()
cur.execute("SET search_path to " + schema_name)
# ========get the icu details
# this query extracts the following:
# Unique ids for the admission, patient and icu stay
# Patient gender
# diagnosis
# age
# ethnicity
# admission type
# first hospital stay
# first icu stay?
# mortality within a week
denquery = """
--ie is the icustays table
--adm is the admissions table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id
, pat.gender
, adm.admittime, adm.dischtime, adm.diagnosis
, ROUND( (CAST(adm.dischtime AS DATE) - CAST(adm.admittime AS DATE)) , 4) AS los_hospital
, ROUND( (CAST(adm.admittime AS DATE) - CAST(pat.dob AS DATE)) / 365, 4) AS age
, adm.ethnicity, adm.ADMISSION_TYPE
--, adm.hospital_expire_flag
, CASE when adm.deathtime between ie.intime and ie.outtime THEN 1 ELSE 0 END AS mort_icu
, DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) AS hospstay_seq
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) = 1 THEN 1
ELSE 0 END AS first_hosp_stay
-- icu level factors
, ie.intime, ie.outtime
, ie.FIRST_CAREUNIT
, ROUND( (CAST(ie.outtime AS DATE) - CAST(ie.intime AS DATE)) , 4) AS los_icu
, DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) AS icustay_seq
, CASE
WHEN adm.deathtime between ie.intime and ie.intime + interval '168' hour THEN 1 ELSE 0 END AS mort_week
-- first ICU stay *for the current hospitalization*
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) = 1 THEN 1
ELSE 0 END AS first_icu_stay
FROM icustays ie
INNER JOIN admissions adm
ON ie.hadm_id = adm.hadm_id
INNER JOIN patients pat
ON ie.subject_id = pat.subject_id
WHERE adm.has_chartevents_data = 1
ORDER BY ie.subject_id, adm.admittime, ie.intime;
"""
den = pd.read_sql_query(denquery, con)
## drop patients with less than 48 hour
den["los_icu_hr"] = (den.outtime - den.intime).astype("timedelta64[h]")
den = den[(den.los_icu_hr >= 48)]
den = den[(den.age < 300)]
den.drop("los_icu_hr", 1, inplace=True)
## clean up
den["adult_icu"] = np.where(den["first_careunit"].isin(["PICU", "NICU"]), 0, 1)
den["gender"] = np.where(den["gender"] == "M", 1, 0)
den.ethnicity = den.ethnicity.str.lower()
den.ethnicity.loc[(den.ethnicity.str.contains("^white"))] = "white"
den.ethnicity.loc[(den.ethnicity.str.contains("^black"))] = "black"
den.ethnicity.loc[(den.ethnicity.str.contains("^hisp")) | (den.ethnicity.str.contains("^latin"))] = "hispanic"
den.ethnicity.loc[(den.ethnicity.str.contains("^asia"))] = "asian"
den.ethnicity.loc[~(den.ethnicity.str.contains("|".join(["white", "black", "hispanic", "asian"])))] = "other"
den.drop(
[
"hospstay_seq",
"los_icu",
"icustay_seq",
"admittime",
"dischtime",
"los_hospital",
"intime",
"outtime",
"first_careunit",
],
1,
inplace=True,
)
# ========= 48 hour vitals query
# these are the normal ranges. useful to clean up the data
vitquery = """
-- This query pivots the vital signs for the first 48 hours of a patient's stay
-- Vital signs include heart rate, blood pressure, respiration rate, and temperature
-- DROP MATERIALIZED VIEW IF EXISTS vitalsfirstday CASCADE;
-- create materialized view vitalsfirstday as
SELECT pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.VitalID, pvt.VitalValue, pvt.VitalChartTime
FROM (
select ie.subject_id, ie.hadm_id, ie.icustay_id, ce.charttime as VitalChartTime
, case
when itemid in (211,220045) and valuenum > 0 and valuenum < 300 then 'HeartRate'
when itemid in (51,442,455,6701,220179,220050) and valuenum > 0 and valuenum < 400 then 'SysBP'
when itemid in (8368,8440,8441,8555,220180,220051) and valuenum > 0 and valuenum < 300 then 'DiasBP'
when itemid in (456,52,6702,443,220052,220181,225312) and valuenum > 0 and valuenum < 300 then 'MeanBP'
when itemid in (615,618,220210,224690) and valuenum > 0 and valuenum < 70 then 'RespRate'
when itemid in (223761,678) and valuenum > 70 and valuenum < 120 then 'Temp' -- converted to degC in valuenum call
when itemid in (223762,676) and valuenum > 10 and valuenum < 50 then 'Temp'
when itemid in (646,220277) and valuenum > 0 and valuenum <= 100 then 'SpO2'
when itemid in (807,811,1529,3745,3744,225664,220621,226537) and valuenum > 0 then 'Glucose'
else null end as VitalID
, case
when itemid in (211,220045) and valuenum > 0 and valuenum < 300 then valuenum -- HeartRate
when itemid in (51,442,455,6701,220179,220050) and valuenum > 0 and valuenum < 400 then valuenum -- SysBP
when itemid in (8368,8440,8441,8555,220180,220051) and valuenum > 0 and valuenum < 300 then valuenum -- DiasBP
when itemid in (456,52,6702,443,220052,220181,225312) and valuenum > 0 and valuenum < 300 then valuenum -- MeanBP
when itemid in (615,618,220210,224690) and valuenum > 0 and valuenum < 70 then valuenum -- RespRate
when itemid in (223761,678) and valuenum > 70 and valuenum < 120 then (valuenum-32)/1.8 -- TempF, convert to degC
when itemid in (223762,676) and valuenum > 10 and valuenum < 50 then valuenum -- TempC
when itemid in (646,220277) and valuenum > 0 and valuenum <= 100 then valuenum -- SpO2
when itemid in (807,811,1529,3745,3744,225664,220621,226537) and valuenum > 0 then valuenum -- Glucose
else null end as VitalValue
from icustays ie
left join chartevents ce
on ie.subject_id = ce.subject_id and ie.hadm_id = ce.hadm_id and ie.icustay_id = ce.icustay_id
and ce.charttime between ie.intime and ie.intime + interval '48' hour
-- exclude rows marked as error
and ce.error IS DISTINCT FROM 1
where ce.itemid in
(
-- HEART RATE
211, --"Heart Rate"
220045, --"Heart Rate"
-- Systolic/diastolic
51, -- Arterial BP [Systolic]
442, -- Manual BP [Systolic]
455, -- NBP [Systolic]
6701, -- Arterial BP #2 [Systolic]
220179, -- Non Invasive Blood Pressure systolic
220050, -- Arterial Blood Pressure systolic
8368, -- Arterial BP [Diastolic]
8440, -- Manual BP [Diastolic]
8441, -- NBP [Diastolic]
8555, -- Arterial BP #2 [Diastolic]
220180, -- Non Invasive Blood Pressure diastolic
220051, -- Arterial Blood Pressure diastolic
-- MEAN ARTERIAL PRESSURE
456, --"NBP Mean"
52, --"Arterial BP Mean"
6702, -- Arterial BP Mean #2
443, -- Manual BP Mean(calc)
220052, --"Arterial Blood Pressure mean"
220181, --"Non Invasive Blood Pressure mean"
225312, --"ART BP mean"
-- RESPIRATORY RATE
618,-- Respiratory Rate
615,-- Resp Rate (Total)
220210,-- Respiratory Rate
224690, -- Respiratory Rate (Total)
-- SPO2, peripheral
646, 220277,
-- GLUCOSE, both lab and fingerstick
807,-- Fingerstick Glucose
811,-- Glucose (70-105)
1529,-- Glucose
3745,-- BloodGlucose
3744,-- Blood Glucose
225664,-- Glucose finger stick
220621,-- Glucose (serum)
226537,-- Glucose (whole blood)
-- TEMPERATURE
223762, -- "Temperature Celsius"
676, -- "Temperature C"
223761, -- "Temperature Fahrenheit"
678 -- "Temperature F"
)
) pvt
where VitalID is not null
order by pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.VitalID, pvt.VitalChartTime;
"""
vit48 = pd.read_sql_query(vitquery, con)
vit48.isnull().sum()
# ===============48 hour labs query
# This query extracts the lab events in the first 48 hours
labquery = """
WITH pvt AS (
--- ie is the icu stay
--- ad is the admissions table
--- le is the lab events table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id, le.charttime as LabChartTime
, CASE
when le.itemid = 50868 then 'ANION GAP'
when le.itemid = 50862 then 'ALBUMIN'
when le.itemid = 50882 then 'BICARBONATE'
when le.itemid = 50885 then 'BILIRUBIN'
when le.itemid = 50912 then 'CREATININE'
when le.itemid = 50806 then 'CHLORIDE'
when le.itemid = 50902 then 'CHLORIDE'
when le.itemid = 50809 then 'GLUCOSE'
when le.itemid = 50931 then 'GLUCOSE'
when le.itemid = 50810 then 'HEMATOCRIT'
when le.itemid = 51221 then 'HEMATOCRIT'
when le.itemid = 50811 then 'HEMOGLOBIN'
when le.itemid = 51222 then 'HEMOGLOBIN'
when le.itemid = 50813 then 'LACTATE'
when le.itemid = 50960 then 'MAGNESIUM'
when le.itemid = 50970 then 'PHOSPHATE'
when le.itemid = 51265 then 'PLATELET'
when le.itemid = 50822 then 'POTASSIUM'
when le.itemid = 50971 then 'POTASSIUM'
when le.itemid = 51275 then 'PTT'
when le.itemid = 51237 then 'INR'
when le.itemid = 51274 then 'PT'
when le.itemid = 50824 then 'SODIUM'
when le.itemid = 50983 then 'SODIUM'
when le.itemid = 51006 then 'BUN'
when le.itemid = 51300 then 'WBC'
when le.itemid = 51301 then 'WBC'
ELSE null
END AS label
, -- add in some sanity checks on the values
CASE
when le.itemid = 50862 and le.valuenum > 10 then null -- g/dL 'ALBUMIN'
when le.itemid = 50868 and le.valuenum > 10000 then null -- mEq/L 'ANION GAP'
when le.itemid = 50882 and le.valuenum > 10000 then null -- mEq/L 'BICARBONATE'
when le.itemid = 50885 and le.valuenum > 150 then null -- mg/dL 'BILIRUBIN'
when le.itemid = 50806 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50902 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50912 and le.valuenum > 150 then null -- mg/dL 'CREATININE'
when le.itemid = 50809 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50931 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50810 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 51221 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 50811 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 51222 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 50813 and le.valuenum > 50 then null -- mmol/L 'LACTATE'
when le.itemid = 50960 and le.valuenum > 60 then null -- mmol/L 'MAGNESIUM'
when le.itemid = 50970 and le.valuenum > 60 then null -- mg/dL 'PHOSPHATE'
when le.itemid = 51265 and le.valuenum > 10000 then null -- K/uL 'PLATELET'
when le.itemid = 50822 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 50971 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 51275 and le.valuenum > 150 then null -- sec 'PTT'
when le.itemid = 51237 and le.valuenum > 50 then null -- 'INR'
when le.itemid = 51274 and le.valuenum > 150 then null -- sec 'PT'
when le.itemid = 50824 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 50983 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 51006 and le.valuenum > 300 then null -- 'BUN'
when le.itemid = 51300 and le.valuenum > 1000 then null -- 'WBC'
when le.itemid = 51301 and le.valuenum > 1000 then null -- 'WBC'
ELSE le.valuenum
END AS LabValue
FROM icustays ie
LEFT JOIN labevents le
ON le.subject_id = ie.subject_id
AND le.hadm_id = ie.hadm_id
AND le.charttime between (ie.intime) AND (ie.intime + interval '48' hour)
AND le.itemid IN
(
-- comment is: LABEL | CATEGORY | FLUID | NUMBER OF ROWS IN LABEVENTS
50868, -- ANION GAP | CHEMISTRY | BLOOD | 769895
50862, -- ALBUMIN | CHEMISTRY | BLOOD | 146697
50882, -- BICARBONATE | CHEMISTRY | BLOOD | 780733
50885, -- BILIRUBIN, TOTAL | CHEMISTRY | BLOOD | 238277
50912, -- CREATININE | CHEMISTRY | BLOOD | 797476
50902, -- CHLORIDE | CHEMISTRY | BLOOD | 795568
50806, -- CHLORIDE, WHOLE BLOOD | BLOOD GAS | BLOOD | 48187
50931, -- GLUCOSE | CHEMISTRY | BLOOD | 748981
50809, -- GLUCOSE | BLOOD GAS | BLOOD | 196734
51221, -- HEMATOCRIT | HEMATOLOGY | BLOOD | 881846
50810, -- HEMATOCRIT, CALCULATED | BLOOD GAS | BLOOD | 89715
51222, -- HEMOGLOBIN | HEMATOLOGY | BLOOD | 752523
50811, -- HEMOGLOBIN | BLOOD GAS | BLOOD | 89712
50813, -- LACTATE | BLOOD GAS | BLOOD | 187124
50960, -- MAGNESIUM | CHEMISTRY | BLOOD | 664191
50970, -- PHOSPHATE | CHEMISTRY | BLOOD | 590524
51265, -- PLATELET COUNT | HEMATOLOGY | BLOOD | 778444
50971, -- POTASSIUM | CHEMISTRY | BLOOD | 845825
50822, -- POTASSIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 192946
51275, -- PTT | HEMATOLOGY | BLOOD | 474937
51237, -- INR(PT) | HEMATOLOGY | BLOOD | 471183
51274, -- PT | HEMATOLOGY | BLOOD | 469090
50983, -- SODIUM | CHEMISTRY | BLOOD | 808489
50824, -- SODIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 71503
51006, -- UREA NITROGEN | CHEMISTRY | BLOOD | 791925
51301, -- WHITE BLOOD CELLS | HEMATOLOGY | BLOOD | 753301
51300 -- WBC COUNT | HEMATOLOGY | BLOOD | 2371
)
AND le.valuenum IS NOT null
AND le.valuenum > 0 -- lab values cannot be 0 and cannot be negative
LEFT JOIN admissions ad
ON ie.subject_id = ad.subject_id
AND ie.hadm_id = ad.hadm_id
)
SELECT pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.LabChartTime, pvt.label, pvt.LabValue
From pvt
where pvt.label is not NULL
ORDER BY pvt.subject_id, pvt.hadm_id, pvt.icustay_id, pvt.label, pvt.LabChartTime;
"""
lab48 = pd.read_sql_query(labquery, con)

# ===== combine all variables: join the static denominator table with the
# 48-hour vitals and labs extracted above.
mort_vital = den.merge(vit48, how="left", on=["subject_id", "hadm_id", "icustay_id"])
mort_lab = den.merge(lab48, how="left", on=["subject_id", "hadm_id", "icustay_id"])

# create means by age group and gender: bucket age into coarse groups
# shared by both the vitals and labs tables.
age_bins = [-1, 5, 10, 15, 20, 25, 40, 60, 80, 200]
age_labels = ["l5", "5_10", "10_15", "15_20", "20_25", "25_40", "40_60", "60_80", "80p"]
mort_vital["age_group"] = pd.cut(mort_vital["age"], age_bins, labels=age_labels)
mort_lab["age_group"] = pd.cut(mort_lab["age"], age_bins, labels=age_labels)

# Restrict to adult ICU stays. .copy() detaches the slice so the in-place
# drop below does not trigger pandas' SettingWithCopyWarning.
adult_vital = mort_vital[(mort_vital.adult_icu == 1)].copy()
adult_lab = mort_lab[(mort_lab.adult_icu == 1)].copy()
adult_vital.drop(columns=["adult_icu"], inplace=True)
adult_lab.drop(columns=["adult_icu"], inplace=True)

adult_vital.to_csv(os.path.join(mimicdir, "adult_icu_vital.gz"), compression="gzip", index=False)
# BUG FIX: the original wrote the unfiltered mort_lab (which still contained
# non-adult stays and the adult_icu column) to adult_icu_lab.gz, while the
# filtered adult_lab frame was computed and then discarded.
adult_lab.to_csv(os.path.join(mimicdir, "adult_icu_lab.gz"), compression="gzip", index=False)
if __name__ == "__main__":
    # Command-line entry point: credentials for the local postgres instance
    # that hosts the MIMIC-III database.
    parser = argparse.ArgumentParser(description="Query ICU mortality data from mimic database")
    parser.add_argument("--sqluser", type=str, default="mimicuser", help="postgres user to access mimic database")
    # NOTE(review): the default below looks like an anonymized placeholder;
    # a real password must be supplied via --sqlpass.
    parser.add_argument(
        "--sqlpass", type=str, default="<PASSWORD>", help="postgres user password to access mimic database"
    )
    args = parser.parse_args()
    main(args.sqluser, args.sqlpass)
| [
"psycopg2.connect",
"pandas.read_sql_query",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"os.path.join",
"pandas.cut",
"random.seed",
"os.mkdir"
] | [((520, 538), 'random.seed', 'random.seed', (['(22891)'], {}), '(22891)\n', (531, 538), False, 'import random\n'), ((822, 908), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': 'dbname', 'user': 'sqluser', 'host': '"""127.0.0.1"""', 'password': 'sqlpass'}), "(dbname=dbname, user=sqluser, host='127.0.0.1', password=\n sqlpass)\n", (838, 908), False, 'import psycopg2\n'), ((2909, 2941), 'pandas.read_sql_query', 'pd.read_sql_query', (['denquery', 'con'], {}), '(denquery, con)\n', (2926, 2941), True, 'import pandas as pd\n'), ((3296, 3332), 'numpy.where', 'np.where', (["(den['gender'] == 'M')", '(1)', '(0)'], {}), "(den['gender'] == 'M', 1, 0)\n", (3304, 3332), True, 'import numpy as np\n'), ((8987, 9019), 'pandas.read_sql_query', 'pd.read_sql_query', (['vitquery', 'con'], {}), '(vitquery, con)\n', (9004, 9019), True, 'import pandas as pd\n'), ((15936, 15968), 'pandas.read_sql_query', 'pd.read_sql_query', (['labquery', 'con'], {}), '(labquery, con)\n', (15953, 15968), True, 'import pandas as pd\n'), ((16255, 16416), 'pandas.cut', 'pd.cut', (["mort_vital['age']", '[-1, 5, 10, 15, 20, 25, 40, 60, 80, 200]'], {'labels': "['l5', '5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p']"}), "(mort_vital['age'], [-1, 5, 10, 15, 20, 25, 40, 60, 80, 200], labels=\n ['l5', '5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p']\n )\n", (16261, 16416), True, 'import pandas as pd\n'), ((16466, 16620), 'pandas.cut', 'pd.cut', (["mort_lab['age']", '[-1, 5, 10, 15, 20, 25, 40, 60, 80, 200]'], {'labels': "['l5', '5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p']"}), "(mort_lab['age'], [-1, 5, 10, 15, 20, 25, 40, 60, 80, 200], labels=[\n 'l5', '5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p'])\n", (16472, 16620), True, 'import pandas as pd\n'), ((17142, 17230), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query ICU mortality data from mimic database"""'}), "(description=\n 'Query 
ICU mortality data from mimic database')\n", (17165, 17230), False, 'import argparse\n'), ((626, 650), 'os.path.exists', 'os.path.exists', (['mimicdir'], {}), '(mimicdir)\n', (640, 650), False, 'import os\n'), ((660, 678), 'os.mkdir', 'os.mkdir', (['mimicdir'], {}), '(mimicdir)\n', (668, 678), False, 'import os\n'), ((16923, 16967), 'os.path.join', 'os.path.join', (['mimicdir', '"""adult_icu_vital.gz"""'], {}), "(mimicdir, 'adult_icu_vital.gz')\n", (16935, 16967), False, 'import os\n'), ((17022, 17064), 'os.path.join', 'os.path.join', (['mimicdir', '"""adult_icu_lab.gz"""'], {}), "(mimicdir, 'adult_icu_lab.gz')\n", (17034, 17064), False, 'import os\n')] |
#!/usr/bin/env python3
import csv
import subprocess
import sys
import numpy
from matplotlib import pyplot
from matplotlib import cm
# Command-line layout: <bin_dir> <output_dir>.
bin_dir = sys.argv[1]
output_dir = sys.argv[2]

# Each benchmark binary is executed `trials` times; only the best (minimum)
# time per benchmark is kept, to reduce scheduling noise.
trials = 5

# Sequence-container implementations under test. One benchmark binary is
# expected per (kind, container, element size), e.g. "single_vector_8".
containers = ["segmented_tree_seq", "btree_seq", "bpt_sequence", "avl_array",
              "deque", "vector"]

# Benchmark argument tuples are (container size, seed, seed) for "single"
# runs and (outer count, inner size, seed, seed) for "range" runs; the
# matching *_labels lists are the human-readable x-axis tick labels.
single_8_labels = ["256", "7936", "246016", "7626496"]
single_8_args = [(256, 2107779313, 15865477950454414828),
                 (7936, 976634119, 3238950223561105499),
                 (246016, 3515491141, 5644467892570804156),
                 (7626496, 1106530671, 6965094249249093704)]
single_64_labels = ["32", "992", "30752", "953312"]
single_64_args = [(32, 2397254571, 4723602420748635361),
                  (992, 463092544, 12966777589746855639),
                  (30752, 430452927, 751509891372566603),
                  (953312, 3109453262, 10176667110359292238)]
range_8_labels = ["1x7626496", "31x246016", "961x7936", "29791x256"]
range_8_args = [(1, 7626496, 1319121800, 13937110459523125406),
                (31, 246016, 3037209825, 7094631787510567342),
                (961, 7936, 2984486723, 3526507821286439548),
                (29791, 256, 197924070, 9499073426478457076)]
range_64_labels = ["1x953312", "31x30752", "961x992", "29791x32"]
range_64_args = [(1, 953312, 235951511, 7803621008785366632),
                 (31, 30752, 1082972474, 11846815057285548515),
                 (961, 992, 5659033, 14482810490810820797),
                 (29791, 32, 3727649439, 10804193997107502541)]
def graph(size, kind, name, results, labels):
    """Render one grouped, log-scale bar chart (one bar group per container
    size, one bar per container) and save it as an SVG under output_dir."""
    n_groups = len(labels)
    n_containers = len(containers)
    positions = numpy.arange(n_groups)
    border = 0.05
    bar_width = (1.0 - 2 * border) / n_containers
    fig, ax = pyplot.subplots()
    handles = []
    legend_labels = []
    for rank, container in enumerate(containers):
        bars = ax.bar(positions + bar_width * rank + border,
                      results[container],
                      bar_width,
                      color=cm.jet(rank / n_containers))
        handles.append(bars[0])
        legend_labels.append(container)
    ax.set_xlabel('Container size')
    ax.set_ylabel('Milliseconds')
    # Timings span orders of magnitude across sizes, hence the log axis.
    ax.set_yscale('log')
    ax.set_title('%s of std::uint%d_t' % (name, size))
    ax.set_xticks(positions + bar_width * n_containers / 2 + border)
    ax.set_xticklabels(labels)
    ax.legend(handles, legend_labels, loc='upper left')
    pyplot.savefig('%s/%s-%s-%d.svg' % (output_dir,
                                        name.lower().replace(' ', '_'),
                                        kind,
                                        size))
    pyplot.close()
def raw(size, kind, name, results, labels):
    """Dump one benchmark's timings as CSV under output_dir.

    The first line is a '#container,' header followed by the size labels;
    each subsequent line is '<container>,<ms>,<ms>,...'.
    """
    path = '%s/%s-%s-%d.txt' % (output_dir,
                                name.lower().replace(' ', '_'),
                                kind,
                                size)
    # Context manager guarantees the handle is closed even if a write
    # raises (the original leaked the open file on error).
    with open(path, mode='w') as file:
        file.write('#container,')
        file.write(','.join(labels))
        file.write('\n')
        for container in containers:
            times = results[container]
            file.write('%s,%s\n' % (container, ','.join(str(i) for i in times)))
def handle_sizes(size, kind, labels, args_list):
    """Run every benchmark binary for one (element size, kind) suite and
    emit one graph and one raw CSV per benchmark name.

    Each binary is run `trials` times per argument tuple and the best
    (minimum) time per reported benchmark is kept.
    """
    # benchmark name -> container -> list of best times, one per args tuple
    best_times = {}
    for args in args_list:
        for container in containers:
            best = {}
            for _ in range(trials):
                cmd = ["%s/%s_%s_%d" % (bin_dir, kind, container, size)]
                cmd += [str(a) for a in args]
                # Popen as a context manager waits for the child and closes
                # its stdout pipe; the original never reaped the process,
                # leaking file descriptors and leaving zombies.
                with subprocess.Popen(cmd,
                                      stdout=subprocess.PIPE,
                                      universal_newlines=True) as pipe:
                    measurements = list(csv.reader(pipe.stdout))
                for benchmark, string in measurements:
                    ms = float(string)
                    if benchmark in best:
                        best[benchmark] = min(best[benchmark], ms)
                    else:
                        best[benchmark] = ms
            for benchmark, ms in best.items():
                best_times.setdefault(benchmark, {}).setdefault(container, []).append(ms)
    for name, results in best_times.items():
        graph(size, kind, name, results, labels)
        raw(size, kind, name, results, labels)
# Drive all four benchmark suites: single-element and range operations,
# each for 8-bit and 64-bit payloads.
handle_sizes(8, "single", single_8_labels, single_8_args)
handle_sizes(64, "single", single_64_labels, single_64_args)
handle_sizes(8, "range", range_8_labels, range_8_args)
handle_sizes(64, "range", range_64_labels, range_64_args)
| [
"matplotlib.pyplot.close",
"matplotlib.cm.jet",
"csv.reader",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((1618, 1633), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (1630, 1633), False, 'import numpy\n'), ((1695, 1712), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {}), '()\n', (1710, 1712), False, 'from matplotlib import pyplot\n'), ((2504, 2518), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (2516, 2518), False, 'from matplotlib import pyplot\n'), ((1905, 1918), 'matplotlib.cm.jet', 'cm.jet', (['(n / M)'], {}), '(n / M)\n', (1911, 1918), False, 'from matplotlib import cm\n'), ((3418, 3441), 'csv.reader', 'csv.reader', (['pipe.stdout'], {}), '(pipe.stdout)\n', (3428, 3441), False, 'import csv\n')] |
# Bayesian Binary logistic regression in 2d for iris flwoers
# Code is based on
# https://github.com/aloctavodia/BAP/blob/master/code/Chp4/04_Generalizing_linear_models.ipynb
import pymc3 as pm
import numpy as np
import pandas as pd
import theano.tensor as tt
#import seaborn as sns
import scipy.stats as stats
from scipy.special import expit as logistic
import matplotlib.pyplot as plt
import arviz as az
from sklearn.datasets import load_iris
import pyprobml_utils as pml
# Load iris; keep the raw arrays and also build a labelled dataframe.
iris = load_iris()
X = iris.data
y = iris.target

# Convert to pandas dataframe
df_iris = pd.DataFrame(data=iris.data,
                     columns=['sepal_length', 'sepal_width',
                              'petal_length', 'petal_width'])
df_iris['species'] = pd.Series(iris.target_names[y], dtype='category')
# Keep two classes only, so the problem is binary logistic regression.
df = df_iris.query("species == ('setosa', 'versicolor')")

# We reduce the sample size from 50 to 25 per class,
# or to 5 + 45 in the unbalanced setting.
# The latter will increase posterior uncertainty
unbalanced = False  # True
if unbalanced:
    df = df[45:95]
else:
    df = df[25:75]
assert(len(df)==50)

# y_1: 0/1 class codes; x_1: the two chosen features as an (N, 2) array.
y_1 = pd.Categorical(df['species']).codes
x_n = ['sepal_length', 'sepal_width']
x_1 = df[x_n].values

with pm.Model() as model_1:
    # Priors: intercept and one coefficient per feature.
    α = pm.Normal('α', mu=0, sd=10)
    β = pm.Normal('β', mu=0, sd=2, shape=len(x_n))
    # Linear predictor pushed through the logistic link.
    μ = α + pm.math.dot(x_1, β)
    θ = pm.Deterministic('θ', 1 / (1 + pm.math.exp(-μ)))
    # Decision boundary (θ = 0.5) expressed as x2 = -α/β1 - (β0/β1) x1,
    # evaluated at every observed x1 for plotting.
    bd = pm.Deterministic('bd', -α/β[1] - β[0]/β[1] * x_1[:,0])
    yl = pm.Bernoulli('yl', p=θ, observed=y_1)
    trace_1 = pm.sample(2000, cores=1, chains=2)

varnames = ['α', 'β']
#az.plot_forest(trace_1, var_names=varnames);

# Plot the data, the posterior-mean decision boundary (sorted by x1 so the
# line is drawn left to right), and its highest-density interval.
idx = np.argsort(x_1[:,0])
bd = trace_1['bd'].mean(0)[idx]
plt.figure()
plt.scatter(x_1[:,0], x_1[:,1], c=[f'C{x}' for x in y_1])
plt.plot(x_1[:,0][idx], bd, color='k');
az.plot_hdi(x_1[:,0], trace_1['bd'], color='k')
plt.xlabel(x_n[0])
plt.ylabel(x_n[1])
plt.tight_layout()
if unbalanced:
    pml.savefig('logreg_iris_bayes_2d_unbalanced.pdf', dpi=300)
else:
    pml.savefig('logreg_iris_bayes_2d.pdf', dpi=300)
plt.show()
"pymc3.math.exp",
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"pymc3.sample",
"pymc3.Bernoulli",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pandas.Categorical",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"pymc3.Deterministic",
"arviz.plot_hdi"... | [((485, 496), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (494, 496), False, 'from sklearn.datasets import load_iris\n'), ((570, 674), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'iris.data', 'columns': "['sepal_length', 'sepal_width', 'petal_length', 'petal_width']"}), "(data=iris.data, columns=['sepal_length', 'sepal_width',\n 'petal_length', 'petal_width'])\n", (582, 674), True, 'import pandas as pd\n'), ((743, 792), 'pandas.Series', 'pd.Series', (['iris.target_names[y]'], {'dtype': '"""category"""'}), "(iris.target_names[y], dtype='category')\n", (752, 792), True, 'import pandas as pd\n'), ((1675, 1696), 'numpy.argsort', 'np.argsort', (['x_1[:, 0]'], {}), '(x_1[:, 0])\n', (1685, 1696), True, 'import numpy as np\n'), ((1731, 1743), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1741, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1803), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_1[:, 0]', 'x_1[:, 1]'], {'c': "[f'C{x}' for x in y_1]"}), "(x_1[:, 0], x_1[:, 1], c=[f'C{x}' for x in y_1])\n", (1755, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1842), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1[:, 0][idx]', 'bd'], {'color': '"""k"""'}), "(x_1[:, 0][idx], bd, color='k')\n", (1811, 1842), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1893), 'arviz.plot_hdi', 'az.plot_hdi', (['x_1[:, 0]', "trace_1['bd']"], {'color': '"""k"""'}), "(x_1[:, 0], trace_1['bd'], color='k')\n", (1856, 1893), True, 'import arviz as az\n'), ((1895, 1913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_n[0]'], {}), '(x_n[0])\n', (1905, 1913), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['x_n[1]'], {}), '(x_n[1])\n', (1925, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1953), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1951, 1953), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2111), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2109, 2111), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1140), 'pandas.Categorical', 'pd.Categorical', (["df['species']"], {}), "(df['species'])\n", (1125, 1140), True, 'import pandas as pd\n'), ((1215, 1225), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1223, 1225), True, 'import pymc3 as pm\n'), ((1248, 1275), 'pymc3.Normal', 'pm.Normal', (['"""α"""'], {'mu': '(0)', 'sd': '(10)'}), "('α', mu=0, sd=10)\n", (1257, 1275), True, 'import pymc3 as pm\n'), ((1434, 1493), 'pymc3.Deterministic', 'pm.Deterministic', (['"""bd"""', '(-α / β[1] - β[0] / β[1] * x_1[:, 0])'], {}), "('bd', -α / β[1] - β[0] / β[1] * x_1[:, 0])\n", (1450, 1493), True, 'import pymc3 as pm\n'), ((1504, 1541), 'pymc3.Bernoulli', 'pm.Bernoulli', (['"""yl"""'], {'p': 'θ', 'observed': 'y_1'}), "('yl', p=θ, observed=y_1)\n", (1516, 1541), True, 'import pymc3 as pm\n'), ((1559, 1593), 'pymc3.sample', 'pm.sample', (['(2000)'], {'cores': '(1)', 'chains': '(2)'}), '(2000, cores=1, chains=2)\n', (1568, 1593), True, 'import pymc3 as pm\n'), ((1973, 2032), 'pyprobml_utils.savefig', 'pml.savefig', (['"""logreg_iris_bayes_2d_unbalanced.pdf"""'], {'dpi': '(300)'}), "('logreg_iris_bayes_2d_unbalanced.pdf', dpi=300)\n", (1984, 2032), True, 'import pyprobml_utils as pml\n'), ((2043, 2091), 'pyprobml_utils.savefig', 'pml.savefig', (['"""logreg_iris_bayes_2d.pdf"""'], {'dpi': '(300)'}), "('logreg_iris_bayes_2d.pdf', dpi=300)\n", (2054, 2091), True, 'import pyprobml_utils as pml\n'), ((1348, 1367), 'pymc3.math.dot', 'pm.math.dot', (['x_1', 'β'], {}), '(x_1, β)\n', (1359, 1367), True, 'import pymc3 as pm\n'), ((1408, 1423), 'pymc3.math.exp', 'pm.math.exp', (['(-μ)'], {}), '(-μ)\n', (1419, 1423), True, 'import pymc3 as pm\n')] |
import copy
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from torch.utils.data import Dataset
from mohou.encoding_rule import EncodingRule
from mohou.types import MultiEpisodeChunk, TerminateFlag
from mohou.utils import assert_equal_with_message, assert_two_sequences_same_length
logger = logging.getLogger(__name__)
@dataclass
class SequenceDatasetConfig:
    """Configuration for noise-based sequence augmentation."""

    # Number of noise-augmented copies created per original sequence.
    n_aug: int = 20
    # Scale applied to the empirical covariance when sampling augmentation noise.
    cov_scale: float = 0.1

    def __new__(cls, *args, **kwargs):
        # Force keyword-only construction so subclass fields cannot be
        # mis-bound positionally.
        assert len(args) == 0, "please instantiate config only using kwargs"
        return super(SequenceDatasetConfig, cls).__new__(cls)

    def __post_init__(self):
        assert self.n_aug >= 0
        assert self.cov_scale < 1.0
        logger.info("sequence dataset config: {}".format(self))
@dataclass
class SequenceDataAugmentor:  # functor
    """Creates noise-perturbed copies of state sequences.

    Noise is Gaussian with covariance estimated from the data itself
    (either raw states or one-step differences), scaled by config.cov_scale.
    """

    config: SequenceDatasetConfig
    take_diff: bool = True

    @staticmethod
    def compute_covariance(state_seq_list: List[np.ndarray]) -> np.ndarray:
        """Covariance of the raw states, pooled over all sequences."""
        stacked = np.vstack(state_seq_list)
        return np.cov(stacked.T)

    @staticmethod
    def compute_diff_covariance(state_seq_list: List[np.ndarray]) -> np.ndarray:
        """Covariance of one-step state differences, pooled over all sequences."""
        diffs = [seq[1:, :] - seq[:-1, :] for seq in state_seq_list]
        stacked = np.vstack(diffs)
        return np.cov(stacked.T)

    def apply(
        self, state_seq_list: List[np.ndarray], other_seq_list_list: List[List[np.ndarray]]
    ) -> Tuple[List[np.ndarray], List[List[np.ndarray]]]:
        """apply augmentation
        state_seq_list will be randomized.
        each seq_list in other_seq_list_list will not be randomized. But just augmented so that they are
        compatible with augmented state_seq_list.
        """
        if self.take_diff:
            cov = self.compute_diff_covariance(state_seq_list)
        else:
            cov = self.compute_covariance(state_seq_list)
        cov_scaled = cov * self.config.cov_scale**2

        # Sample noise copy-by-copy (same draw order as before).
        noised = []
        for _ in range(self.config.n_aug):
            for seq in state_seq_list:
                n_seq, n_dim = seq.shape
                noise = np.random.multivariate_normal(np.zeros(n_dim), cov_scaled, n_seq)
                noised.append(seq + noise)
        state_seq_list_out = copy.deepcopy(state_seq_list) + noised

        # The companion sequences are only repeated (never perturbed) so that
        # their ordering matches state_seq_list_out element for element.
        other_seq_list_list_out = []
        for other_seq_list in other_seq_list_list:
            repeated = copy.deepcopy(other_seq_list)
            for _ in range(self.config.n_aug):
                for seq in other_seq_list:
                    repeated.append(copy.deepcopy(seq))
            other_seq_list_list_out.append(repeated)

        return state_seq_list_out, other_seq_list_list_out
class WeightPolicy(ABC):
    """Strategy that produces a per-timestep weight vector for a sequence."""

    @abstractmethod
    def __call__(self, n_seq_lne: int) -> np.ndarray:
        ...
class ConstantWeightPolicy(WeightPolicy):
    """Uniform weighting: every timestep contributes equally (weight 1.0)."""

    def __call__(self, n_seq_lne: int) -> np.ndarray:
        return np.full(n_seq_lne, 1.0)
@dataclass
class PWLinearWeightPolicy(WeightPolicy):
    """Linear weighting ramping from w_left (first step) to w_right (last step)."""

    w_left: float
    w_right: float

    def __call__(self, n_seq_len: int) -> np.ndarray:
        return np.linspace(start=self.w_left, stop=self.w_right, num=n_seq_len)
@dataclass
class AutoRegressiveDatasetConfig(SequenceDatasetConfig):
    """Augmentation config plus padding length for autoregressive training."""

    # Number of padding steps appended after each episode ends, so the model
    # observes a stable terminal state when sequences are length-equalized.
    n_dummy_after_termination: int = 20
@dataclass
class AutoRegressiveDataset(Dataset):
    """Torch dataset of encoded state sequences for autoregressive training.

    Each item is a (state sequence, static context vector, per-step weight
    sequence) triple. All sequences are padded to a common length in
    from_chunk so they can be batched.
    """

    state_seq_list: List[np.ndarray]  # encoded states; last element is the terminate flag
    static_context_list: List[np.ndarray]  # one fixed context vector per sequence
    weight_seq_list: List[np.ndarray]  # per-timestep loss weights
    encoding_rule: EncodingRule

    def __len__(self) -> int:
        return len(self.state_seq_list)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Convert to float tensors lazily, per item.
        state = torch.from_numpy(self.state_seq_list[idx]).float()
        context = torch.from_numpy(self.static_context_list[idx]).float()
        weight = torch.tensor(self.weight_seq_list[idx]).float()
        return state, context, weight

    def __post_init__(self):  # validation
        # All three per-sequence lists must describe the same episodes.
        assert_two_sequences_same_length(self.state_seq_list, self.weight_seq_list)
        assert_equal_with_message(
            len(self.static_context_list), len(self.state_seq_list), "length of sequence"
        )

    @classmethod
    def from_chunk(
        cls,
        chunk: MultiEpisodeChunk,
        encoding_rule: EncodingRule,
        augconfig: Optional[AutoRegressiveDatasetConfig] = None,
        static_context_list: Optional[List[np.ndarray]] = None,
        weighting: Optional[Union[WeightPolicy, List[np.ndarray]]] = None,
    ) -> "AutoRegressiveDataset":
        """Build a dataset from an episode chunk: encode, weight, augment, pad.

        weighting may be either a WeightPolicy (applied per sequence) or a
        pre-computed list of weight arrays; defaults to constant weights.
        """
        # The encoding rule must emit TerminateFlag as its final element,
        # since downstream padding assumes the flag sits at the end.
        last_key_of_rule = list(encoding_rule.keys())[-1]
        assert last_key_of_rule == TerminateFlag
        if augconfig is None:
            augconfig = AutoRegressiveDatasetConfig()

        state_seq_list = encoding_rule.apply_to_multi_episode_chunk(chunk)

        # setting up weighting
        if weighting is None:
            weighting = ConstantWeightPolicy()
        if isinstance(weighting, list):
            weight_seq_list: List[np.ndarray] = weighting
            logger.info("use user-provided numpy weighting")
        else:
            logger.info("use weight policy: {}".format(weighting))
            weight_seq_list = [weighting(len(seq)) for seq in state_seq_list]
        assert_two_sequences_same_length(state_seq_list, weight_seq_list)

        # setting up biases
        if static_context_list is None:  # create sequence of 0-dim vector
            static_context_list = [np.zeros((0)) for _ in range(len(state_seq_list))]
        assert_equal_with_message(
            len(static_context_list), len(state_seq_list), "length of sequence"
        )

        # augmentation: states are noise-perturbed; weights and contexts are
        # only repeated so their ordering matches the augmented states.
        augmentor = SequenceDataAugmentor(augconfig, take_diff=True)
        state_seq_list_auged, [weight_seq_list_auged, static_context_list_auged] = augmentor.apply(
            state_seq_list, [weight_seq_list, static_context_list]
        )
        assert weight_seq_list_auged is not None  # for mypy

        # make all sequence to the same length due to torch batch computation requirement
        state_seq_list_auged_adjusted, weight_seq_list_auged_adjusted = cls.make_same_length(
            state_seq_list_auged, weight_seq_list_auged, augconfig
        )

        return cls(
            state_seq_list_auged_adjusted,
            static_context_list_auged,
            weight_seq_list_auged_adjusted,
            encoding_rule,
        )

    @staticmethod
    def make_same_length(
        state_seq_list: List[np.ndarray],
        weight_seq_list: List[np.ndarray],
        augconfig: AutoRegressiveDatasetConfig,
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Makes all sequences have the same length.

        Pads every sequence (in place in the given lists) up to the longest
        length plus n_dummy_after_termination, by repeating the final state
        and final weight.
        """
        n_max_in_dataset_raw = max([len(seq) for seq in state_seq_list])
        n_max_in_dataset = n_max_in_dataset_raw + augconfig.n_dummy_after_termination

        for i in range(len(state_seq_list)):
            state_seq = state_seq_list[i]
            weight_seq = weight_seq_list[i]
            n_seq = len(state_seq)
            n_padding = n_max_in_dataset - n_seq

            # Repeat the terminal state/weight for the padded tail.
            padding_state_seq = np.tile(state_seq[-1], (n_padding, 1))
            padded_state_seq = np.vstack((state_seq, padding_state_seq))

            padding_weight_seq = np.array([weight_seq[-1]] * n_padding)
            padded_weight_seq = np.hstack((weight_seq, padding_weight_seq))

            assert len(padded_state_seq) == n_max_in_dataset
            assert len(padded_weight_seq) == n_max_in_dataset

            state_seq_list[i] = padded_state_seq
            weight_seq_list[i] = padded_weight_seq

        assert_two_sequences_same_length(state_seq_list, weight_seq_list)
        return state_seq_list, weight_seq_list
@dataclass
class MarkovControlSystemDataset(Dataset):
    """o_{t+1} = f(o_{t}, u_t{t})

    Dataset of one-step transitions for learning a Markov dynamics model:
    each item is (control input, current observation, next observation).
    """

    inp_ctrl_seq: np.ndarray  # control at time t (or control delta, see from_chunk)
    inp_obs_seq: np.ndarray  # observation at time t
    out_obs_seq: np.ndarray  # observation at time t+1

    def __len__(self) -> int:
        return len(self.inp_ctrl_seq)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        inp_ctrl = torch.from_numpy(self.inp_ctrl_seq[idx]).float()
        inp_obs = torch.from_numpy(self.inp_obs_seq[idx]).float()
        out_obs = torch.from_numpy(self.out_obs_seq[idx]).float()
        return inp_ctrl, inp_obs, out_obs

    @classmethod
    def from_chunk(
        cls,
        chunk: MultiEpisodeChunk,
        control_encoding_rule: EncodingRule,
        observation_encoding_rule: EncodingRule,
        config: Optional[SequenceDatasetConfig] = None,
        diff_as_control: bool = True,
    ) -> "MarkovControlSystemDataset":
        """Encode a chunk into one-step (control, obs, next obs) transitions.

        If diff_as_control is True, the stored control is the difference
        u_{t+1} - u_t rather than u_t itself.
        """
        if config is None:
            config = SequenceDatasetConfig()
        ctrl_seq_list = control_encoding_rule.apply_to_multi_episode_chunk(chunk)
        obs_seq_list = observation_encoding_rule.apply_to_multi_episode_chunk(chunk)
        assert_two_sequences_same_length(ctrl_seq_list, obs_seq_list)

        # Controls are augmented on raw values, observations on differences.
        ctrl_augmentor = SequenceDataAugmentor(config, take_diff=False)
        obs_augmentor = SequenceDataAugmentor(config, take_diff=True)
        ctrl_seq_list_auged, _ = ctrl_augmentor.apply(ctrl_seq_list, [])
        obs_seq_list_auged, _ = obs_augmentor.apply(obs_seq_list, [])

        # Flatten every consecutive pair of timesteps into a transition tuple.
        inp_ctrl_seq = []
        inp_obs_seq = []
        out_obs_seq = []
        for i in range(len(ctrl_seq_list_auged)):
            ctrl_seq = ctrl_seq_list_auged[i]
            obs_seq = obs_seq_list_auged[i]
            for j in range(len(ctrl_seq) - 1):
                if diff_as_control:
                    inp_ctrl_seq.append(ctrl_seq[j + 1] - ctrl_seq[j])
                else:
                    inp_ctrl_seq.append(ctrl_seq[j])
                inp_obs_seq.append(obs_seq[j])
                out_obs_seq.append(obs_seq[j + 1])

        return cls(np.array(inp_ctrl_seq), np.array(inp_obs_seq), np.array(out_obs_seq))
| [
"logging.getLogger",
"numpy.tile",
"numpy.ones",
"numpy.hstack",
"numpy.random.multivariate_normal",
"torch.from_numpy",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"torch.tensor",
"numpy.vstack",
"mohou.utils.assert_two_sequences_same_length",
"copy.deepcopy",
"numpy.cov"
] | [((412, 439), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (429, 439), False, 'import logging\n'), ((1100, 1125), 'numpy.vstack', 'np.vstack', (['state_seq_list'], {}), '(state_seq_list)\n', (1109, 1125), True, 'import numpy as np\n'), ((1144, 1165), 'numpy.cov', 'np.cov', (['state_diffs.T'], {}), '(state_diffs.T)\n', (1150, 1165), True, 'import numpy as np\n'), ((1479, 1505), 'numpy.vstack', 'np.vstack', (['state_diff_list'], {}), '(state_diff_list)\n', (1488, 1505), True, 'import numpy as np\n'), ((1524, 1545), 'numpy.cov', 'np.cov', (['state_diffs.T'], {}), '(state_diffs.T)\n', (1530, 1545), True, 'import numpy as np\n'), ((2603, 2632), 'copy.deepcopy', 'copy.deepcopy', (['state_seq_list'], {}), '(state_seq_list)\n', (2616, 2632), False, 'import copy\n'), ((3465, 3483), 'numpy.ones', 'np.ones', (['n_seq_lne'], {}), '(n_seq_lne)\n', (3472, 3483), True, 'import numpy as np\n'), ((3646, 3695), 'numpy.linspace', 'np.linspace', (['self.w_left', 'self.w_right', 'n_seq_len'], {}), '(self.w_left, self.w_right, n_seq_len)\n', (3657, 3695), True, 'import numpy as np\n'), ((4476, 4551), 'mohou.utils.assert_two_sequences_same_length', 'assert_two_sequences_same_length', (['self.state_seq_list', 'self.weight_seq_list'], {}), '(self.state_seq_list, self.weight_seq_list)\n', (4508, 4551), False, 'from mohou.utils import assert_equal_with_message, assert_two_sequences_same_length\n'), ((5750, 5815), 'mohou.utils.assert_two_sequences_same_length', 'assert_two_sequences_same_length', (['state_seq_list', 'weight_seq_list'], {}), '(state_seq_list, weight_seq_list)\n', (5782, 5815), False, 'from mohou.utils import assert_equal_with_message, assert_two_sequences_same_length\n'), ((8096, 8161), 'mohou.utils.assert_two_sequences_same_length', 'assert_two_sequences_same_length', (['state_seq_list', 'weight_seq_list'], {}), '(state_seq_list, weight_seq_list)\n', (8128, 8161), False, 'from mohou.utils import assert_equal_with_message, 
assert_two_sequences_same_length\n'), ((9343, 9404), 'mohou.utils.assert_two_sequences_same_length', 'assert_two_sequences_same_length', (['ctrl_seq_list', 'obs_seq_list'], {}), '(ctrl_seq_list, obs_seq_list)\n', (9375, 9404), False, 'from mohou.utils import assert_equal_with_message, assert_two_sequences_same_length\n'), ((2905, 2934), 'copy.deepcopy', 'copy.deepcopy', (['other_seq_list'], {}), '(other_seq_list)\n', (2918, 2934), False, 'import copy\n'), ((7602, 7640), 'numpy.tile', 'np.tile', (['state_seq[-1]', '(n_padding, 1)'], {}), '(state_seq[-1], (n_padding, 1))\n', (7609, 7640), True, 'import numpy as np\n'), ((7672, 7713), 'numpy.vstack', 'np.vstack', (['(state_seq, padding_state_seq)'], {}), '((state_seq, padding_state_seq))\n', (7681, 7713), True, 'import numpy as np\n'), ((7748, 7786), 'numpy.array', 'np.array', (['([weight_seq[-1]] * n_padding)'], {}), '([weight_seq[-1]] * n_padding)\n', (7756, 7786), True, 'import numpy as np\n'), ((7819, 7862), 'numpy.hstack', 'np.hstack', (['(weight_seq, padding_weight_seq)'], {}), '((weight_seq, padding_weight_seq))\n', (7828, 7862), True, 'import numpy as np\n'), ((10256, 10278), 'numpy.array', 'np.array', (['inp_ctrl_seq'], {}), '(inp_ctrl_seq)\n', (10264, 10278), True, 'import numpy as np\n'), ((10280, 10301), 'numpy.array', 'np.array', (['inp_obs_seq'], {}), '(inp_obs_seq)\n', (10288, 10301), True, 'import numpy as np\n'), ((10303, 10324), 'numpy.array', 'np.array', (['out_obs_seq'], {}), '(out_obs_seq)\n', (10311, 10324), True, 'import numpy as np\n'), ((2400, 2415), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (2408, 2415), True, 'import numpy as np\n'), ((2444, 2502), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov_mat_scaled', 'n_seq'], {}), '(mean, cov_mat_scaled, n_seq)\n', (2473, 2502), True, 'import numpy as np\n'), ((4196, 4238), 'torch.from_numpy', 'torch.from_numpy', (['self.state_seq_list[idx]'], {}), '(self.state_seq_list[idx])\n', (4212, 4238), 
False, 'import torch\n'), ((4265, 4312), 'torch.from_numpy', 'torch.from_numpy', (['self.static_context_list[idx]'], {}), '(self.static_context_list[idx])\n', (4281, 4312), False, 'import torch\n'), ((4338, 4377), 'torch.tensor', 'torch.tensor', (['self.weight_seq_list[idx]'], {}), '(self.weight_seq_list[idx])\n', (4350, 4377), False, 'import torch\n'), ((5955, 5966), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (5963, 5966), True, 'import numpy as np\n'), ((8560, 8600), 'torch.from_numpy', 'torch.from_numpy', (['self.inp_ctrl_seq[idx]'], {}), '(self.inp_ctrl_seq[idx])\n', (8576, 8600), False, 'import torch\n'), ((8627, 8666), 'torch.from_numpy', 'torch.from_numpy', (['self.inp_obs_seq[idx]'], {}), '(self.inp_obs_seq[idx])\n', (8643, 8666), False, 'import torch\n'), ((8693, 8732), 'torch.from_numpy', 'torch.from_numpy', (['self.out_obs_seq[idx]'], {}), '(self.out_obs_seq[idx])\n', (8709, 8732), False, 'import torch\n'), ((3080, 3105), 'copy.deepcopy', 'copy.deepcopy', (['weight_seq'], {}), '(weight_seq)\n', (3093, 3105), False, 'import copy\n')] |
from .factory import Factory
import numpy as np
import itertools as itt
# NOTE special case... not a standard factory!
class FactoryUnion(Factory):
    """Disjoint union of named sub-factories.

    Items are addressed by a single flat index into the concatenation of the
    sub-factories' index ranges; values are ``(name, sub_value)`` pairs that
    tag which sub-factory a value belongs to.  NOTE (module comment above):
    this is a special case, not a standard factory.
    """
    def __init__(self, **fmap):
        # fmap: keyword-named sub-factories; each must expose nitems,
        # values, items, i, value, item and isitem.
        self.__fmap = fmap
        # name -> ordinal position of the sub-factory (insertion order).
        self.__fimap = {k: i for i, k in enumerate(fmap.keys())}
        # Cumulative offsets: __cumdims[j] is the first flat index owned by
        # the j-th sub-factory; the leading 0 anchors the first range.
        dims = (0,) + tuple(f.nitems for f in fmap.values())
        self.__cumdims = np.cumsum(dims)
        self.nitems = self.__cumdims[-1]
    def __getattr__(self, name):
        """Expose each sub-factory as an attribute (``union.<name>``).

        Only invoked when normal attribute lookup fails; unknown names are
        converted from KeyError to AttributeError so hasattr() behaves.
        """
        try:
            return self.__fmap[name]
        except KeyError:
            raise AttributeError
    @property
    def values(self):
        """Yield ``(name, value)`` pairs over all sub-factories, in order."""
        for k, f in self.__fmap.items():
            for v in f.values:
                yield k, v
    @property
    def items(self):
        # Chains the sub-factories' raw items (untagged, unlike `values`).
        return itt.chain(*(f.items for f in self.__fmap.values()))
    def i(self, value):
        """Map a ``(name, sub_value)`` pair to its flat union index."""
        k, v = value
        fi = self.__fimap[k]
        i = self.__fmap[k].i(v)
        # Offset of factory k plus the index within that factory.
        return self.__cumdims[fi] + i
    def value(self, i):
        """Inverse of :meth:`i`: flat index -> ``(name, sub_value)`` pair.

        Raises ValueError when ``i`` is outside ``[0, nitems)``.
        """
        for k, f in self.__fmap.items():
            if 0 <= i < f.nitems:
                return k, f.value(i)
            i -= f.nitems
        raise ValueError
    # simulating Item creation
    @staticmethod # hack/trick: declared static but takes `self` explicitly,
    # presumably so the Factory base can bind/dispatch it manually -- the
    # name mangling of self.__fmap still resolves because the body is
    # textually inside this class.  TODO confirm against Factory.
    def Item(self, i):
        # Same range walk as `value`, but delegates to the owning
        # sub-factory's item() instead of tagging the value.
        for k, f in self.__fmap.items():
            if 0 <= i < f.nitems:
                return f.item(i)
            i -= f.nitems
        raise ValueError
    def isitem(self, item):
        """Overriding: an item of the union is an item of any sub-factory."""
        return any(f.isitem(item) for f in self.__fmap.values())
| [
"numpy.cumsum"
] | [((362, 377), 'numpy.cumsum', 'np.cumsum', (['dims'], {}), '(dims)\n', (371, 377), True, 'import numpy as np\n')] |
import unittest
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torchvision
from code_soup.ch5 import ZooAttack
class TestZooAttack(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        """Build the fixtures shared by every test in this case.

        Creates a pinned (1, 4, 4, 3) input image and a same-shaped
        modifier, one-hot labels for a two-class problem, the attack
        configuration, a tiny deterministic conv->flatten->linear model with
        hand-set weights, and the ZooAttack instance under test.  All values
        are fixed so the exact-value regression assertions below stay stable.
        """
        # Input image, shape (1, 4, 4, 3) -- channels-last layout.
        cls.orig_img = torch.tensor(
            [
                [
                    [
                        [-1.6505998, -1.0305759, 1.0229983],
                        [-0.49261865, 1.0394262, -2.0290275],
                        [0.21951008, -2.1673787, -0.38990623],
                        [-0.2866124, 1.0799991, -0.11442444],
                    ],
                    [
                        [-0.7052935, -0.5529446, 0.26524046],
                        [-1.0540642, 0.6887131, 1.6723113],
                        [1.1097006, 2.1335971, 0.9231482],
                        [0.37910375, -0.12366215, -0.25093704],
                    ],
                    [
                        [-1.9404864, -1.3078933, 0.88476175],
                        [0.35099706, -1.254437, 0.05408821],
                        [0.7342985, -0.43663985, 0.11520719],
                        [-0.07479854, -2.5859993, 1.4102333],
                    ],
                    [
                        [0.21304935, -0.3496548, -0.19856042],
                        [-0.434919, -0.27774376, 1.1471609],
                        [1.4504786, 0.67261624, -0.23560882],
                        [1.0592173, 0.6655428, 1.1890292],
                    ],
                ]
            ],
            dtype=torch.float32,
        )
        # Additive perturbation with the same (1, 4, 4, 3) shape as the image.
        cls.modifier = np.array(
            [
                [
                    [
                        [-0.21563086, 0.54629284, 1.0879989],
                        [-0.17234534, 0.37302095, 1.5072422],
                        [-0.14709516, -0.08446954, -1.0199878],
                        [-0.46581882, 0.41346493, -1.6357177],
                    ],
                    [
                        [0.97039294, -0.46038368, -0.5377948],
                        [-0.08285582, -1.4017423, -0.6447743],
                        [-0.6031785, -2.003339, -0.01103557],
                        [0.41714168, -1.94303, 0.6685426],
                    ],
                    [
                        [-0.83851266, 0.79823476, 0.2532903],
                        [-0.76351106, 0.90984505, 1.331635],
                        [-1.1300149, -0.8444777, -2.2185612],
                        [1.0166003, 0.9233805, 0.98315567],
                    ],
                    [
                        [-0.88205546, -0.3438152, -0.36559045],
                        [0.56274384, 1.5836877, -1.2370849],
                        [1.4234338, -0.5929535, -1.3011148],
                        [0.84160084, 0.90161383, 0.80880517],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        # One-hot target labels (batch of one, two classes).
        cls.labels = torch.tensor([[0, 1]])
        # Attack hyper-parameters; this dict is shared by all tests, so
        # individual tests should deepcopy before mutating it.
        cls.config = {
            "binary_search_steps": 1,
            "max_iterations": 100,
            "learning_rate": 2e-3,
            "abort_early": True,
            "targeted": True,
            "confidence": 0,
            "initial_const": 0.5,
            "use_log": False,
            "use_tanh": True,
            "reset_adam_after_found": True,
            "batch_size": 4,
            "const": 0.5,
            "early_stop_iters": 0,
            "adam_beta1": 0.9,
            "adam_beta2": 0.999,
            "use_importance": True,
            "use_resize": False,
            "init_size": 4,
            "adam_eps": 1e-8,
            "resize_iter_1": 2000,
            "resize_iter_2": 10000,
        }
        # Tiny deterministic model: 2x2 conv (3 -> 1 channels), flatten,
        # linear head to 2 logits.
        cls.model = nn.Sequential(
            nn.Conv2d(
                in_channels=3, out_channels=1, kernel_size=2, padding=0, bias=False
            ),
            nn.Flatten(),
            nn.Linear(4 * 4 * 3, 2, bias=False),
        )
        # Hand-set weights so the model's outputs are reproducible.
        # NOTE(review): nn.Linear is declared with in_features=4*4*3=48, but
        # its weight is immediately replaced below with a (2, 9) tensor
        # matching the conv's 1x3x3 output, so the declared size is unused.
        with torch.no_grad():
            cls.model[0].weight = nn.Parameter(
                torch.tensor(
                    [
                        [
                            [[0.18992287], [-0.6111586], [-0.41560256]],
                            [[0.19819254], [0.06157357], [-0.29873127]],
                        ],
                        [
                            [[0.08528781], [-0.4988662], [0.51414317]],
                            [[0.5520558], [0.35638297], [0.29052997]],
                        ],
                    ]
                ).permute(3, 2, 0, 1)
            )
            cls.model[2].weight = nn.Parameter(
                torch.tensor(
                    [
                        [0.26311237, 0.7238547],
                        [-0.2869757, -0.6140047],
                        [-0.11846703, -0.57517225],
                        [-0.72543985, 0.6393444],
                        [0.45188862, 0.35718697],
                        [-0.7197881, 0.17988789],
                        [0.18161213, 0.32464463],
                        [0.37511164, 0.07291293],
                        [-0.27989575, -0.37013885],
                    ]
                ).T
            )
        # Attack under test, wired to the model and config above.
        cls.attack = ZooAttack(
            model=cls.model,
            config=cls.config,
            input_image_shape=cls.orig_img.shape[1:],
            device="cpu:0",
        )
def test_get_perturbed_image(self):
perturbed_image = self.attack.get_perturbed_image(self.orig_img, self.modifier)
self.assertEqual(perturbed_image.shape, self.orig_img.shape)
output = torch.tanh(self.orig_img + self.modifier) / 2
self.assertTrue(torch.allclose(perturbed_image, output, atol=1e-5))
# Without Tanh
attack = deepcopy(self.attack)
attack.config["use_tanh"] = False
perturbed_image_2 = attack.get_perturbed_image(self.orig_img, self.modifier)
self.assertEqual(perturbed_image_2.shape, self.orig_img.shape)
output_2 = self.orig_img + torch.from_numpy(self.modifier)
self.assertTrue(torch.allclose(perturbed_image_2, output_2, atol=1e-5))
# Integration Test
self.assertTrue(
torch.allclose(
perturbed_image,
torch.tensor(
[
[
[-0.47662562, -0.22483358, 0.4855427],
[-0.2908287, 0.44400635, -0.23953833],
[0.0361443, -0.4890532, -0.44373578],
[-0.3182986, 0.45198008, -0.47069582],
],
[
[0.12952949, -0.38356757, -0.13300003],
[-0.4066872, -0.30628642, 0.38645932],
[0.23361546, 0.06476317, 0.36107236],
[0.33096626, -0.48422426, 0.19745564],
],
[
[-0.49615836, -0.23483951, 0.40687856],
[-0.19530259, -0.16578534, 0.44111317],
[-0.18813889, -0.42839125, -0.4853233],
[0.3680245, -0.46528453, 0.49172965],
],
[
[-0.2921629, -0.3001033, -0.25552535],
[0.06356658, 0.43162277, -0.04484118],
[0.49682045, 0.03974732, -0.4557841],
[0.4781537, 0.4582861, 0.4819371],
],
],
),
)
)
def test_l2_distance_loss(self):
new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier)
loss = self.attack.l2_distance_loss(self.orig_img, new_img)
self.assertEqual(loss.shape[0], self.orig_img.shape[0])
# Without Tanh
attack = deepcopy(self.attack)
attack.config["use_tanh"] = False
new_img_2 = attack.get_perturbed_image(self.orig_img, self.modifier)
loss_2 = attack.l2_distance_loss(self.orig_img, new_img_2)
self.assertEqual(loss_2.shape[0], self.orig_img.shape[0])
# Integration Test
self.assertTrue(np.allclose(np.array([3.7336116]), loss, atol=1e-5))
def test_confidence_loss(self):
new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier)
loss, model_output = self.attack.confidence_loss(new_img, self.labels)
self.assertEqual(loss.shape[0], new_img.shape[0])
# With Log and Untargeted
attack = deepcopy(self.attack)
attack.config["use_log"] = True
attack.config["targeted"] = False
new_img_2 = attack.get_perturbed_image(self.orig_img, self.modifier)
loss_2, model_output = attack.confidence_loss(new_img_2, self.labels)
self.assertEqual(loss_2.shape[0], new_img_2.shape[0])
# Integration Test
self.assertTrue(np.allclose(np.array([0.2148518]), loss, atol=1e-5))
def test_zero_order_gradients(self):
losses = np.random.randn(2 * self.config["batch_size"] + 1)
grads = self.attack.zero_order_gradients(losses)
self.assertEqual(grads.shape, (self.config["batch_size"],))
def test_total_loss(self):
new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier)
loss, l2_loss, confidence_loss, model_output = self.attack.total_loss(
self.orig_img, new_img, self.labels, self.config["initial_const"]
)
self.assertEqual(loss.shape[0], self.orig_img.shape[0])
self.assertEqual(confidence_loss.shape[0], self.orig_img.shape[0])
self.assertEqual(l2_loss.shape[0], self.orig_img.shape[0])
self.assertEqual(model_output.shape, self.labels.shape)
    def test_max_pooling(self):
        """max_pooling keeps the input shape; 2x2 windows hold their max.

        Runs on a single 4x4 channel slice of the modifier and pins the
        expected block-wise maxima.
        """
        # First channel of the (4, 4, 3) modifier.
        modifier = self.modifier[0][:, :, 0]
        pooled_output = self.attack.max_pooling(modifier, 2)
        self.assertEqual(pooled_output.shape, modifier.shape)
        # Integration Test
        self.assertTrue(
            np.allclose(
                pooled_output,
                np.array(
                    [
                        [
                            0.97039294,
                            0.97039294,
                            0.41714168,
                            0.41714168,
                        ],
                        [
                            0.97039294,
                            0.97039294,
                            0.41714168,
                            0.41714168,
                        ],
                        [
                            0.56274384,
                            0.56274384,
                            1.4234338,
                            1.4234338,
                        ],
                        [
                            0.56274384,
                            0.56274384,
                            1.4234338,
                            1.4234338,
                        ],
                    ]
                ),
                atol=1e-5,
            )
        )
    def test_coordinate_adam(self):
        """Coordinate-wise ADAM step: pins moments, modifier and epochs.

        Applies one update to flat coordinates (15, 24, 32, 45) twice --
        first with projection onto the [down, up] box (``proj`` True because
        ``use_tanh`` is switched off), then without projection -- and checks
        the exact first/second moments, the in-place modifier update, and
        the per-coordinate epoch counters after each run.
        """
        # With Proj True
        attack = deepcopy(self.attack)
        attack.config["use_tanh"] = False
        # Projection box derived from the fixture image (+/- 0.5 pixel range).
        attack.up = 0.5 - self.orig_img.numpy().reshape(-1)
        attack.down = -0.5 - self.orig_img.numpy().reshape(-1)
        indices = np.array([15, 24, 32, 45])
        grad = np.array([2000.0, 3500.0, -1000.0, -1500.0])
        proj = not attack.config["use_tanh"]
        modifier = deepcopy(self.modifier)
        attack.coordinate_adam(indices, grad, modifier, proj)
        self.assertTrue(
            np.allclose(
                attack.mt_arr,
                np.array(
                    [
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        200.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        350.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        -100.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        -150.0,
                        0.0,
                        0.0,
                    ],
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            np.allclose(
                attack.vt_arr,
                np.array(
                    [
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        4000.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        12250.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        1000.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        2250.0,
                        0.0,
                        0.0,
                    ],
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            np.allclose(
                modifier,
                np.array(
                    [
                        [
                            [-0.21563086, 0.54629284, 1.0879989],
                            [-0.17234534, 0.37302095, 1.5072422],
                            [-0.14709516, -0.08446954, -1.0199878],
                            [-0.46581882, 0.41346493, -1.6357177],
                        ],
                        [
                            [0.97039294, -0.46038368, -0.5377948],
                            [0.55406415, -1.4017423, -0.6447743],
                            [-0.6031785, -2.003339, -0.01103557],
                            [0.41714168, -1.94303, 0.6685426],
                        ],
                        [
                            [1.4404864, 0.79823476, 0.2532903],
                            [-0.76351106, 0.90984505, 1.331635],
                            [-1.1300149, -0.8444777, -0.6152072],
                            [1.0166003, 0.9233805, 0.98315567],
                        ],
                        [
                            [-0.88205546, -0.3438152, -0.36559045],
                            [0.56274384, 1.5836877, -1.2370849],
                            [1.4234338, -0.5929535, -1.3011148],
                            [-0.55921733, 0.90161383, 0.80880517],
                        ],
                    ]
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            (
                attack.adam_epochs
                == np.array(
                    [
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                    ],
                )
            ).all(),
        )
        # Integration Test
        # Without Proj True
        attack = deepcopy(self.attack)
        indices = np.array([15, 24, 32, 45])
        grad = np.array([2000.0, 3500.0, -1000.0, -1500.0])
        proj = not attack.config["use_tanh"]
        modifier = deepcopy(self.modifier)
        attack.coordinate_adam(indices, grad, modifier, proj)
        self.assertTrue(
            np.allclose(
                attack.mt_arr,
                np.array(
                    [
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        200.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        350.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        -100.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        -150.0,
                        0.0,
                        0.0,
                    ],
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            np.allclose(
                attack.vt_arr,
                np.array(
                    [
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        4000.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        12250.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        1000.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        2250.0,
                        0.0,
                        0.0,
                    ],
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            np.allclose(
                modifier,
                np.array(
                    [
                        [
                            [-0.21563086, 0.54629284, 1.0879989],
                            [-0.17234534, 0.37302095, 1.5072422],
                            [-0.14709516, -0.08446954, -1.0199878],
                            [-0.46581882, 0.41346493, -1.6357177],
                        ],
                        [
                            [0.97039294, -0.46038368, -0.5377948],
                            [-0.08485582, -1.4017423, -0.6447743],
                            [-0.6031785, -2.003339, -0.01103557],
                            [0.41714168, -1.94303, 0.6685426],
                        ],
                        [
                            [-0.84051266, 0.79823476, 0.2532903],
                            [-0.76351106, 0.90984505, 1.331635],
                            [-1.1300149, -0.8444777, -2.2165612],
                            [1.0166003, 0.9233805, 0.98315567],
                        ],
                        [
                            [-0.88205546, -0.3438152, -0.36559045],
                            [0.56274384, 1.5836877, -1.2370849],
                            [1.4234338, -0.5929535, -1.3011148],
                            [0.84360084, 0.90161383, 0.80880517],
                        ],
                    ],
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            (
                attack.adam_epochs
                == np.array(
                    [
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        1,
                        2,
                        1,
                        1,
                    ],
                )
            ).all(),
        )
    def test_get_new_prob(self):
        """Sampling probabilities keep the modifier's spatial shape.

        Pins the exact per-coordinate probabilities produced from the
        fixture modifier with a pooling ratio of 2.
        """
        probs = self.attack.get_new_prob(self.modifier, 2)
        self.assertEqual(probs.shape, self.modifier.shape[1:])
        # Integration Test
        self.assertTrue(
            np.allclose(
                probs,
                np.array(
                    [
                        [
                            [0.01471687, 0.02125866, 0.02285866],
                            [0.01471687, 0.02125866, 0.02285866],
                            [0.00914774, 0.03038241, 0.0248071],
                            [0.00914774, 0.03038241, 0.0248071],
                        ],
                        [
                            [0.01471687, 0.02125866, 0.02285866],
                            [0.01471687, 0.02125866, 0.02285866],
                            [0.00914774, 0.03038241, 0.0248071],
                            [0.00914774, 0.03038241, 0.0248071],
                        ],
                        [
                            [0.01337715, 0.02401802, 0.02019542],
                            [0.01337715, 0.02401802, 0.02019542],
                            [0.02158763, 0.01400388, 0.03364644],
                            [0.02158763, 0.01400388, 0.03364644],
                        ],
                        [
                            [0.01337715, 0.02401802, 0.02019542],
                            [0.01337715, 0.02401802, 0.02019542],
                            [0.02158763, 0.01400388, 0.03364644],
                            [0.02158763, 0.01400388, 0.03364644],
                        ],
                    ]
                ),
                atol=1e-5,
            )
        )
    def test_resize_img(self):
        """resize_img upsamples the modifier to 8x8x3 and rebuilds sample_prob.

        ``reset_only=True`` only reallocates; the default path also
        interpolates the old modifier into the new resolution.  The exact
        resized modifier and sampling probabilities are pinned below.
        """
        # Reset Only True
        attack = deepcopy(self.attack)
        new_modifier = attack.resize_img(8, 8, 3, self.modifier, 2, reset_only=True)
        self.assertEqual(new_modifier.shape, (1, 8, 8, 3))
        # NOTE(review): comparing a shape against np.prod(8 * 8 * 3), a
        # scalar -- assumes sample_prob's shape reduces to 192; confirm.
        self.assertEqual(attack.sample_prob.shape, np.prod(8 * 8 * 3))
        # Reset Only False
        attack = deepcopy(self.attack)
        new_modifier = attack.resize_img(8, 8, 3, self.modifier, 2)
        self.assertEqual(new_modifier.shape, (1, 8, 8, 3))
        self.assertEqual(attack.sample_prob.shape, np.prod(8 * 8 * 3))
        # Integration Test
        self.assertTrue(
            np.allclose(
                new_modifier,
                np.array(
                    [
                        [
                            [
                                [-0.21563086, 0.54629284, 1.0879989],
                                [-0.20480949, 0.50297487, 1.1928097],
                                [-0.18316671, 0.41633892, 1.4024314],
                                [-0.16603279, 0.25864834, 0.8754347],
                                [-0.15340771, 0.02990307, -0.38818032],
                                [-0.22677608, 0.04001407, -1.1739203],
                                [-0.3861379, 0.28898132, -1.4817853],
                                [-0.46581882, 0.41346493, -1.6357177],
                            ],
                            [
                                [0.0808751, 0.2946237, 0.68155044],
                                [0.02316307, 0.2033003, 0.7534723],
                                [-0.09226094, 0.02065352, 0.89731616],
                                [-0.17775872, -0.19404912, 0.53499115],
                                [-0.23333023, -0.44080764, -0.3335028],
                                [-0.25710666, -0.4670549, -0.8407254],
                                [-0.24908802, -0.2727908, -0.98667693],
                                [-0.2450787, -0.17565879, -1.0596527],
                            ],
                            [
                                [0.673887, -0.20871457, -0.1313464],
                                [0.47910815, -0.39604884, -0.12520233],
                                [0.0895506, -0.77071726, -0.11291426],
                                [-0.20121056, -1.099444, -0.14589605],
                                [-0.3931753, -1.3822291, -0.22414777],
                                [-0.31776786, -1.481193, -0.17433581],
                                [0.02501175, -1.396335, 0.00353971],
                                [0.19640155, -1.3539063, 0.09247744],
                            ],
                            [
                                [0.51816654, -0.14572906, -0.34002355],
                                [0.32536998, -0.3152582, -0.29268563],
                                [-0.0602231, -0.6543164, -0.19800991],
                                [-0.3734866, -1.04629, -0.25373322],
                                [-0.61442065, -1.4911791, -0.45985574],
                                [-0.4094141, -1.5918247, -0.23538877],
                                [0.24153282, -1.3482264, 0.4196677],
                                [0.56700635, -1.2264273, 0.7471959],
                            ],
                            [
                                [-0.38628626, 0.48358017, 0.05551901],
                                [-0.4380515, 0.4456721, 0.25102246],
                                [-0.54158205, 0.36985612, 0.6420292],
                                [-0.6945869, -0.03458703, 0.21147956],
                                [-0.89706624, -0.76765776, -1.0406268],
                                [-0.5320455, -0.7989503, -1.0238843],
                                [0.4004752, -0.12846482, 0.2617069],
                                [0.8667356, 0.20677787, 0.9045024],
                            ],
                            [
                                [-0.8493984, 0.51272225, 0.09857011],
                                [-0.7450356, 0.6541181, 0.24629137],
                                [-0.5363101, 0.93690985, 0.54173374],
                                [-0.44687366, 0.6133301, 0.01979139],
                                [-0.4767264, -0.31662107, -1.319536],
                                [-0.12552696, -0.35671276, -1.2570076],
                                [0.60672456, 0.493055, 0.20737618],
                                [0.9728504, 0.9179388, 0.93956804],
                            ],
                            [
                                [-0.87116975, -0.05830276, -0.21087027],
                                [-0.5955823, 0.3100797, -0.3068789],
                                [-0.04440734, 1.0468445, -0.49889636],
                                [0.369653, 0.89746165, -0.8287977],
                                [0.6465987, -0.1380692, -1.2965835],
                                [0.8101414, -0.26511204, -0.9347591],
                                [0.8602809, 0.51633304, 0.25667554],
                                [0.8853507, 0.9070555, 0.8523928],
                            ],
                            [
                                [-0.88205546, -0.3438152, -0.36559045],
                                [-0.52085567, 0.13806051, -0.583464],
                                [0.20154402, 1.1018119, -1.0192113],
                                [0.7779163, 1.0395274, -1.2530923],
                                [1.2082613, -0.04879326, -1.2851074],
                                [1.2779756, -0.21931165, -0.7736348],
                                [0.98705906, 0.52797204, 0.28132522],
                                [0.84160084, 0.90161383, 0.80880517],
                            ],
                        ]
                    ]
                ),
                atol=1e-5,
            )
        )
        self.assertTrue(
            np.allclose(
                attack.sample_prob,
                np.array(
                    [
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00367922,
                        0.00531467,
                        0.00571467,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00228693,
                        0.0075956,
                        0.00620178,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00334429,
                        0.00600451,
                        0.00504886,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                        0.00539691,
                        0.00350097,
                        0.00841161,
                    ]
                ),
                atol=1e-5,
            )
        )
def test_single_step(self):
# Random Without Importance and init size reduce
attack = ZooAttack(
model=self.model,
config=self.config,
input_image_shape=self.orig_img.shape[1:],
device="cpu:0",
)
attack.config["use_importance"] = False
attack.config["init_size"] = 2
modifier = deepcopy(self.modifier)
(
total_loss,
l2_loss,
confidence_loss,
model_output,
new_img,
) = attack.single_step(
modifier,
self.orig_img,
self.labels,
self.config["initial_const"],
max_pooling_ratio=2,
)
self.assertFalse(np.allclose(modifier, self.modifier, atol=1e-5))
self.assertEqual(new_img.shape, self.modifier.shape[1:])
# With Custom Indices
attack = deepcopy(self.attack)
modifier = deepcopy(self.modifier)
indices = [15, 24, 32, 45]
(
total_loss,
l2_loss,
confidence_loss,
model_output,
new_img,
) = attack.single_step(
modifier,
self.orig_img,
self.labels,
self.config["initial_const"],
var_indice=indices,
max_pooling_ratio=2,
)
self.assertFalse(np.allclose(modifier, self.modifier, atol=1e-5))
self.assertEqual(new_img.shape, self.modifier.shape[1:])
    def test_attack(self):
        """End-to-end attack returns an adversarial image of modifier shape.

        Exercises five configurations: scaled input, a more strongly scaled
        input, a supplied initial modifier, resize + untargeted mode, and
        the no-tanh path.  Each run only checks the output shape.
        """
        attack = deepcopy(self.attack)
        orig_img = deepcopy(self.orig_img[0].numpy())
        # Scale into a small range so the attack operates on mild values.
        orig_img /= 10 * np.max(orig_img)
        labels = self.labels[0].numpy()
        outer_best_adv, outer_best_const = attack.attack(
            orig_img, labels, max_pooling_ratio=2
        )
        self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:])
        # Without x10
        attack = deepcopy(self.attack)
        orig_img = deepcopy(self.orig_img[0].numpy())
        orig_img /= 100 * np.max(orig_img)
        outer_best_adv, outer_best_const = attack.attack(
            orig_img, labels, max_pooling_ratio=2
        )
        self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:])
        # With modifier init
        attack = deepcopy(self.attack)
        outer_best_adv, outer_best_const = attack.attack(
            orig_img,
            labels,
            modifier_init=self.modifier[0],
            max_pooling_ratio=2,
        )
        self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:])
        # With use resize and untargeted and max iterations 10k
        attack = deepcopy(self.attack)
        attack.config["use_resize"] = True
        attack.config["resize_iter_1"] = 20
        attack.config["resize_iter_2"] = 80
        attack.config["abort_early"] = False
        attack.config["targeted"] = False
        orig_img = deepcopy(self.orig_img[0].numpy())
        orig_img /= 10 * np.max(orig_img)
        outer_best_adv, outer_best_const = attack.attack(
            orig_img, labels, max_pooling_ratio=2
        )
        self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:])
        # Without tanh
        attack = deepcopy(self.attack)
        attack.config["use_tanh"] = False
        outer_best_adv, outer_best_const = attack.attack(
            orig_img, labels, max_pooling_ratio=2
        )
        self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:])
| [
"torch.tanh",
"numpy.prod",
"numpy.allclose",
"torch.nn.Flatten",
"torch.from_numpy",
"torch.nn.Conv2d",
"numpy.max",
"torch.tensor",
"numpy.array",
"torch.no_grad",
"torch.nn.Linear",
"copy.deepcopy",
"torch.allclose",
"code_soup.ch5.ZooAttack",
"numpy.random.randn"
] | [((269, 957), 'torch.tensor', 'torch.tensor', (['[[[[-1.6505998, -1.0305759, 1.0229983], [-0.49261865, 1.0394262, -2.0290275\n ], [0.21951008, -2.1673787, -0.38990623], [-0.2866124, 1.0799991, -\n 0.11442444]], [[-0.7052935, -0.5529446, 0.26524046], [-1.0540642, \n 0.6887131, 1.6723113], [1.1097006, 2.1335971, 0.9231482], [0.37910375, \n -0.12366215, -0.25093704]], [[-1.9404864, -1.3078933, 0.88476175], [\n 0.35099706, -1.254437, 0.05408821], [0.7342985, -0.43663985, 0.11520719\n ], [-0.07479854, -2.5859993, 1.4102333]], [[0.21304935, -0.3496548, -\n 0.19856042], [-0.434919, -0.27774376, 1.1471609], [1.4504786, \n 0.67261624, -0.23560882], [1.0592173, 0.6655428, 1.1890292]]]]'], {'dtype': 'torch.float32'}), '([[[[-1.6505998, -1.0305759, 1.0229983], [-0.49261865, \n 1.0394262, -2.0290275], [0.21951008, -2.1673787, -0.38990623], [-\n 0.2866124, 1.0799991, -0.11442444]], [[-0.7052935, -0.5529446, \n 0.26524046], [-1.0540642, 0.6887131, 1.6723113], [1.1097006, 2.1335971,\n 0.9231482], [0.37910375, -0.12366215, -0.25093704]], [[-1.9404864, -\n 1.3078933, 0.88476175], [0.35099706, -1.254437, 0.05408821], [0.7342985,\n -0.43663985, 0.11520719], [-0.07479854, -2.5859993, 1.4102333]], [[\n 0.21304935, -0.3496548, -0.19856042], [-0.434919, -0.27774376, \n 1.1471609], [1.4504786, 0.67261624, -0.23560882], [1.0592173, 0.6655428,\n 1.1890292]]]], dtype=torch.float32)\n', (281, 957), False, 'import torch\n'), ((1580, 2268), 'numpy.array', 'np.array', (['[[[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095, \n 1.5072422], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, \n 0.41346493, -1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [-\n 0.08285582, -1.4017423, -0.6447743], [-0.6031785, -2.003339, -\n 0.01103557], [0.41714168, -1.94303, 0.6685426]], [[-0.83851266, \n 0.79823476, 0.2532903], [-0.76351106, 0.90984505, 1.331635], [-\n 1.1300149, -0.8444777, -2.2185612], [1.0166003, 0.9233805, 0.98315567]],\n [[-0.88205546, -0.3438152, -0.36559045], 
[0.56274384, 1.5836877, -\n 1.2370849], [1.4234338, -0.5929535, -1.3011148], [0.84160084, \n 0.90161383, 0.80880517]]]]'], {'dtype': 'np.float32'}), '([[[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095,\n 1.5072422], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, \n 0.41346493, -1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [-\n 0.08285582, -1.4017423, -0.6447743], [-0.6031785, -2.003339, -\n 0.01103557], [0.41714168, -1.94303, 0.6685426]], [[-0.83851266, \n 0.79823476, 0.2532903], [-0.76351106, 0.90984505, 1.331635], [-\n 1.1300149, -0.8444777, -2.2185612], [1.0166003, 0.9233805, 0.98315567]],\n [[-0.88205546, -0.3438152, -0.36559045], [0.56274384, 1.5836877, -\n 1.2370849], [1.4234338, -0.5929535, -1.3011148], [0.84160084, \n 0.90161383, 0.80880517]]]], dtype=np.float32)\n', (1588, 2268), True, 'import numpy as np\n'), ((2888, 2910), 'torch.tensor', 'torch.tensor', (['[[0, 1]]'], {}), '([[0, 1]])\n', (2900, 2910), False, 'import torch\n'), ((5107, 5215), 'code_soup.ch5.ZooAttack', 'ZooAttack', ([], {'model': 'cls.model', 'config': 'cls.config', 'input_image_shape': 'cls.orig_img.shape[1:]', 'device': '"""cpu:0"""'}), "(model=cls.model, config=cls.config, input_image_shape=cls.\n orig_img.shape[1:], device='cpu:0')\n", (5116, 5215), False, 'from code_soup.ch5 import ZooAttack\n'), ((5649, 5670), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (5657, 5670), False, 'from copy import deepcopy\n'), ((7821, 7842), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (7829, 7842), False, 'from copy import deepcopy\n'), ((8508, 8529), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (8516, 8529), False, 'from copy import deepcopy\n'), ((8995, 9045), 'numpy.random.randn', 'np.random.randn', (["(2 * self.config['batch_size'] + 1)"], {}), "(2 * self.config['batch_size'] + 1)\n", (9010, 9045), True, 'import numpy as np\n'), ((11102, 11123), 'copy.deepcopy', 'deepcopy', 
(['self.attack'], {}), '(self.attack)\n', (11110, 11123), False, 'from copy import deepcopy\n'), ((11307, 11333), 'numpy.array', 'np.array', (['[15, 24, 32, 45]'], {}), '([15, 24, 32, 45])\n', (11315, 11333), True, 'import numpy as np\n'), ((11350, 11394), 'numpy.array', 'np.array', (['[2000.0, 3500.0, -1000.0, -1500.0]'], {}), '([2000.0, 3500.0, -1000.0, -1500.0])\n', (11358, 11394), True, 'import numpy as np\n'), ((11461, 11484), 'copy.deepcopy', 'deepcopy', (['self.modifier'], {}), '(self.modifier)\n', (11469, 11484), False, 'from copy import deepcopy\n'), ((17850, 17871), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (17858, 17871), False, 'from copy import deepcopy\n'), ((17890, 17916), 'numpy.array', 'np.array', (['[15, 24, 32, 45]'], {}), '([15, 24, 32, 45])\n', (17898, 17916), True, 'import numpy as np\n'), ((17933, 17977), 'numpy.array', 'np.array', (['[2000.0, 3500.0, -1000.0, -1500.0]'], {}), '([2000.0, 3500.0, -1000.0, -1500.0])\n', (17941, 17977), True, 'import numpy as np\n'), ((18044, 18067), 'copy.deepcopy', 'deepcopy', (['self.modifier'], {}), '(self.modifier)\n', (18052, 18067), False, 'from copy import deepcopy\n'), ((26100, 26121), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (26108, 26121), False, 'from copy import deepcopy\n'), ((26384, 26405), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (26392, 26405), False, 'from copy import deepcopy\n'), ((39114, 39225), 'code_soup.ch5.ZooAttack', 'ZooAttack', ([], {'model': 'self.model', 'config': 'self.config', 'input_image_shape': 'self.orig_img.shape[1:]', 'device': '"""cpu:0"""'}), "(model=self.model, config=self.config, input_image_shape=self.\n orig_img.shape[1:], device='cpu:0')\n", (39123, 39225), False, 'from code_soup.ch5 import ZooAttack\n'), ((39386, 39409), 'copy.deepcopy', 'deepcopy', (['self.modifier'], {}), '(self.modifier)\n', (39394, 39409), False, 'from copy import deepcopy\n'), ((39922, 39943), 
'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (39930, 39943), False, 'from copy import deepcopy\n'), ((39963, 39986), 'copy.deepcopy', 'deepcopy', (['self.modifier'], {}), '(self.modifier)\n', (39971, 39986), False, 'from copy import deepcopy\n'), ((40562, 40583), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (40570, 40583), False, 'from copy import deepcopy\n'), ((40951, 40972), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (40959, 40972), False, 'from copy import deepcopy\n'), ((41308, 41329), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (41316, 41329), False, 'from copy import deepcopy\n'), ((41672, 41693), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (41680, 41693), False, 'from copy import deepcopy\n'), ((42240, 42261), 'copy.deepcopy', 'deepcopy', (['self.attack'], {}), '(self.attack)\n', (42248, 42261), False, 'from copy import deepcopy\n'), ((3682, 3760), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(1)', 'kernel_size': '(2)', 'padding': '(0)', 'bias': '(False)'}), '(in_channels=3, out_channels=1, kernel_size=2, padding=0, bias=False)\n', (3691, 3760), True, 'import torch.nn as nn\n'), ((3804, 3816), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3814, 3816), True, 'import torch.nn as nn\n'), ((3830, 3865), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 3)', '(2)'], {'bias': '(False)'}), '(4 * 4 * 3, 2, bias=False)\n', (3839, 3865), True, 'import torch.nn as nn\n'), ((3891, 3906), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3904, 3906), False, 'import torch\n'), ((5486, 5527), 'torch.tanh', 'torch.tanh', (['(self.orig_img + self.modifier)'], {}), '(self.orig_img + self.modifier)\n', (5496, 5527), False, 'import torch\n'), ((5556, 5607), 'torch.allclose', 'torch.allclose', (['perturbed_image', 'output'], {'atol': '(1e-05)'}), '(perturbed_image, output, atol=1e-05)\n', (5570, 5607), False, 
'import torch\n'), ((5906, 5937), 'torch.from_numpy', 'torch.from_numpy', (['self.modifier'], {}), '(self.modifier)\n', (5922, 5937), False, 'import torch\n'), ((5962, 6017), 'torch.allclose', 'torch.allclose', (['perturbed_image_2', 'output_2'], {'atol': '(1e-05)'}), '(perturbed_image_2, output_2, atol=1e-05)\n', (5976, 6017), False, 'import torch\n'), ((26319, 26337), 'numpy.prod', 'np.prod', (['(8 * 8 * 3)'], {}), '(8 * 8 * 3)\n', (26326, 26337), True, 'import numpy as np\n'), ((26586, 26604), 'numpy.prod', 'np.prod', (['(8 * 8 * 3)'], {}), '(8 * 8 * 3)\n', (26593, 26604), True, 'import numpy as np\n'), ((39759, 39807), 'numpy.allclose', 'np.allclose', (['modifier', 'self.modifier'], {'atol': '(1e-05)'}), '(modifier, self.modifier, atol=1e-05)\n', (39770, 39807), True, 'import numpy as np\n'), ((40402, 40450), 'numpy.allclose', 'np.allclose', (['modifier', 'self.modifier'], {'atol': '(1e-05)'}), '(modifier, self.modifier, atol=1e-05)\n', (40413, 40450), True, 'import numpy as np\n'), ((40663, 40679), 'numpy.max', 'np.max', (['orig_img'], {}), '(orig_img)\n', (40669, 40679), True, 'import numpy as np\n'), ((41053, 41069), 'numpy.max', 'np.max', (['orig_img'], {}), '(orig_img)\n', (41059, 41069), True, 'import numpy as np\n'), ((41992, 42008), 'numpy.max', 'np.max', (['orig_img'], {}), '(orig_img)\n', (41998, 42008), True, 'import numpy as np\n'), ((6148, 6835), 'torch.tensor', 'torch.tensor', (['[[[-0.47662562, -0.22483358, 0.4855427], [-0.2908287, 0.44400635, -\n 0.23953833], [0.0361443, -0.4890532, -0.44373578], [-0.3182986, \n 0.45198008, -0.47069582]], [[0.12952949, -0.38356757, -0.13300003], [-\n 0.4066872, -0.30628642, 0.38645932], [0.23361546, 0.06476317, \n 0.36107236], [0.33096626, -0.48422426, 0.19745564]], [[-0.49615836, -\n 0.23483951, 0.40687856], [-0.19530259, -0.16578534, 0.44111317], [-\n 0.18813889, -0.42839125, -0.4853233], [0.3680245, -0.46528453, \n 0.49172965]], [[-0.2921629, -0.3001033, -0.25552535], [0.06356658, \n 0.43162277, -0.04484118], 
[0.49682045, 0.03974732, -0.4557841], [\n 0.4781537, 0.4582861, 0.4819371]]]'], {}), '([[[-0.47662562, -0.22483358, 0.4855427], [-0.2908287, \n 0.44400635, -0.23953833], [0.0361443, -0.4890532, -0.44373578], [-\n 0.3182986, 0.45198008, -0.47069582]], [[0.12952949, -0.38356757, -\n 0.13300003], [-0.4066872, -0.30628642, 0.38645932], [0.23361546, \n 0.06476317, 0.36107236], [0.33096626, -0.48422426, 0.19745564]], [[-\n 0.49615836, -0.23483951, 0.40687856], [-0.19530259, -0.16578534, \n 0.44111317], [-0.18813889, -0.42839125, -0.4853233], [0.3680245, -\n 0.46528453, 0.49172965]], [[-0.2921629, -0.3001033, -0.25552535], [\n 0.06356658, 0.43162277, -0.04484118], [0.49682045, 0.03974732, -\n 0.4557841], [0.4781537, 0.4582861, 0.4819371]]])\n', (6160, 6835), False, 'import torch\n'), ((8160, 8181), 'numpy.array', 'np.array', (['[3.7336116]'], {}), '([3.7336116])\n', (8168, 8181), True, 'import numpy as np\n'), ((8895, 8916), 'numpy.array', 'np.array', (['[0.2148518]'], {}), '([0.2148518])\n', (8903, 8916), True, 'import numpy as np\n'), ((10050, 10265), 'numpy.array', 'np.array', (['[[0.97039294, 0.97039294, 0.41714168, 0.41714168], [0.97039294, 0.97039294,\n 0.41714168, 0.41714168], [0.56274384, 0.56274384, 1.4234338, 1.4234338],\n [0.56274384, 0.56274384, 1.4234338, 1.4234338]]'], {}), '([[0.97039294, 0.97039294, 0.41714168, 0.41714168], [0.97039294, \n 0.97039294, 0.41714168, 0.41714168], [0.56274384, 0.56274384, 1.4234338,\n 1.4234338], [0.56274384, 0.56274384, 1.4234338, 1.4234338]])\n', (10058, 10265), True, 'import numpy as np\n'), ((11646, 11920), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 200.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 350.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, -100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, -150.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 200.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
350.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, -150.0, 0.0, 0.0])\n', (11654, 11920), True, 'import numpy as np\n'), ((13271, 13547), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 4000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12250.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 2250.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 4000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12250.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 2250.0, 0.0, 0.0])\n', (13279, 13547), True, 'import numpy as np\n'), ((14894, 15561), 'numpy.array', 'np.array', (['[[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095, 1.5072422\n ], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, 0.41346493, -\n 1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [0.55406415, -\n 1.4017423, -0.6447743], [-0.6031785, -2.003339, -0.01103557], [\n 0.41714168, -1.94303, 0.6685426]], [[1.4404864, 0.79823476, 0.2532903],\n [-0.76351106, 0.90984505, 1.331635], [-1.1300149, -0.8444777, -\n 0.6152072], [1.0166003, 0.9233805, 0.98315567]], [[-0.88205546, -\n 0.3438152, -0.36559045], [0.56274384, 1.5836877, -1.2370849], [\n 1.4234338, -0.5929535, -1.3011148], [-0.55921733, 0.90161383, 0.80880517]]]'], {}), '([[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095, \n 1.5072422], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, \n 0.41346493, -1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [\n 0.55406415, -1.4017423, -0.6447743], [-0.6031785, -2.003339, -\n 0.01103557], [0.41714168, -1.94303, 0.6685426]], [[1.4404864, \n 0.79823476, 0.2532903], [-0.76351106, 0.90984505, 1.331635], [-\n 1.1300149, -0.8444777, -0.6152072], [1.0166003, 0.9233805, 0.98315567]],\n 
[[-0.88205546, -0.3438152, -0.36559045], [0.56274384, 1.5836877, -\n 1.2370849], [1.4234338, -0.5929535, -1.3011148], [-0.55921733, \n 0.90161383, 0.80880517]]])\n', (14902, 15561), True, 'import numpy as np\n'), ((18229, 18503), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 200.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 350.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, -100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, -150.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 200.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 350.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, -150.0, 0.0, 0.0])\n', (18237, 18503), True, 'import numpy as np\n'), ((19854, 20130), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 4000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12250.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 2250.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 4000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12250.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 2250.0, 0.0, 0.0])\n', (19862, 20130), True, 'import numpy as np\n'), ((21477, 22146), 'numpy.array', 'np.array', (['[[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095, 1.5072422\n ], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, 0.41346493, -\n 1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [-0.08485582, -\n 1.4017423, -0.6447743], [-0.6031785, -2.003339, -0.01103557], [\n 0.41714168, -1.94303, 0.6685426]], [[-0.84051266, 0.79823476, 0.2532903\n ], [-0.76351106, 0.90984505, 1.331635], [-1.1300149, -0.8444777, -\n 2.2165612], [1.0166003, 0.9233805, 0.98315567]], 
[[-0.88205546, -\n 0.3438152, -0.36559045], [0.56274384, 1.5836877, -1.2370849], [\n 1.4234338, -0.5929535, -1.3011148], [0.84360084, 0.90161383, 0.80880517]]]'], {}), '([[[-0.21563086, 0.54629284, 1.0879989], [-0.17234534, 0.37302095, \n 1.5072422], [-0.14709516, -0.08446954, -1.0199878], [-0.46581882, \n 0.41346493, -1.6357177]], [[0.97039294, -0.46038368, -0.5377948], [-\n 0.08485582, -1.4017423, -0.6447743], [-0.6031785, -2.003339, -\n 0.01103557], [0.41714168, -1.94303, 0.6685426]], [[-0.84051266, \n 0.79823476, 0.2532903], [-0.76351106, 0.90984505, 1.331635], [-\n 1.1300149, -0.8444777, -2.2165612], [1.0166003, 0.9233805, 0.98315567]],\n [[-0.88205546, -0.3438152, -0.36559045], [0.56274384, 1.5836877, -\n 1.2370849], [1.4234338, -0.5929535, -1.3011148], [0.84360084, \n 0.90161383, 0.80880517]]])\n', (21485, 22146), True, 'import numpy as np\n'), ((24636, 25303), 'numpy.array', 'np.array', (['[[[0.01471687, 0.02125866, 0.02285866], [0.01471687, 0.02125866, 0.02285866\n ], [0.00914774, 0.03038241, 0.0248071], [0.00914774, 0.03038241, \n 0.0248071]], [[0.01471687, 0.02125866, 0.02285866], [0.01471687, \n 0.02125866, 0.02285866], [0.00914774, 0.03038241, 0.0248071], [\n 0.00914774, 0.03038241, 0.0248071]], [[0.01337715, 0.02401802, \n 0.02019542], [0.01337715, 0.02401802, 0.02019542], [0.02158763, \n 0.01400388, 0.03364644], [0.02158763, 0.01400388, 0.03364644]], [[\n 0.01337715, 0.02401802, 0.02019542], [0.01337715, 0.02401802, \n 0.02019542], [0.02158763, 0.01400388, 0.03364644], [0.02158763, \n 0.01400388, 0.03364644]]]'], {}), '([[[0.01471687, 0.02125866, 0.02285866], [0.01471687, 0.02125866, \n 0.02285866], [0.00914774, 0.03038241, 0.0248071], [0.00914774, \n 0.03038241, 0.0248071]], [[0.01471687, 0.02125866, 0.02285866], [\n 0.01471687, 0.02125866, 0.02285866], [0.00914774, 0.03038241, 0.0248071\n ], [0.00914774, 0.03038241, 0.0248071]], [[0.01337715, 0.02401802, \n 0.02019542], [0.01337715, 0.02401802, 0.02019542], [0.02158763, \n 0.01400388, 0.03364644], 
[0.02158763, 0.01400388, 0.03364644]], [[\n 0.01337715, 0.02401802, 0.02019542], [0.01337715, 0.02401802, \n 0.02019542], [0.02158763, 0.01400388, 0.03364644], [0.02158763, \n 0.01400388, 0.03364644]]])\n', (24644, 25303), True, 'import numpy as np\n'), ((26730, 29380), 'numpy.array', 'np.array', (['[[[[-0.21563086, 0.54629284, 1.0879989], [-0.20480949, 0.50297487, \n 1.1928097], [-0.18316671, 0.41633892, 1.4024314], [-0.16603279, \n 0.25864834, 0.8754347], [-0.15340771, 0.02990307, -0.38818032], [-\n 0.22677608, 0.04001407, -1.1739203], [-0.3861379, 0.28898132, -\n 1.4817853], [-0.46581882, 0.41346493, -1.6357177]], [[0.0808751, \n 0.2946237, 0.68155044], [0.02316307, 0.2033003, 0.7534723], [-\n 0.09226094, 0.02065352, 0.89731616], [-0.17775872, -0.19404912, \n 0.53499115], [-0.23333023, -0.44080764, -0.3335028], [-0.25710666, -\n 0.4670549, -0.8407254], [-0.24908802, -0.2727908, -0.98667693], [-\n 0.2450787, -0.17565879, -1.0596527]], [[0.673887, -0.20871457, -\n 0.1313464], [0.47910815, -0.39604884, -0.12520233], [0.0895506, -\n 0.77071726, -0.11291426], [-0.20121056, -1.099444, -0.14589605], [-\n 0.3931753, -1.3822291, -0.22414777], [-0.31776786, -1.481193, -\n 0.17433581], [0.02501175, -1.396335, 0.00353971], [0.19640155, -\n 1.3539063, 0.09247744]], [[0.51816654, -0.14572906, -0.34002355], [\n 0.32536998, -0.3152582, -0.29268563], [-0.0602231, -0.6543164, -\n 0.19800991], [-0.3734866, -1.04629, -0.25373322], [-0.61442065, -\n 1.4911791, -0.45985574], [-0.4094141, -1.5918247, -0.23538877], [\n 0.24153282, -1.3482264, 0.4196677], [0.56700635, -1.2264273, 0.7471959]\n ], [[-0.38628626, 0.48358017, 0.05551901], [-0.4380515, 0.4456721, \n 0.25102246], [-0.54158205, 0.36985612, 0.6420292], [-0.6945869, -\n 0.03458703, 0.21147956], [-0.89706624, -0.76765776, -1.0406268], [-\n 0.5320455, -0.7989503, -1.0238843], [0.4004752, -0.12846482, 0.2617069],\n [0.8667356, 0.20677787, 0.9045024]], [[-0.8493984, 0.51272225, \n 0.09857011], [-0.7450356, 0.6541181, 0.24629137], 
[-0.5363101, \n 0.93690985, 0.54173374], [-0.44687366, 0.6133301, 0.01979139], [-\n 0.4767264, -0.31662107, -1.319536], [-0.12552696, -0.35671276, -\n 1.2570076], [0.60672456, 0.493055, 0.20737618], [0.9728504, 0.9179388, \n 0.93956804]], [[-0.87116975, -0.05830276, -0.21087027], [-0.5955823, \n 0.3100797, -0.3068789], [-0.04440734, 1.0468445, -0.49889636], [\n 0.369653, 0.89746165, -0.8287977], [0.6465987, -0.1380692, -1.2965835],\n [0.8101414, -0.26511204, -0.9347591], [0.8602809, 0.51633304, \n 0.25667554], [0.8853507, 0.9070555, 0.8523928]], [[-0.88205546, -\n 0.3438152, -0.36559045], [-0.52085567, 0.13806051, -0.583464], [\n 0.20154402, 1.1018119, -1.0192113], [0.7779163, 1.0395274, -1.2530923],\n [1.2082613, -0.04879326, -1.2851074], [1.2779756, -0.21931165, -\n 0.7736348], [0.98705906, 0.52797204, 0.28132522], [0.84160084, \n 0.90161383, 0.80880517]]]]'], {}), '([[[[-0.21563086, 0.54629284, 1.0879989], [-0.20480949, 0.50297487,\n 1.1928097], [-0.18316671, 0.41633892, 1.4024314], [-0.16603279, \n 0.25864834, 0.8754347], [-0.15340771, 0.02990307, -0.38818032], [-\n 0.22677608, 0.04001407, -1.1739203], [-0.3861379, 0.28898132, -\n 1.4817853], [-0.46581882, 0.41346493, -1.6357177]], [[0.0808751, \n 0.2946237, 0.68155044], [0.02316307, 0.2033003, 0.7534723], [-\n 0.09226094, 0.02065352, 0.89731616], [-0.17775872, -0.19404912, \n 0.53499115], [-0.23333023, -0.44080764, -0.3335028], [-0.25710666, -\n 0.4670549, -0.8407254], [-0.24908802, -0.2727908, -0.98667693], [-\n 0.2450787, -0.17565879, -1.0596527]], [[0.673887, -0.20871457, -\n 0.1313464], [0.47910815, -0.39604884, -0.12520233], [0.0895506, -\n 0.77071726, -0.11291426], [-0.20121056, -1.099444, -0.14589605], [-\n 0.3931753, -1.3822291, -0.22414777], [-0.31776786, -1.481193, -\n 0.17433581], [0.02501175, -1.396335, 0.00353971], [0.19640155, -\n 1.3539063, 0.09247744]], [[0.51816654, -0.14572906, -0.34002355], [\n 0.32536998, -0.3152582, -0.29268563], [-0.0602231, -0.6543164, -\n 0.19800991], [-0.3734866, 
-1.04629, -0.25373322], [-0.61442065, -\n 1.4911791, -0.45985574], [-0.4094141, -1.5918247, -0.23538877], [\n 0.24153282, -1.3482264, 0.4196677], [0.56700635, -1.2264273, 0.7471959]\n ], [[-0.38628626, 0.48358017, 0.05551901], [-0.4380515, 0.4456721, \n 0.25102246], [-0.54158205, 0.36985612, 0.6420292], [-0.6945869, -\n 0.03458703, 0.21147956], [-0.89706624, -0.76765776, -1.0406268], [-\n 0.5320455, -0.7989503, -1.0238843], [0.4004752, -0.12846482, 0.2617069],\n [0.8667356, 0.20677787, 0.9045024]], [[-0.8493984, 0.51272225, \n 0.09857011], [-0.7450356, 0.6541181, 0.24629137], [-0.5363101, \n 0.93690985, 0.54173374], [-0.44687366, 0.6133301, 0.01979139], [-\n 0.4767264, -0.31662107, -1.319536], [-0.12552696, -0.35671276, -\n 1.2570076], [0.60672456, 0.493055, 0.20737618], [0.9728504, 0.9179388, \n 0.93956804]], [[-0.87116975, -0.05830276, -0.21087027], [-0.5955823, \n 0.3100797, -0.3068789], [-0.04440734, 1.0468445, -0.49889636], [\n 0.369653, 0.89746165, -0.8287977], [0.6465987, -0.1380692, -1.2965835],\n [0.8101414, -0.26511204, -0.9347591], [0.8602809, 0.51633304, \n 0.25667554], [0.8853507, 0.9070555, 0.8523928]], [[-0.88205546, -\n 0.3438152, -0.36559045], [-0.52085567, 0.13806051, -0.583464], [\n 0.20154402, 1.1018119, -1.0192113], [0.7779163, 1.0395274, -1.2530923],\n [1.2082613, -0.04879326, -1.2851074], [1.2779756, -0.21931165, -\n 0.7736348], [0.98705906, 0.52797204, 0.28132522], [0.84160084, \n 0.90161383, 0.80880517]]]])\n', (26738, 29380), True, 'import numpy as np\n'), ((31986, 34421), 'numpy.array', 'np.array', (['[0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467, \n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 
0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, 0.00571467,\n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, 0.00620178, \n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451, 0.00504886,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161,\n 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097, 0.00841161]'], {}), '([0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467, \n 
0.00571467, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00367922, 0.00531467, 0.00571467, 0.00367922, 0.00531467,\n 0.00571467, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00228693, 0.0075956, 0.00620178, 0.00228693, 0.0075956, \n 0.00620178, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 
0.00841161, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00334429, 0.00600451, 0.00504886, 0.00334429, 0.00600451,\n 0.00504886, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161, 0.00539691, 0.00350097, 0.00841161, 0.00539691, 0.00350097,\n 0.00841161])\n', (31994, 34421), True, 'import numpy as np\n'), ((4541, 4807), 'torch.tensor', 'torch.tensor', (['[[0.26311237, 0.7238547], [-0.2869757, -0.6140047], [-0.11846703, -\n 0.57517225], [-0.72543985, 0.6393444], [0.45188862, 0.35718697], [-\n 0.7197881, 0.17988789], [0.18161213, 0.32464463], [0.37511164, \n 0.07291293], [-0.27989575, -0.37013885]]'], {}), '([[0.26311237, 0.7238547], [-0.2869757, -0.6140047], [-\n 0.11846703, -0.57517225], [-0.72543985, 0.6393444], [0.45188862, \n 0.35718697], [-0.7197881, 0.17988789], [0.18161213, 0.32464463], [\n 0.37511164, 0.07291293], [-0.27989575, -0.37013885]])\n', (4553, 4807), False, 'import torch\n'), ((3972, 4176), 'torch.tensor', 'torch.tensor', (['[[[[0.18992287], [-0.6111586], [-0.41560256]], [[0.19819254], [0.06157357],\n [-0.29873127]]], [[[0.08528781], [-0.4988662], [0.51414317]], [[\n 0.5520558], [0.35638297], [0.29052997]]]]'], {}), '([[[[0.18992287], [-0.6111586], [-0.41560256]], [[0.19819254],\n [0.06157357], [-0.29873127]]], [[[0.08528781], [-0.4988662], [\n 0.51414317]], [[0.5520558], [0.35638297], [0.29052997]]]])\n', (3984, 4176), False, 'import torch\n'), ((16377, 16539), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2,\n 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1,\n 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,\n 1, 1])\n', (16385, 16539), True, 'import numpy as np\n'), ((22963, 23125), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2,\n 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1,\n 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,\n 1, 1])\n', (22971, 23125), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Domain adaptation with optimal transport with GPU implementation
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <https://github.com/aje>
#
# License: MIT License
import cupy as np # np used for matrix computation
import cupy as cp # cp used for cupy specific operations
import numpy as npp
from . import utils
from .bregman import sinkhorn
def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10,
                     numInnerItermax=200, stopInnerThr=1e-9, verbose=False,
                     log=False, to_numpy=True):
    r"""
    Solve the entropic regularization optimal transport problem with nonconvex
    group lasso regularization on GPU
    If the input matrix are in numpy format, they will be uploaded to the
    GPU first which can incur significant time overhead.
    The function solves the following optimization problem:
    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega_e(\gamma)
        + \eta \Omega_g(\gamma)
        s.t. \gamma 1 = a
             \gamma^T 1= b
             \gamma\geq 0
    where :
    - M is the (ns,nt) metric cost matrix
    - :math:`\Omega_e` is the entropic regularization term
      :math:`\Omega_e(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - :math:`\Omega_g` is the group lasso regularization term
      :math:`\Omega_g(\gamma)=\sum_{i,c} \|\gamma_{i,\mathcal{I}_c}\|^{1/2}_1`
      where :math:`\mathcal{I}_c` are the index of samples from class c
      in the source domain.
    - a and b are source and target weights (sum to 1)
    The algorithm used for solving the problem is the generalised conditional
    gradient as proposed in [5]_ [7]_
    Parameters
    ----------
    a : np.ndarray (ns,)
        samples weights in the source domain
    labels_a : np.ndarray (ns,)
        labels of samples in the source domain
    b : np.ndarray (nt,)
        samples weights in the target domain
    M : np.ndarray (ns,nt)
        loss matrix
    reg : float
        Regularization term for entropic regularization >0
    eta : float, optional
        Regularization term for group lasso regularization >0
    numItermax : int, optional
        Max number of iterations
    numInnerItermax : int, optional
        Max number of iterations (inner sinkhorn solver)
    stopInnerThr : float, optional
        Stop threshold on error (inner sinkhorn solver) (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    to_numpy : boolean, optional (default True)
        If true convert back the GPU array result to numpy format.
    Returns
    -------
    gamma : (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters
    References
    ----------
    .. [5] <NAME>; <NAME>; <NAME>; <NAME>,
       "Optimal Transport for Domain Adaptation," in IEEE
       Transactions on Pattern Analysis and Machine Intelligence ,
       vol.PP, no.99, pp.1-1
    .. [7] <NAME>., <NAME>., & Courty, N. (2015).
       Generalized conditional gradient: analysis of convergence
       and applications. arXiv preprint arXiv:1510.06567.
    See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.bregman.sinkhorn : Entropic regularized OT
    ot.optim.cg : General regularized OT
    """
    # Upload all inputs to the GPU (no-op for arrays already on device).
    a, labels_a, b, M = utils.to_gpu(a, labels_a, b, M)
    # Exponent of the l^{p} group penalty and smoothing constant used in the
    # (majs + epsilon)**(p - 1) subgradient below; p = 0.5 gives the l^{1/2}
    # group-lasso term from the docstring.
    p = 0.5
    epsilon = 1e-3
    # Group sample indices by source-domain class label. The unique/where
    # computations run on CPU (npp = host numpy); the resulting index arrays
    # are pushed back to the GPU so they can index GPU tensors below.
    indices_labels = []
    labels_a2 = cp.asnumpy(labels_a)
    classes = npp.unique(labels_a2)
    for c in classes:
        idxc, = utils.to_gpu(npp.where(labels_a2 == c))
        indices_labels.append(idxc)
    # Majorization-minimization loop: W holds the (sub)gradient of the group
    # lasso term and is folded into the cost before each inner sinkhorn solve.
    # First pass uses W = 0, i.e. plain entropic OT.
    W = np.zeros(M.shape)
    for cpt in range(numItermax):
        Mreg = M + eta * W
        transp = sinkhorn(a, b, Mreg, reg, numItermax=numInnerItermax,
                         stopThr=stopInnerThr, to_numpy=False)
        # the transport has been computed. Check if classes are really
        # separated
        W = np.ones(M.shape)
        for (i, c) in enumerate(classes):
            # Column mass transported from the samples of class c; its
            # (p-1)-power is the subgradient of the group-lasso majorizer.
            majs = np.sum(transp[indices_labels[i]], axis=0)
            majs = p * ((majs + epsilon)**(p - 1))
            W[indices_labels[i]] = majs
    if to_numpy:
        return utils.to_np(transp)
    else:
        return transp
| [
"cupy.asnumpy",
"numpy.unique",
"numpy.where",
"cupy.ones",
"cupy.sum",
"cupy.zeros"
] | [((3591, 3611), 'cupy.asnumpy', 'cp.asnumpy', (['labels_a'], {}), '(labels_a)\n', (3601, 3611), True, 'import cupy as cp\n'), ((3626, 3647), 'numpy.unique', 'npp.unique', (['labels_a2'], {}), '(labels_a2)\n', (3636, 3647), True, 'import numpy as npp\n'), ((3771, 3788), 'cupy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (3779, 3788), True, 'import cupy as np\n'), ((4089, 4105), 'cupy.ones', 'np.ones', (['M.shape'], {}), '(M.shape)\n', (4096, 4105), True, 'import cupy as np\n'), ((3699, 3724), 'numpy.where', 'npp.where', (['(labels_a2 == c)'], {}), '(labels_a2 == c)\n', (3708, 3724), True, 'import numpy as npp\n'), ((4168, 4209), 'cupy.sum', 'np.sum', (['transp[indices_labels[i]]'], {'axis': '(0)'}), '(transp[indices_labels[i]], axis=0)\n', (4174, 4209), True, 'import cupy as np\n')] |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F, init
from .linear import Linear
class LULinear(Linear):
"""A linear transform where we parameterize the LU decomposition of the weights."""
def __init__(self, features, using_cache=False, identity_init=True, eps=1e-3):
super().__init__(features, using_cache)
self.eps = eps
self.lower_indices = np.tril_indices(features, k=-1)
self.upper_indices = np.triu_indices(features, k=1)
self.diag_indices = np.diag_indices(features)
n_triangular_entries = ((features - 1) * features) // 2
self.lower_entries = nn.Parameter(torch.zeros(n_triangular_entries))
self.upper_entries = nn.Parameter(torch.zeros(n_triangular_entries))
self.unconstrained_upper_diag = nn.Parameter(torch.zeros(features))
self._initialize(identity_init)
def _initialize(self, identity_init):
init.zeros_(self.bias)
if identity_init:
init.zeros_(self.lower_entries)
init.zeros_(self.upper_entries)
constant = np.log(np.exp(1 - self.eps) - 1)
init.constant_(self.unconstrained_upper_diag, constant)
else:
stdv = 1.0 / np.sqrt(self.features)
init.uniform_(self.lower_entries, -stdv, stdv)
init.uniform_(self.upper_entries, -stdv, stdv)
init.uniform_(self.unconstrained_upper_diag, -stdv, stdv)
def _create_lower_upper(self):
lower = self.lower_entries.new_zeros(self.features, self.features)
lower[self.lower_indices[0], self.lower_indices[1]] = self.lower_entries
# The diagonal of L is taken to be all-ones without loss of generality.
lower[self.diag_indices[0], self.diag_indices[1]] = 1.
upper = self.upper_entries.new_zeros(self.features, self.features)
upper[self.upper_indices[0], self.upper_indices[1]] = self.upper_entries
upper[self.diag_indices[0], self.diag_indices[1]] = self.upper_diag
return lower, upper
def forward_no_cache(self, inputs):
"""Cost:
output = O(D^2N)
logabsdet = O(D)
where:
D = num of features
N = num of inputs
"""
lower, upper = self._create_lower_upper()
outputs = F.linear(inputs, upper)
outputs = F.linear(outputs, lower, self.bias)
logabsdet = self.logabsdet() * inputs.new_ones(outputs.shape[0])
return outputs, logabsdet
def inverse_no_cache(self, inputs):
"""Cost:
output = O(D^2N)
logabsdet = O(D)
where:
D = num of features
N = num of inputs
"""
lower, upper = self._create_lower_upper()
outputs = inputs - self.bias
outputs, _ = torch.triangular_solve(outputs.t(), lower, upper=False, unitriangular=True)
outputs, _ = torch.triangular_solve(outputs, upper, upper=True, unitriangular=False)
outputs = outputs.t()
logabsdet = -self.logabsdet()
logabsdet = logabsdet * inputs.new_ones(outputs.shape[0])
return outputs, logabsdet
def weight(self):
"""Cost:
weight = O(D^3)
where:
D = num of features
"""
lower, upper = self._create_lower_upper()
return lower @ upper
def weight_inverse(self):
"""Cost:
inverse = O(D^3)
where:
D = num of features
"""
lower, upper = self._create_lower_upper()
identity = torch.eye(self.features, self.features)
lower_inverse, _ = torch.trtrs(identity, lower, upper=False, unitriangular=True)
weight_inverse, _ = torch.trtrs(lower_inverse, upper, upper=True, unitriangular=False)
return weight_inverse
@property
def upper_diag(self):
return F.softplus(self.unconstrained_upper_diag) + self.eps
def logabsdet(self):
"""Cost:
logabsdet = O(D)
where:
D = num of features
"""
return torch.sum(torch.log(self.upper_diag)) | [
"torch.nn.functional.linear",
"torch.triangular_solve",
"torch.log",
"numpy.triu_indices",
"torch.nn.init.constant_",
"numpy.sqrt",
"torch.eye",
"numpy.diag_indices",
"torch.nn.init.zeros_",
"numpy.exp",
"torch.nn.functional.softplus",
"torch.nn.init.uniform_",
"torch.trtrs",
"numpy.tril_i... | [((425, 456), 'numpy.tril_indices', 'np.tril_indices', (['features'], {'k': '(-1)'}), '(features, k=-1)\n', (440, 456), True, 'import numpy as np\n'), ((486, 516), 'numpy.triu_indices', 'np.triu_indices', (['features'], {'k': '(1)'}), '(features, k=1)\n', (501, 516), True, 'import numpy as np\n'), ((545, 570), 'numpy.diag_indices', 'np.diag_indices', (['features'], {}), '(features)\n', (560, 570), True, 'import numpy as np\n'), ((959, 981), 'torch.nn.init.zeros_', 'init.zeros_', (['self.bias'], {}), '(self.bias)\n', (970, 981), False, 'from torch.nn import functional as F, init\n'), ((2341, 2364), 'torch.nn.functional.linear', 'F.linear', (['inputs', 'upper'], {}), '(inputs, upper)\n', (2349, 2364), True, 'from torch.nn import functional as F, init\n'), ((2383, 2418), 'torch.nn.functional.linear', 'F.linear', (['outputs', 'lower', 'self.bias'], {}), '(outputs, lower, self.bias)\n', (2391, 2418), True, 'from torch.nn import functional as F, init\n'), ((2936, 3007), 'torch.triangular_solve', 'torch.triangular_solve', (['outputs', 'upper'], {'upper': '(True)', 'unitriangular': '(False)'}), '(outputs, upper, upper=True, unitriangular=False)\n', (2958, 3007), False, 'import torch\n'), ((3589, 3628), 'torch.eye', 'torch.eye', (['self.features', 'self.features'], {}), '(self.features, self.features)\n', (3598, 3628), False, 'import torch\n'), ((3656, 3717), 'torch.trtrs', 'torch.trtrs', (['identity', 'lower'], {'upper': '(False)', 'unitriangular': '(True)'}), '(identity, lower, upper=False, unitriangular=True)\n', (3667, 3717), False, 'import torch\n'), ((3746, 3812), 'torch.trtrs', 'torch.trtrs', (['lower_inverse', 'upper'], {'upper': '(True)', 'unitriangular': '(False)'}), '(lower_inverse, upper, upper=True, unitriangular=False)\n', (3757, 3812), False, 'import torch\n'), ((679, 712), 'torch.zeros', 'torch.zeros', (['n_triangular_entries'], {}), '(n_triangular_entries)\n', (690, 712), False, 'import torch\n'), ((756, 789), 'torch.zeros', 
'torch.zeros', (['n_triangular_entries'], {}), '(n_triangular_entries)\n', (767, 789), False, 'import torch\n'), ((844, 865), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (855, 865), False, 'import torch\n'), ((1021, 1052), 'torch.nn.init.zeros_', 'init.zeros_', (['self.lower_entries'], {}), '(self.lower_entries)\n', (1032, 1052), False, 'from torch.nn import functional as F, init\n'), ((1065, 1096), 'torch.nn.init.zeros_', 'init.zeros_', (['self.upper_entries'], {}), '(self.upper_entries)\n', (1076, 1096), False, 'from torch.nn import functional as F, init\n'), ((1165, 1220), 'torch.nn.init.constant_', 'init.constant_', (['self.unconstrained_upper_diag', 'constant'], {}), '(self.unconstrained_upper_diag, constant)\n', (1179, 1220), False, 'from torch.nn import functional as F, init\n'), ((1295, 1341), 'torch.nn.init.uniform_', 'init.uniform_', (['self.lower_entries', '(-stdv)', 'stdv'], {}), '(self.lower_entries, -stdv, stdv)\n', (1308, 1341), False, 'from torch.nn import functional as F, init\n'), ((1354, 1400), 'torch.nn.init.uniform_', 'init.uniform_', (['self.upper_entries', '(-stdv)', 'stdv'], {}), '(self.upper_entries, -stdv, stdv)\n', (1367, 1400), False, 'from torch.nn import functional as F, init\n'), ((1413, 1470), 'torch.nn.init.uniform_', 'init.uniform_', (['self.unconstrained_upper_diag', '(-stdv)', 'stdv'], {}), '(self.unconstrained_upper_diag, -stdv, stdv)\n', (1426, 1470), False, 'from torch.nn import functional as F, init\n'), ((3899, 3940), 'torch.nn.functional.softplus', 'F.softplus', (['self.unconstrained_upper_diag'], {}), '(self.unconstrained_upper_diag)\n', (3909, 3940), True, 'from torch.nn import functional as F, init\n'), ((4108, 4134), 'torch.log', 'torch.log', (['self.upper_diag'], {}), '(self.upper_diag)\n', (4117, 4134), False, 'import torch\n'), ((1260, 1282), 'numpy.sqrt', 'np.sqrt', (['self.features'], {}), '(self.features)\n', (1267, 1282), True, 'import numpy as np\n'), ((1127, 1147), 'numpy.exp', 'np.exp', 
(['(1 - self.eps)'], {}), '(1 - self.eps)\n', (1133, 1147), True, 'import numpy as np\n')] |
import numpy as np
import scipy.stats
import datetime
from spodernet.interfaces import IAtIterEndObservable, IAtEpochEndObservable, IAtEpochStartObservable
from spodernet.utils.util import Timer
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.logger import Logger
log = Logger('hooks.py.txt')
class AbstractHook(IAtIterEndObservable, IAtEpochEndObservable):
    """Base class for streaming metric hooks.

    Each batch, :meth:`at_end_of_iter_event` converts the backend-specific
    state, computes a scalar metric via :meth:`calculate_metric` (implemented
    by subclasses) and folds it into running mean/variance accumulators using
    Welford's online algorithm. Every ``print_every_x_batches`` iterations,
    and at the end of every epoch, the running mean and a normal-approximation
    confidence interval are logged and the accumulators are reset.
    """

    def __init__(self, name, metric_name, print_every_x_batches):
        """
        Args:
            name: Label printed alongside the metric (e.g. the split name).
            metric_name: Human-readable metric name (e.g. 'Accuracy').
            print_every_x_batches: Logging frequency, in iterations.
        """
        self.epoch_errors = []
        self.current_scores = []
        self.name = name
        self.iter_count = 0
        self.print_every = print_every_x_batches
        self.metric_name = metric_name
        self.epoch = 1

        # Welford's online mean/variance accumulators, see
        # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
        # n = sample count, mean = running mean, M2 = sum of squared
        # deviations from the mean (so variance = M2 / n).
        self.n = 0
        self.epoch_n = 0
        self.mean = 0
        self.M2 = 0

        self.load_backend_specific_functions()

    def load_backend_specific_functions(self):
        """Install ``self.convert_state``, which unwraps backend tensor types."""
        if Config.backend == Backends.TORCH:
            from torch.autograd import Variable

            def convert_state(state):
                # Unwrap torch Variables so downstream code sees raw tensors.
                if isinstance(state.targets, Variable):
                    state.targets = state.targets.data
                if isinstance(state.argmax, Variable):
                    state.argmax = state.argmax.data
                if isinstance(state.pred, Variable):
                    state.pred = state.pred.data
                if isinstance(state.loss, Variable):
                    state.loss = state.loss.data
                if isinstance(state.multi_labels, Variable):
                    state.multi_labels = state.multi_labels.data
                return state

            self.convert_state = convert_state
        else:
            # Other backends already expose plain arrays.
            self.convert_state = lambda x: x

    def calculate_metric(self, state):
        """Map a batch state to a scalar metric value. Must be overridden."""
        raise NotImplementedError('Classes that inherit from abstract hook need to implement the calcualte metric method.')

    def at_end_of_iter_event(self, state):
        """Accumulate the batch metric; log every ``print_every`` iterations.

        Returns:
            (lower, upper, mean, n) of the printed confidence interval when a
            statistic was emitted, otherwise (0, 0, running_mean, n).
        """
        state = self.convert_state(state)
        metric = self.calculate_metric(state)

        # Welford update of mean and M2.
        self.n += 1
        delta = metric - self.mean
        self.mean += delta/self.n
        delta2 = metric - self.mean
        self.M2 += delta*delta2

        self.current_scores.append(metric)
        self.iter_count += 1
        if self.iter_count % self.print_every == 0:
            lower, upper, m, n = self.print_statistic()
            # Reset so each printed statistic covers only the latest window.
            self.n = 0
            self.mean = 0
            self.M2 = 0
            return lower, upper, m, n

        return 0, 0, self.mean, self.n

    def at_end_of_epoch_event(self, state):
        """Log the epoch statistic, store its CI, and reset for the next epoch."""
        if self.n == 0: return 0, 0, 0, 0
        self.epoch_errors.append(self.get_confidence_intervals())
        lower, upper, m, n = self.print_statistic(True)
        del self.current_scores[:]
        self.n = 0
        self.mean = 0
        self.M2 = 0
        self.epoch += 1
        self.iter_count = 0
        return lower, upper, m, n

    def get_confidence_intervals(self, percentile=0.99, limit=1000):
        """Return ``[n, lower, mean, upper]`` for a two-sided CI on the mean.

        Args:
            percentile: Two-sided coverage probability of the interval.
            limit: Unused; kept for backward compatibility.
        """
        # For a two-sided interval with coverage `percentile`, each tail
        # holds (1 - percentile) / 2 probability, so the critical value is
        # the (1 + percentile) / 2 normal quantile. (The previous
        # ppf(percentile) yielded only 2*percentile - 1 coverage, i.e. a
        # 98% interval labeled as 99%.)
        z = scipy.stats.norm.ppf((1.0 + percentile) / 2.0)
        var = self.M2 / self.n  # population variance of the current window
        SE = np.sqrt(var / self.n)  # standard error of the mean
        lower = self.mean - (z * SE)
        upper = self.mean + (z * SE)
        return [self.n, lower, self.mean, upper]

    def print_statistic(self, at_epoch_end=False):
        """Log the current mean and CI; returns (lower, upper, mean, n)."""
        n, lower, m, upper = self.get_confidence_intervals()
        str_message = '{3} {4}: {2:.5}\t99% CI: ({0:.5}, {1:.5}), n={5}'.format(lower, upper, m, self.name, self.metric_name, self.n)
        if at_epoch_end: log.info('\n')
        if at_epoch_end: log.info('#'*40)
        if at_epoch_end: log.info(' '*10 + 'COMPLETED EPOCH: {0}'.format(self.epoch) + ' '*30)
        log.info(str_message)
        if at_epoch_end: log.info('#'*40)
        if at_epoch_end: log.info('\n')
        return lower, upper, m, n
class AccuracyHook(AbstractHook):
    """Hook that tracks classification accuracy: the fraction of examples
    whose target matches the argmax (with a multi-column argmax, a match in
    any column counts as correct)."""
    def __init__(self, name='', print_every_x_batches=1000):
        super(AccuracyHook, self).__init__(name, 'Accuracy', print_every_x_batches)
        self.func = None
        # NOTE(review): self.topk is never read anywhere in this class.
        self.topk = 1
        if Config.backend == Backends.TORCH:
            import torch
            self.func = lambda x: torch.sum(x)
    def calculate_metric(self, state):
        """Return batch accuracy in [0, 1] for the configured backend."""
        if Config.backend == Backends.TORCH:
            correct = 0.0
            if len(state.argmax.size()) == 1:
                # one prediction per example
                correct += self.func(state.targets==state.argmax)
            else:
                # one column per candidate prediction; count a hit in any column
                topk = state.argmax.size(1)
                for i in range(topk):
                    correct += self.func(state.targets==state.argmax[:, i])
            n = state.argmax.size()[0]
            return correct.item()/np.float32(n)
        elif Config.backend == Backends.TENSORFLOW:
            n = state.argmax.shape[0]
            return np.sum(state.targets==state.argmax)/np.float32(n)
        elif Config.backend == Backends.TEST:
            n = state.argmax.shape[0]
            return np.sum(state.targets==state.argmax)/np.float32(n)
        else:
            raise Exception('Backend has unsupported value {0}'.format(Config.backend))
class TopKRankingLoss(AbstractHook):
    """Hook that tracks Hits@k: the fraction of examples whose target appears
    among the k highest-scoring predictions.  With ``filtered=True``, scores
    of other known-correct labels are masked out first (filtered ranking)."""
    def __init__(self, k, filtered=False, name='', print_every_x_batches=1000):
        super(TopKRankingLoss, self).__init__(name, '{1}Hits@{0} loss'.format(k, ('' if not filtered else 'Filtered ')), print_every_x_batches)
        self.func = None
        self.argsort = None
        self.sum_func = None
        self.k = k
        self.filtered = filtered
        if Config.backend == Backends.TORCH:
            import torch
            self.argsort = lambda x, k: torch.topk(x, k)
            self.sum_func = lambda x: torch.sum(x)
    def calculate_metric(self, state):
        """Return the fraction of batch examples with the target in the top-k.

        NOTE(review): when ``filtered`` is set, ``state.pred`` is modified in
        place (masked, then the target column restored) — downstream hooks in
        the same iteration will see the mutated scores.
        """
        if Config.backend == Backends.TORCH:
            if self.filtered:
                import torch
                # save the target scores, mask all known-correct labels so
                # they cannot outrank the target, then restore the targets
                saved = torch.index_select(state.pred,1,state.targets)
                state.pred[state.multi_labels.byte()] = -100000.0
                state.pred.index_copy_(1, state.targets, saved)
            max_values, argmax = self.argsort(state.pred, self.k)
            in_topk = 0
            for i in range(self.k):
                in_topk += self.sum_func(argmax[:,i] == state.targets)
            n = state.pred.size()[0]
            return in_topk/np.float32(n)
        else:
            raise Exception('Backend has unsupported value {0}'.format(Config.backend))
class LossHook(AbstractHook):
    """Hook that tracks the scalar training loss carried in the state."""
    def __init__(self, name='', print_every_x_batches=1000):
        super(LossHook, self).__init__(name, 'Loss', print_every_x_batches)
    def calculate_metric(self, state):
        """Return the batch loss as a plain Python number."""
        if Config.backend != Backends.TORCH:
            return state.loss
        # torch: unwrap autograd Variables, then pull the scalar out of the tensor
        unwrapped = self.convert_state(state)
        return unwrapped.loss.item()
class IntersectionHook(AbstractHook):
    """Hook that measures per-row overlap between predictions and targets:
    the summed size of each row's set intersection, normalized by the total
    number of target entries."""
    def __init__(self, name='', print_every_x_batches=1000):
        super(IntersectionHook, self).__init__(name, 'Intersection', print_every_x_batches)
    def calculate_metric(self, state):
        """Return the normalized prediction/target intersection for the batch."""
        state = self.convert_state(state)
        preds, targets = state.pred, state.targets
        if Config.cuda:
            # bring tensors back to host memory before the numpy conversion
            preds = preds.cpu()
            targets = targets.cpu()
        preds = preds.numpy()
        targets = targets.numpy()
        total = targets.size
        overlap = sum(
            np.intersect1d(preds[row], targets[row]).size
            for row in range(Config.batch_size)
        )
        return overlap/float(total)
class ETAHook(AbstractHook, IAtEpochStartObservable):
    """Hook that estimates the remaining time (ETA) of the current epoch
    from the average time per batch measured so far."""
    def __init__(self, name='', print_every_x_batches=1000):
        super(ETAHook, self).__init__(name, 'ETA', print_every_x_batches)
        self.t = Timer(silent=True)
        self.cumulative_t = 0.0
        self.skipped_first = False
    def get_time_string(self, seconds):
        """Format a duration in seconds as H:MM:SS (negative parts clamp to 0)."""
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        if h < 0: h = 0
        if m < 0: m = 0
        if s < 0: s = 0
        return "%d:%02d:%02d" % (h, m, s)
    def calculate_metric(self, state):
        """Return the projected total epoch time in seconds.

        NOTE(review): divides by ``state.current_idx`` — presumably 1-based;
        an index of 0 would raise ZeroDivisionError.  Confirm with the batcher.
        """
        n = state.num_batches
        i = state.current_idx
        cumulative_t = self.t.tick('ETA')
        total_time_estimate = (cumulative_t/i)*n
        self.t.tick('ETA')
        self.cumulative_t = cumulative_t
        return total_time_estimate
    def print_statistic(self):
        """Log the remaining-time estimate with its confidence interval.

        NOTE(review): unlike the base class this override takes no
        ``at_epoch_end`` argument; that is only safe because this class also
        overrides ``at_end_of_epoch_event`` (which never calls it with one).
        """
        if not self.skipped_first:
            # the first estimation is very unreliable for time measures
            self.skipped_first = True
            return 0, 0, 0, 0
        n, lower, m, upper = self.get_confidence_intervals()
        # convert "projected total time" into "time remaining"
        lower -= self.cumulative_t
        m -= self.cumulative_t
        upper -= self.cumulative_t
        lower, m, upper = self.get_time_string(lower), self.get_time_string(m), self.get_time_string(upper)
        log.info('{3} {4}: {2}\t99% CI: ({0}, {1}), n={5}'.format(lower, upper, m, self.name, self.metric_name, n))
        return lower, upper, m, n
    def at_start_of_epoch_event(self, batcher_state):
        """Restart both the per-batch (ETA) and the whole-epoch timers."""
        self.t.tick('ETA')
        t = self.t.tick('Epoch')
    def at_end_of_epoch_event(self, state):
        """Stop the timers, log total epoch time, and reset all accumulators."""
        self.t.tock('ETA')
        epoch_time = self.t.tock('Epoch')
        self.epoch_errors.append([epoch_time])
        log.info('Total epoch time: {0}'.format(self.get_time_string(epoch_time)))
        del self.current_scores[:]
        self.n = 0
        self.mean = 0
        self.M2 = 0
        self.skipped_first = False
        self.epoch += 1
        return epoch_time
| [
"torch.index_select",
"numpy.intersect1d",
"spodernet.utils.logger.Logger",
"numpy.sqrt",
"torch.topk",
"numpy.sum",
"torch.sum",
"spodernet.utils.util.Timer",
"numpy.float32"
] | [((304, 326), 'spodernet.utils.logger.Logger', 'Logger', (['"""hooks.py.txt"""'], {}), "('hooks.py.txt')\n", (310, 326), False, 'from spodernet.utils.logger import Logger\n'), ((3121, 3142), 'numpy.sqrt', 'np.sqrt', (['(var / self.n)'], {}), '(var / self.n)\n', (3128, 3142), True, 'import numpy as np\n'), ((7626, 7644), 'spodernet.utils.util.Timer', 'Timer', ([], {'silent': '(True)'}), '(silent=True)\n', (7631, 7644), False, 'from spodernet.utils.util import Timer\n'), ((4158, 4170), 'torch.sum', 'torch.sum', (['x'], {}), '(x)\n', (4167, 4170), False, 'import torch\n'), ((4643, 4656), 'numpy.float32', 'np.float32', (['n'], {}), '(n)\n', (4653, 4656), True, 'import numpy as np\n'), ((5578, 5594), 'torch.topk', 'torch.topk', (['x', 'k'], {}), '(x, k)\n', (5588, 5594), False, 'import torch\n'), ((5633, 5645), 'torch.sum', 'torch.sum', (['x'], {}), '(x)\n', (5642, 5645), False, 'import torch\n'), ((5815, 5863), 'torch.index_select', 'torch.index_select', (['state.pred', '(1)', 'state.targets'], {}), '(state.pred, 1, state.targets)\n', (5833, 5863), False, 'import torch\n'), ((6254, 6267), 'numpy.float32', 'np.float32', (['n'], {}), '(n)\n', (6264, 6267), True, 'import numpy as np\n'), ((7344, 7384), 'numpy.intersect1d', 'np.intersect1d', (['preds[row]', 'targets[row]'], {}), '(preds[row], targets[row])\n', (7358, 7384), True, 'import numpy as np\n'), ((4766, 4803), 'numpy.sum', 'np.sum', (['(state.targets == state.argmax)'], {}), '(state.targets == state.argmax)\n', (4772, 4803), True, 'import numpy as np\n'), ((4802, 4815), 'numpy.float32', 'np.float32', (['n'], {}), '(n)\n', (4812, 4815), True, 'import numpy as np\n'), ((4919, 4956), 'numpy.sum', 'np.sum', (['(state.targets == state.argmax)'], {}), '(state.targets == state.argmax)\n', (4925, 4956), True, 'import numpy as np\n'), ((4955, 4968), 'numpy.float32', 'np.float32', (['n'], {}), '(n)\n', (4965, 4968), True, 'import numpy as np\n')] |
### A SET OF PLOTTING FUNCTIONS INSPIRED BY EXPLORING VIZGEN MERFISH
### --- START OF VIZGEN MERFISH SECTION
import numpy as np
import datashader as ds
import colorcet
import json
from __init__plots import *
class PlotScale:
    """Pixel grid for rasterized plotting.

    arguments: rangex, rangey, [npxlx, npxly, pxl_scale]
    Exactly one of the three bracketed options must be given; the other two
    are derived from it and the data ranges (in micron).
    """
    def __init__(self, rangex, rangey, npxlx=0, npxly=0, pxl_scale=0):
        """
        rangex(y) - range of the x(y) axis (in micron)
        pxl_scale - number of microns per pixel
        npxlx(y) - number of pixels on the x(y) axis
        """
        # exactly one of the three optional args may be non-zero
        zeros = sum(1 for v in (npxlx, npxly, pxl_scale) if v == 0)
        assert zeros == 2
        self.rangex = rangex
        self.rangey = rangey
        # Cascade of refinements: whichever option was given seeds the grid,
        # then the scale is re-derived from the integer pixel counts so that
        # pxl_scale, npxlx and npxly end up mutually consistent.
        if pxl_scale:
            npxlx = int(rangex/pxl_scale)
            npxly = int(rangey/pxl_scale)
        if npxlx:
            npxlx = int(npxlx)
            pxl_scale = rangex/npxlx
            npxly = int(rangey/pxl_scale)
        if npxly:
            npxly = int(npxly)
            pxl_scale = rangey/npxly
            npxlx = int(rangex/pxl_scale)
        self.pxl_scale = pxl_scale
        self.npxlx = npxlx
        self.npxly = npxly
        self.num_pxl = self.npxlx*self.npxly
        self.check_dim()
    def check_dim(self):
        """Sanity-check the grid: positive extents, bounded total pixel count."""
        num_pixel_limit = 1e6
        assert self.npxlx > 0
        assert self.npxly > 0
        assert self.num_pxl < num_pixel_limit
        return
    def len2pixel(self, length):
        """Convert a physical length (micron) into a pixel count."""
        return int(length/self.pxl_scale)
    def pixel2len(self, npixel):
        """Convert a pixel count into a physical length (micron)."""
        return npixel*self.pxl_scale
class CategoricalColors:
    """Map a list of categorical labels to colors, plus a matching matplotlib
    categorical colormap/norm (integer label index -> color).

    Arguments: labels, [colors]
    """
    def __init__(self, labels, colors=[], basis_cmap=colorcet.cm.rainbow):
        """If ``colors`` is empty, sample ``len(labels)`` evenly spaced colors
        from ``basis_cmap``; otherwise use the given colors one-to-one.

        NOTE(review): the ``colors=[]`` mutable default is never mutated here,
        so it is harmless, but ``colors=None`` would be the safer idiom.
        """
        self.labels = labels
        self.indices = np.arange(len(labels))
        if not colors:
            self.colors = basis_cmap(np.linspace(0, 1, len(self.indices)))
            # colors = colorcet.cm.glasbey(np.arange(len(indices)))
            # colors = sns.color_palette('husl', len(indices))
        else:
            self.colors = colors
        assert len(self.labels) == len(self.colors)
        self.gen_cmap()
    def gen_cmap(self):
        """Use a list of colors to generate a categorical cmap
        which maps
        [0, 1) -> self.colors[0]
        [1, 2) -> self.colors[1]
        [2, 3) -> self.colors[2]
        [3, 4) -> self.colors[3]
        ...
        """
        self.cmap = mpl.colors.ListedColormap(self.colors)
        self.bounds = np.arange(len(self.colors)+1)
        self.norm = mpl.colors.BoundaryNorm(self.bounds, self.cmap.N)
    def add_colorbar(
        self,
        fig,
        cax_dim=[0.95, 0.1, 0.05, 0.8],
        shift=0.5,
        fontsize=10,
        **kwargs,
        ):
        """Attach a categorical colorbar for this palette to ``fig``.

        cax_dim - [left, bottom, width, height] of the colorbar axes in
        figure coordinates; shift - tick offset so labels sit mid-segment.
        """
        cax = fig.add_axes(cax_dim)
        cbar = fig.colorbar(
            cm.ScalarMappable(cmap=self.cmap, norm=self.norm),
            cax=cax,
            boundaries=self.bounds,
            ticks=self.bounds[:-1]+shift,
            drawedges=True,
            **kwargs,
            )
        cbar.ax.set_yticklabels(self.labels, fontsize=fontsize)
        cbar.ax.tick_params(axis=u'both', which=u'both', length=0)
        return
    def to_dict(self, to_hex=True, output=""):
        """Return ``{label: color}``; hex strings when ``to_hex``.  If
        ``output`` is a path, also dump the palette there as JSON."""
        if to_hex:
            self.palette = {label: mpl.colors.to_hex(color)
                                for label, color
                                in zip(self.labels, self.colors)
                                }
        else:
            self.palette = {label: color
                                for label, color
                                in zip(self.labels, self.colors)
                                }
        if output:
            with open(output, 'w') as fh:
                json.dump(self.palette, fh)
                print("saved to file: {}".format(output))
        return self.palette
def agg_data(data, x, y, npxlx, npxly, agg):
    """Rasterize point data onto an npxlx-by-npxly datashader canvas.

    data - dataframe of points; x, y - column names; agg - a datashader
    reduction (e.g. ds.count(), ds.mean(col)).  Returns the aggregate raster.
    """
    canvas = ds.Canvas(plot_width=npxlx, plot_height=npxly)
    return canvas.points(data, x, y, agg=agg)
def agg_data_count(data, x, y, npxlx, npxly):
    """Per-pixel point counts, with empty pixels mapped to NaN.

    Dividing the raw counts by an ``any`` raster (1 where at least one point
    landed, 0 elsewhere) turns zero-count pixels into NaN so they render as
    background rather than as "count zero".
    """
    counts = agg_data(data, x, y, npxlx, npxly, ds.count())
    occupied = agg_data(data, x, y, npxlx, npxly, ds.any())
    return counts/occupied
def agg_data_ps(data, x, y, agg, scale_paras):
    """Aggregate points after deriving the pixel grid from the data extent.

    scale_paras - kwargs forwarded to PlotScale (npxlx / npxly / pxl_scale).
    Returns (aggdata, ps) where ps is the PlotScale describing the grid.
    """
    span_x = data[x].max() - data[x].min()
    span_y = data[y].max() - data[y].min()
    ps = PlotScale(span_x, span_y, **scale_paras)
    return agg_data(data, x, y, ps.npxlx, ps.npxly, agg), ps
def agg_count_cat(
    data, x, y, z, scale_paras,
    clip_max=0,
    reduce=False,
    sharp_boundary=True,
    ):
    """count categorical data

    data - dataframe of points; x, y - coordinate columns; z - category column
    scale_paras - kwargs for PlotScale (npxlx / npxly / pxl_scale)
    clip_max - if non-zero, clip per-category counts at this value
    reduce - if True, collapse to the argmax category per pixel
    sharp_boundary - if True, divide by an `any` raster so pixels with no
        points become NaN instead of zero
    Returns (aggdata, ps, zlabels) where zlabels are the category values.
    """
    # collect aggdata and ps
    agg = ds.count_cat(z)
    aggdata, ps = agg_data_ps(data, x, y, agg, scale_paras)
    zlabels = aggdata[z].values
    if clip_max:
        aggdata = aggdata.clip(max=clip_max)
    if reduce:
        aggdata = aggdata.argmax(z)
    if sharp_boundary:
        # normalize by any (set no cells to nan)
        agg = ds.any()
        aggdata_any = agg_data(data, x, y, ps.npxlx, ps.npxly, agg)
        aggdata_any = aggdata_any.astype(int)
        aggdata = aggdata/aggdata_any
    return aggdata, ps, zlabels
def set_vmin_vmax(numbers, vmaxp=99):
    """Return a (vmin, vmax) color range: 0 up to the vmaxp-th percentile.

    NaNs in ``numbers`` are ignored by the percentile computation.
    """
    upper = np.nanpercentile(numbers, vmaxp)
    return 0, upper
def add_colorbar_unified_colorbar(
    fig, cax,
    vmin=0, vmax=0,
    cmap=sns.cubehelix_palette(as_cmap=True),
    **kwargs,
    ):
    """User specified vmin and vmax

    Draw a colorbar on ``cax`` with exactly two ticks (at vmin and vmax),
    labeled 'Normalized expression'.  The ``cmap`` default is evaluated once
    at import time; the colormap object is shared across calls.
    """
    # colorbar
    norm = plt.Normalize(vmin, vmax)
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm,)
    fig.colorbar(sm, cax=cax,
                 ticks=[vmin, vmax],
                 label='Normalized expression',
                 **kwargs,
                )
    return
def add_colorbar(
    fig, cax,
    vmaxp=99,
    cmap=sns.cubehelix_palette(as_cmap=True),
    **kwargs,
    ):
    """[log10(normalized_counts+1)] further normed by the 99% highest expression)

    Draw a colorbar on ``cax`` spanning 0..vmaxp with ticks at both ends;
    the label documents the percentile-based normalization convention.
    """
    # colorbar
    norm = plt.Normalize(0, vmaxp)
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm,)
    fig.colorbar(sm, cax=cax,
                 ticks=[0, vmaxp],
                 label='Normalized expression\n(normed by 99% highest expression)',
                 **kwargs,
                )
    return
def imshow_routine(
    ax,
    aggdata,
    cmap=sns.cubehelix_palette(as_cmap=True),
    vmin=None, vmax=None,
    origin='lower',
    aspect='equal',
    **kwargs
    ):
    """Draw an aggregated raster on ``ax`` and hide the axes frame."""
    ax.imshow(
        aggdata,
        cmap=cmap,
        vmin=vmin,
        vmax=vmax,
        origin=origin,
        aspect=aspect,
        **kwargs,
    )
    ax.axis('off')
    return ax
def massive_scatterplot(
    ax,
    data,
    x, y,
    npxlx, npxly,
    agg=ds.count(),
    cmap=sns.cubehelix_palette(as_cmap=True),
    vmin=0,
    vmax=0,
    vmaxp=99,
    ):
    """Rasterized scatter plot: aggregate points on an npxlx-by-npxly grid
    and render the raster on ``ax``.

    If both vmin and vmax are left at 0, the color range is derived from the
    data (0 up to the vmaxp-th percentile of the aggregate).
    """
    aggdata = agg_data(data, x, y, npxlx, npxly, agg)
    if vmin == 0 and vmax == 0:
        vmin, vmax = set_vmin_vmax(aggdata.values, vmaxp)
    # the original if/else issued this identical call in both branches;
    # collapsed into a single call after resolving vmin/vmax above
    imshow_routine(ax, aggdata, cmap=cmap,
                   vmin=vmin, vmax=vmax,
                   )
    return ax
def massive_scatterplot_withticks(
    ax, data, x, y, npxlx, npxly,
    aspect='auto',
    color_logscale=False,
    ):
    """Rasterized scatter plot of per-pixel point counts, drawn in data
    coordinates so the axes keep their ticks and labels.

    color_logscale - if True, color by log10 of the counts.
    """
    x_lo, x_hi = data[x].min(), data[x].max()
    y_lo, y_hi = data[y].min(), data[y].max()
    counts = agg_data_count(data, x, y, npxlx, npxly)
    if color_logscale:
        counts = np.log10(counts)
    # extent maps the pixel grid back onto the original data range
    ax.imshow(
        counts, origin='lower', aspect=aspect,
        extent=[x_lo, x_hi, y_lo, y_hi])
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    return
def add_arrows(
    ax, label,
    fontsize=15,
    px=-0.01,
    py=-0.01,
    ):
    """Draw a small pair of x/y direction arrows at (px, py) in axes
    coordinates, with ``label`` written just below their origin."""
    arrow_style = dict(
        transform=ax.transAxes,
        head_width=0.01, head_length=0.01,
        fc='k', ec='k', clip_on=False,
        )
    # one arrow along +y, then one along +x
    for dx, dy in ((0, 0.1), (0.1, 0)):
        ax.arrow(px, py, dx, dy, **arrow_style)
    ax.text(px, py-0.01, label,
            transform=ax.transAxes,
            va='top', ha='left',
            fontsize=fontsize)
    return ax
def add_scalebar(
    ax, left, right, label, fontsize=15,
    ax_y=-0.01,
    ):
    """Draw a horizontal scale bar from ``left`` to ``right`` (data units on
    x, axes units on y) with ``label`` anchored at its right end."""
    blended = ax.get_xaxis_transform()  # x in data coords, y in axes coords
    ax.hlines(ax_y, left, right, color='k', linewidth=3,
              transform=blended,
              clip_on=False,
              )
    ax.text(right, ax_y-0.01, label,
            va='top', ha='right',
            transform=blended,
            fontsize=fontsize)
    return ax
def plot_gene_insitu_routine(
    ax, data, x, y, hue, scale_paras, cmap, title,
    arrows=True, scalebar=True,
    vmaxp=99,
    vmin=0, vmax=0,
    ):
    """Render the per-pixel mean of ``hue`` in spatial (in situ) coordinates,
    with optional axis arrows and a 1 mm scale bar.

    scale_paras - kwargs forwarded to PlotScale; leaving vmin and vmax at 0
    derives the color range from the data (0 to the vmaxp-th percentile).
    """
    # main
    agg = ds.mean(hue)
    rangex = data[x].max() - data[x].min()
    rangey = data[y].max() - data[y].min()
    ps = PlotScale(rangex, rangey, **scale_paras)
    massive_scatterplot(
        ax, data, x, y, ps.npxlx, ps.npxly,
        agg=agg,
        cmap=cmap,
        vmaxp=vmaxp,
        vmin=vmin,
        vmax=vmax,
        )
    ax.set_title(title)
    # arrows
    if arrows:
        add_arrows(ax, 'in situ')
    # scale bar
    if scalebar:
        bar_length = 1000 # (micron)
        add_scalebar(ax, ps.npxlx-ps.len2pixel(bar_length), ps.npxlx, '1 mm')
    return ax
def plot_gene_umap_routine(
    ax, data, x, y, hue, scale_paras, cmap, title,
    arrows=True,
    vmaxp=99,
    ):
    """Render the per-pixel mean of ``hue`` in UMAP coordinates (no scale
    bar here — UMAP distances carry no physical unit).

    scale_paras - kwargs forwarded to PlotScale.
    """
    # main
    agg = ds.mean(hue)
    rangex = data[x].max() - data[x].min()
    rangey = data[y].max() - data[y].min()
    ps = PlotScale(rangex, rangey, **scale_paras)
    massive_scatterplot(ax, data, x, y, ps.npxlx, ps.npxly,
                        agg=agg,
                        cmap=cmap,
                        vmaxp=vmaxp,
                        )
    ax.set_title(title)
    # arrows
    if arrows:
        add_arrows(ax, 'UMAP', px=-0.03, py=-0.03)
    return ax
def plot_cluster_insitu_routine(
    ax,
    ps,
    aggdata,
    hue,
    zlabel,
    title,
    cmap,
    arrows=True, scalebar=True,
    ):
    """Render one cluster's layer of a categorical aggregate in spatial
    coordinates, with optional arrows and a 1 mm scale bar.

    ps - an instance of PlotScale
    aggdata - 3-d categorical raster with category coordinate ``hue``;
    zlabel - which category's layer to show.
    """
    zlabels = aggdata.coords[hue].values
    # index of the requested category along the hue axis
    i = np.where(zlabels==zlabel)[0][0]
    imshow_routine(
        ax,
        aggdata[:,:,i],
        cmap=cmap,
        )
    ax.set_title(title)
    # arrows
    if arrows:
        add_arrows(ax, 'in situ')
    # scale bar
    if scalebar:
        bar_length = 1000 # (micron)
        add_scalebar(ax, ps.npxlx-ps.len2pixel(bar_length), ps.npxlx, '1 mm')
    return ax
def plot_cluster_umap_routine(
    ax,
    ps,
    aggdata,
    hue,
    zlabel,
    title,
    cmap,
    arrows=True, scalebar=True,
    ):
    """Render one cluster's layer of a categorical aggregate in UMAP
    coordinates.

    ps - an instance of PlotScale
    NOTE(review): both ``ps`` and ``scalebar`` are accepted but unused in
    this routine (a physical scale bar is not meaningful in UMAP space);
    kept for signature symmetry with the in-situ variant.
    """
    zlabels = aggdata.coords[hue].values
    # index of the requested category along the hue axis
    i = np.where(zlabels==zlabel)[0][0]
    imshow_routine(
        ax,
        aggdata[:,:,i],
        cmap=cmap,
        )
    ax.set_title(title)
    # arrows
    if arrows:
        add_arrows(ax, 'UMAP')
    return ax
### END OF VIZGEN MERFISH SECTION
def gen_cdf(array, ax, x_range=[], n_points=1000, show=True, flip=False, **kwargs):
    """ returns x and y values

    Empirical CDF of ``array``, optionally plotted on ``ax``.  With ``flip``
    the survival function (1 - CDF) is returned instead.  If ``x_range`` is
    given, the curve is resampled onto ``n_points`` evenly spaced positions.
    NOTE(review): the mutable default ``x_range=[]`` is never mutated, so it
    is safe here, but ``x_range=None`` would be the conventional idiom.
    """
    x = np.sort(array)
    y = np.arange(len(array))/len(array)
    if flip:
        # x = x[::-1]
        y = 1 - y
    if not x_range:
        if show:
            ax.plot(x, y, **kwargs)
        return x, y
    else:
        start, end = x_range
        xbins = np.linspace(start, end, n_points)
        ybins = np.interp(xbins, x, y)
        if show:
            ax.plot(xbins, ybins, **kwargs)
        return xbins, ybins | [
"datashader.count_cat",
"datashader.count",
"datashader.mean",
"numpy.log10",
"numpy.nanpercentile",
"datashader.Canvas",
"numpy.where",
"numpy.sort",
"datashader.any",
"numpy.array",
"numpy.linspace",
"numpy.interp",
"json.dump"
] | [((4400, 4410), 'datashader.count', 'ds.count', ([], {}), '()\n', (4408, 4410), True, 'import datashader as ds\n'), ((4475, 4483), 'datashader.any', 'ds.any', ([], {}), '()\n', (4481, 4483), True, 'import datashader as ds\n'), ((5090, 5105), 'datashader.count_cat', 'ds.count_cat', (['z'], {}), '(z)\n', (5102, 5105), True, 'import datashader as ds\n'), ((7152, 7162), 'datashader.count', 'ds.count', ([], {}), '()\n', (7160, 7162), True, 'import datashader as ds\n'), ((9385, 9397), 'datashader.mean', 'ds.mean', (['hue'], {}), '(hue)\n', (9392, 9397), True, 'import datashader as ds\n'), ((10113, 10125), 'datashader.mean', 'ds.mean', (['hue'], {}), '(hue)\n', (10120, 10125), True, 'import datashader as ds\n'), ((11811, 11825), 'numpy.sort', 'np.sort', (['array'], {}), '(array)\n', (11818, 11825), True, 'import numpy as np\n'), ((5419, 5427), 'datashader.any', 'ds.any', ([], {}), '()\n', (5425, 5427), True, 'import datashader as ds\n'), ((5702, 5734), 'numpy.nanpercentile', 'np.nanpercentile', (['numbers', 'vmaxp'], {}), '(numbers, vmaxp)\n', (5718, 5734), True, 'import numpy as np\n'), ((7974, 7991), 'numpy.log10', 'np.log10', (['aggdata'], {}), '(aggdata)\n', (7982, 7991), True, 'import numpy as np\n'), ((12070, 12103), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n_points'], {}), '(start, end, n_points)\n', (12081, 12103), True, 'import numpy as np\n'), ((12120, 12142), 'numpy.interp', 'np.interp', (['xbins', 'x', 'y'], {}), '(xbins, x, y)\n', (12129, 12142), True, 'import numpy as np\n'), ((4228, 4274), 'datashader.Canvas', 'ds.Canvas', ([], {'plot_width': 'npxlx', 'plot_height': 'npxly'}), '(plot_width=npxlx, plot_height=npxly)\n', (4237, 4274), True, 'import datashader as ds\n'), ((10825, 10852), 'numpy.where', 'np.where', (['(zlabels == zlabel)'], {}), '(zlabels == zlabel)\n', (10833, 10852), True, 'import numpy as np\n'), ((11433, 11460), 'numpy.where', 'np.where', (['(zlabels == zlabel)'], {}), '(zlabels == zlabel)\n', (11441, 11460), True, 'import 
numpy as np\n'), ((3997, 4024), 'json.dump', 'json.dump', (['self.palette', 'fh'], {}), '(self.palette, fh)\n', (4006, 4024), False, 'import json\n'), ((669, 704), 'numpy.array', 'np.array', (['[npxlx, npxly, pxl_scale]'], {}), '([npxlx, npxly, pxl_scale])\n', (677, 704), True, 'import numpy as np\n')] |
import os
import sys
import time
import numpy as np
import Pyro4
import select
from slam_pkg.utils.map_builder import MapBuilder as mb
from slam_pkg.utils import depth_util as du
from skimage.morphology import disk, binary_dilation
from rich import print
Pyro4.config.SERIALIZER = "pickle"
Pyro4.config.SERIALIZERS_ACCEPTED.add("pickle")
Pyro4.config.PICKLE_PROTOCOL_VERSION = 4
@Pyro4.expose
class SLAM(object):
    """Pyro-exposed SLAM service: maintains an occupancy map built from the
    robot's point clouds and offers map <-> real-world coordinate helpers."""
    def __init__(
        self,
        robot,
        map_size=4000,
        resolution=5,
        robot_rad=30,
        agent_min_z=5,
        agent_max_z=70,
        obstacle_threshold=1,
    ):
        # robot: proxy providing get_current_pcd(); map_size and resolution
        # are in cm; robot_rad (cm) inflates obstacles to the robot footprint;
        # agent_min_z/agent_max_z bound the height slice used as obstacles.
        self.robot = robot
        self.robot_rad = robot_rad
        self.map_resolution = resolution
        self.map_builder = mb(
            map_size_cm=map_size,
            resolution=resolution,
            agent_min_z=agent_min_z,
            agent_max_z=agent_max_z,
            obs_thr=obstacle_threshold,
        )
        self.map_size = map_size
        # if the map is a previous map loaded from disk, and
        # if the robot looks around and registers itself at a
        # non-origin location in the map just as it is coming up,
        # then the robot's reported origin (from get_base_state) is
        # not the map's origin. In such cases, `self.init_state`
        # is useful, as it is used to handle all co-ordinate transforms
        # correctly.
        # Currently, self.init_state is kinda useless and not utilized
        # in any meaningful way
        self.init_state = (0.0, 0.0, 0.0)
        self.prev_bot_state = (0.0, 0.0, 0.0)
        self.update_map()
        assert self.traversable is not None
    def get_traversable_map(self):
        """Return the boolean traversability mask computed by update_map()."""
        return self.traversable
    def real2map(self, real):
        """Convert real-world coordinates to map (grid) coordinates."""
        return self.map_builder.real2map(real)
    def map2real(self, map_loc):
        """Convert map (grid) coordinates to real-world coordinates."""
        return self.map_builder.map2real(map_loc)
    def robot2map(self, robot_loc):
        """Convert a robot-frame location to map coordinates."""
        # TODO: re-enable this code when init_state can be non-zero
        # robot_location = du.get_relative_state(
        #     robot_loc,
        #     self.init_state)
        return self.real2map(robot_loc)
    def map2robot(self, map_loc):
        """Convert map coordinates to a robot-frame location."""
        return self.map2real(map_loc)
        # TODO: re-enable and test this code when init_state can be non-zero
        # real_loc = self.map2real(map_loc)
        # loc = du.get_relative_state(real_loc, (0.0, 0.0, -self.init_state[2]))
        # # 2) add the offset
        # loc = list(loc)
        # loc[0] += self.init_state[0]
        # loc[1] += self.init_state[1]
        # return tuple(loc)
    def add_obstacle(self, location, in_map=False):
        """
        add an obstacle at the given location.
        if in_map=False, then location is given in real co-ordinates
        if in_map=True, then location is given in map co-ordinates
        """
        if not in_map:
            location = self.real2map(location)
        self.map_builder.add_obstacle(location)
    def update_map(self):
        """Fold the robot's current point cloud into the map and recompute
        the traversable mask (obstacles dilated by the robot radius)."""
        pcd = self.robot.get_current_pcd()[0]
        self.map_builder.update_map(pcd)
        # explore the map by robot shape
        obstacle = self.map_builder.map[:, :, 1] >= 1.0
        selem = disk(self.robot_rad / self.map_builder.resolution)
        traversable = binary_dilation(obstacle, selem) != True
        self.traversable = traversable
    def get_map_resolution(self):
        """Return the map resolution (cm per grid cell)."""
        return self.map_resolution
    def get_map(self):
        """returns the location of obstacles created by slam only for the obstacles,"""
        # get the index corresponding to obstacles
        indices = np.where(self.map_builder.map[:, :, 1] >= 1.0)
        # convert them into robot frame
        real_world_locations = [
            self.map2real([indice[0], indice[1]]).tolist()
            for indice in zip(indices[0], indices[1])
        ]
        return real_world_locations
    def reset_map(self, z_bins=None, obs_thr=None):
        """Clear the map, optionally changing the z bins / obstacle threshold."""
        self.map_builder.reset_map(self.map_size, z_bins=z_bins, obs_thr=obs_thr)
# --- Pyro4 server entry point ---------------------------------------------
# NOTE(review): os.getenv returns None when a variable is unset, which would
# make the string concatenation for the Pyro proxy below raise; confirm that
# LOCOBOT_IP and LOCAL_IP are guaranteed in the deployment environment.
robot_ip = os.getenv("LOCOBOT_IP")
ip = os.getenv("LOCAL_IP")
# name of the remote robot object to proxy; overridable via the first CLI arg
robot_name = "remotelocobot"
if len(sys.argv) > 1:
    robot_name = sys.argv[1]
with Pyro4.Daemon(ip) as daemon:
    robot = Pyro4.Proxy("PYRONAME:" + robot_name + "@" + robot_ip)
    if robot_name == "hello_realsense":
        # realsense-equipped robot: taller obstacle slice and a higher
        # obstacle threshold to cope with noisy depth readings
        robot_height = 141  # cm
        min_z = 20  # because of the huge spatial variance in realsense readings
        max_z = robot_height + 5  # cm
        obj = SLAM(
            robot,
            obstacle_threshold=10,
            agent_min_z=min_z,
            agent_max_z=max_z,
        )
    else:
        obj = SLAM(robot)
    # register the SLAM object with the daemon and the Pyro name server
    obj_uri = daemon.register(obj)
    with Pyro4.locateNS(robot_ip) as ns:
        ns.register("slam", obj_uri)
    print("SLAM Server is started...")
    def refresh():
        # run between request-loop iterations; keeps the map current
        obj.update_map()
        # print("In refresh: ", time.asctime())
        return True
    daemon.requestLoop(refresh)
    # visit this later
    # try:
    #     while True:
    #         print(time.asctime(), "Waiting for requests...")
    #         sockets = daemon.sockets
    #         ready_socks = select.select(sockets, [], [], 0)
    #         events = []
    #         for s in ready_socks:
    #             events.append(s)
    #         daemon.events(events)
    # except KeyboardInterrupt:
    #     pass
| [
"skimage.morphology.binary_dilation",
"os.getenv",
"numpy.where",
"Pyro4.config.SERIALIZERS_ACCEPTED.add",
"Pyro4.locateNS",
"slam_pkg.utils.map_builder.MapBuilder",
"rich.print",
"Pyro4.Daemon",
"Pyro4.Proxy",
"skimage.morphology.disk"
] | [((291, 338), 'Pyro4.config.SERIALIZERS_ACCEPTED.add', 'Pyro4.config.SERIALIZERS_ACCEPTED.add', (['"""pickle"""'], {}), "('pickle')\n", (328, 338), False, 'import Pyro4\n'), ((4006, 4029), 'os.getenv', 'os.getenv', (['"""LOCOBOT_IP"""'], {}), "('LOCOBOT_IP')\n", (4015, 4029), False, 'import os\n'), ((4035, 4056), 'os.getenv', 'os.getenv', (['"""LOCAL_IP"""'], {}), "('LOCAL_IP')\n", (4044, 4056), False, 'import os\n'), ((4142, 4158), 'Pyro4.Daemon', 'Pyro4.Daemon', (['ip'], {}), '(ip)\n', (4154, 4158), False, 'import Pyro4\n'), ((4182, 4236), 'Pyro4.Proxy', 'Pyro4.Proxy', (["('PYRONAME:' + robot_name + '@' + robot_ip)"], {}), "('PYRONAME:' + robot_name + '@' + robot_ip)\n", (4193, 4236), False, 'import Pyro4\n'), ((4731, 4765), 'rich.print', 'print', (['"""SLAM Server is started..."""'], {}), "('SLAM Server is started...')\n", (4736, 4765), False, 'from rich import print\n'), ((744, 873), 'slam_pkg.utils.map_builder.MapBuilder', 'mb', ([], {'map_size_cm': 'map_size', 'resolution': 'resolution', 'agent_min_z': 'agent_min_z', 'agent_max_z': 'agent_max_z', 'obs_thr': 'obstacle_threshold'}), '(map_size_cm=map_size, resolution=resolution, agent_min_z=agent_min_z,\n agent_max_z=agent_max_z, obs_thr=obstacle_threshold)\n', (746, 873), True, 'from slam_pkg.utils.map_builder import MapBuilder as mb\n'), ((3176, 3226), 'skimage.morphology.disk', 'disk', (['(self.robot_rad / self.map_builder.resolution)'], {}), '(self.robot_rad / self.map_builder.resolution)\n', (3180, 3226), False, 'from skimage.morphology import disk, binary_dilation\n'), ((3579, 3625), 'numpy.where', 'np.where', (['(self.map_builder.map[:, :, 1] >= 1.0)'], {}), '(self.map_builder.map[:, :, 1] >= 1.0)\n', (3587, 3625), True, 'import numpy as np\n'), ((4657, 4681), 'Pyro4.locateNS', 'Pyro4.locateNS', (['robot_ip'], {}), '(robot_ip)\n', (4671, 4681), False, 'import Pyro4\n'), ((3249, 3281), 'skimage.morphology.binary_dilation', 'binary_dilation', (['obstacle', 'selem'], {}), '(obstacle, selem)\n', (3264, 
3281), False, 'from skimage.morphology import disk, binary_dilation\n')] |
#copyright: <NAME> 2020
#open source under the MIT License
import matplotlib.pyplot as plt
import numpy as np
duration = 200 #number of days to run the simulation from patient zero
survival = 0.96 #chance of surviving
recovery = 21 #median days to fully recover
terminal = 14 #median days to die if not recovering
transmit = 5 #median days to transmit infection
initial_R0 = 2.1
max_lookahead = 60 #not a model parameter. This is the extra space we need to allocate in the arrays to capture data that happens after the end of the simulation
# utility function for generating normally distributed random numbers with upper and lower bounds
def min_max_normal(mu, sigma, lb, ub):
    """Draw one sample from N(mu, sigma), round it, clamp it into [lb, ub]
    (the upper bound wins if the bounds cross), and return it as an int."""
    draw = np.round(np.random.normal(mu, sigma))
    bounded = min(ub, max(lb, draw))
    return int(round(bounded))
def sim(start, effect, minR0) :
    """Run a stochastic infection simulation for `duration` days.

    start  - day on which mitigation (lockdown) begins
    effect - fractional daily reduction applied to R0 after `start`
    minR0  - floor below which R0 does not fall
    Returns (sick, dead, recovered, R0) as per-day arrays; each extends
    `max_lookahead` days past `duration` to hold delayed outcomes.
    The RNG is re-seeded with 1 on every call, so different scenarios are
    reproducible and share the same random stream.
    """
    np.random.seed(1)
    R0 = np.zeros([max_lookahead + 1+ duration])
    sick = np.zeros([max_lookahead + 1 + duration])
    recovered = np.zeros([max_lookahead + 1 + duration])
    dead = np.zeros([max_lookahead + 1 + duration])
    new = np.zeros([max_lookahead + 1 + duration])
    sick[0] = 1 #initial number of sick people to get things going. Can start with 1
    new[0] = 1
    R0[0] = initial_R0#this was the initial reported R0, this should decline with aggressive social constraints
    days = range(duration)
    for d in days :
        # determine how many people each new patient will infect
        newly_infected = int(0)
        for patient in range(int(new[d])) :
            newly_infected = newly_infected + min_max_normal(R0[d],initial_R0/3,0,max_lookahead)
        # determine future outcomes for each new patient
        for n in range(int(newly_infected)) :
            #determine date of infection
            date_infected = d + min_max_normal(transmit,7,1,max_lookahead)
            new[date_infected] += 1
            #determine survival
            # NOTE(review): date_infected can reach d + max_lookahead, and the
            # recovery/death offset below adds up to max_lookahead more, so an
            # extreme (many-sigma) pair of draws could index past the arrays —
            # confirm the allocated max_lookahead slack is intentional.
            outcome = np.random.uniform()
            if (outcome < survival) :
                days_til_recovery = min_max_normal(recovery,7,1,max_lookahead)
                recovered[date_infected+days_til_recovery] += 1
                sick[date_infected+days_til_recovery] += -1 #decrement sick on date of recovery
            else :
                days_til_death = min_max_normal(terminal,3,1,max_lookahead)
                dead[date_infected+days_til_death] += 1
                sick[date_infected+days_til_death] += -1 # decrement sick on date of death
        # setup accumulators for next day
        sick[d+1] = (sick[d] + new[d+1]) + sick[d+1]
        dead[d+1] = dead[d+1] + dead[d]
        recovered[d+1] = recovered[d+1] + recovered[d]
        #adjust R0 based on lockdown date and effectiveness
        if (d+1 > start) :
            newR0 = R0[d] * (1-effect)
            R0[d+1] = max(minR0, newR0)
        else :
            R0[d+1] = R0[d]
    return sick,dead,recovered,R0
# Compare two mitigation scenarios: an earlier/stronger lockdown (day 40,
# 20% daily R0 reduction, floor 0.6) against a later/weaker one (day 47,
# 10% daily reduction, floor 0.8); plot both on shared panels.
s1,d1,r1,a = sim(40,0.2,0.6)
s2,d2,r2,b = sim(47,0.1,0.8)
fig, axs = plt.subplots(4)
# NOTE(review): plt.ylim acts on the *current* axes (the last-created
# subplot), not on all four panels — confirm that is the intended effect.
plt.ylim(bottom=0)
axs[0].set_title('Sick')
axs[0].plot(s1[0:duration])
axs[0].plot(s2[0:duration])
axs[1].set_title('Died')
axs[1].plot(d1[0:duration])
axs[1].plot(d2[0:duration])
axs[2].set_title('Recovered')
axs[2].plot(r1[0:duration])
axs[2].plot(r2[0:duration])
plt.ylim(top=2.5)
axs[3].set_title('R0')
axs[3].plot(a[0:duration])
axs[3].plot(b[0:duration])
plt.subplots_adjust(top=3)
| [
"numpy.random.normal",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
] | [((2988, 3003), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)'], {}), '(4)\n', (3000, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3004, 3022), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (3012, 3022), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3288), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(2.5)'}), '(top=2.5)\n', (3279, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3392), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(3)'}), '(top=3)\n', (3385, 3392), True, 'import matplotlib.pyplot as plt\n'), ((794, 811), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (808, 811), True, 'import numpy as np\n'), ((821, 861), 'numpy.zeros', 'np.zeros', (['[max_lookahead + 1 + duration]'], {}), '([max_lookahead + 1 + duration])\n', (829, 861), True, 'import numpy as np\n'), ((872, 912), 'numpy.zeros', 'np.zeros', (['[max_lookahead + 1 + duration]'], {}), '([max_lookahead + 1 + duration])\n', (880, 912), True, 'import numpy as np\n'), ((929, 969), 'numpy.zeros', 'np.zeros', (['[max_lookahead + 1 + duration]'], {}), '([max_lookahead + 1 + duration])\n', (937, 969), True, 'import numpy as np\n'), ((981, 1021), 'numpy.zeros', 'np.zeros', (['[max_lookahead + 1 + duration]'], {}), '([max_lookahead + 1 + duration])\n', (989, 1021), True, 'import numpy as np\n'), ((1032, 1072), 'numpy.zeros', 'np.zeros', (['[max_lookahead + 1 + duration]'], {}), '([max_lookahead + 1 + duration])\n', (1040, 1072), True, 'import numpy as np\n'), ((1912, 1931), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1929, 1931), True, 'import numpy as np\n'), ((724, 751), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (740, 751), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.