code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# coding: utf-8
import chainer
import chainer.links as L
from chainer import serializers
class A(chainer.Chain):
    """Chain with one Convolution2D whose input channels are inferred lazily."""

    def __init__(self):
        super(A, self).__init__()
        with self.init_scope():
            # TODO Add more tests
            # Rectangular 5x7 kernel with asymmetric (2, 3) stride.
            self.l1 = L.Convolution2D(None, 6, (5, 7), stride=(2, 3))

    def forward(self, x):
        # Single-layer forward pass.
        return self.l1(x)
class SingleParam(chainer.Chain):
    """Chain with a single bias-free Convolution2D (its only parameter is W)."""

    def __init__(self):
        super(SingleParam, self).__init__()
        with self.init_scope():
            self.l1 = L.Convolution2D(20, 10, 3, stride=1, pad=1, nobias=True)

    def forward(self, x):
        # Single-layer forward pass.
        return self.l1(x)
# ======================================
from chainer_compiler.elichika import testtools
import numpy as np
def main():
    """Generate elichika test cases for the convolution chains."""
    # Build the lazily-initialized model before seeding, matching the
    # original execution order so RNG consumption is unchanged.
    model = A()
    np.random.seed(123)
    inputs = [np.random.rand(2, 20, 15, 17).astype(np.float32)]
    testtools.generate_testcase(model, inputs)
    testtools.generate_testcase(SingleParam(), inputs, subname='single_param')
    testtools.generate_testcase(lambda: SingleParam(), inputs, subname='single_param_lambda')
# Entry point: build the models and emit the elichika test cases.
if __name__ == '__main__':
    main()
| [
"numpy.random.rand",
"numpy.random.seed",
"chainer_compiler.elichika.testtools.generate_testcase",
"chainer.links.Convolution2D"
] | [((808, 827), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (822, 827), True, 'import numpy as np\n'), ((890, 929), 'chainer_compiler.elichika.testtools.generate_testcase', 'testtools.generate_testcase', (['model', '[x]'], {}), '(model, [x])\n', (917, 929), False, 'from chainer_compiler.elichika import testtools\n'), ((262, 309), 'chainer.links.Convolution2D', 'L.Convolution2D', (['None', '(6)', '(5, 7)'], {'stride': '(2, 3)'}), '(None, 6, (5, 7), stride=(2, 3))\n', (277, 309), True, 'import chainer.links as L\n'), ((537, 593), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(20)', '(10)', '(3)'], {'stride': '(1)', 'pad': '(1)', 'nobias': '(True)'}), '(20, 10, 3, stride=1, pad=1, nobias=True)\n', (552, 593), True, 'import chainer.links as L\n'), ((836, 865), 'numpy.random.rand', 'np.random.rand', (['(2)', '(20)', '(15)', '(17)'], {}), '(2, 20, 15, 17)\n', (850, 865), True, 'import numpy as np\n')] |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for base_task."""
import contextlib
from absl.testing import absltest
from dm_control import composer
from dm_control.composer.observation import observable
from dm_control.composer.observation import updater
from dm_control.rl import control
from dm_robotics.moma import base_task
from dm_robotics.moma import entity_initializer
from dm_robotics.moma import robot
from dm_robotics.moma import scene_initializer
from dm_robotics.moma.effectors import arm_effector as arm_effector_module
from dm_robotics.moma.models.arenas import empty
from dm_robotics.moma.models.end_effectors.robot_hands import robotiq_2f85
from dm_robotics.moma.models.robots.robot_arms import sawyer
from dm_robotics.moma.sensors import robot_arm_sensor
from dm_robotics.moma.sensors import robot_tcp_sensor
import numpy as np
class BaseTaskTest(absltest.TestCase):
  """Tests that BaseTask exposes exactly its robots' sensor observables."""

  def _build_sample_base_task(self):
    # Assemble a minimal single-robot BaseTask (Sawyer arm + Robotiq 2F85
    # gripper in an empty arena) for the tests below.
    arena = empty.Arena('test_arena')
    arm = sawyer.Sawyer(with_pedestal=False)
    gripper = robotiq_2f85.Robotiq2F85()
    # Compose gripper onto the arm before sensors/effectors reference them.
    robot.standard_compose(
        arm=arm, gripper=gripper, wrist_ft=None, wrist_cameras=[])
    robot_sensors = [
        robot_arm_sensor.RobotArmSensor(
            arm=arm, name='robot0', have_torque_sensors=True),
        robot_tcp_sensor.RobotTCPSensor(gripper=gripper, name='robot0'),
    ]
    arm_effector = arm_effector_module.ArmEffector(
        arm=arm, action_range_override=None, robot_name='robot0')
    rbt = robot.StandardRobot(
        arm=arm,
        arm_base_site_name='pedestal_attachment',
        gripper=gripper,
        wrist_ft=None,
        wrist_cameras=[],
        robot_sensors=robot_sensors,
        arm_effector=arm_effector,
        gripper_effector=None,
        name='robot0')
    arena.attach(arm)
    task = base_task.BaseTask(
        task_name='test',
        arena=arena,
        robots=[rbt],
        props=[],
        extra_sensors=[],
        extra_effectors=[],
        scene_initializer=scene_initializer.CompositeSceneInitializer([]),
        episode_initializer=entity_initializer.TaskEntitiesInitializer([]),
        control_timestep=0.1)
    return task

  def test_observables(self):
    """Test that the task observables includes only sensor observables."""
    task = self._build_sample_base_task()
    task_obs = set(task.observables)
    robot_sensor_obs = []
    for s in task.robots[0].sensors:
      robot_sensor_obs.extend(list(s.observables))
    # Empty symmetric difference == the two observable sets are identical.
    robot_sensor_obs = set(robot_sensor_obs)
    self.assertEmpty(task_obs ^ robot_sensor_obs)

  def test_observable_types(self):
    """Test that every observation spec dtype is one of the accepted types."""
    task = self._build_sample_base_task()
    env = composer.Environment(task, strip_singleton_obs_buffer_dim=True)
    obs_spec = env.observation_spec()
    acceptable_types = set(
        [np.dtype(np.uint8),
         np.dtype(np.int64),
         np.dtype(np.float32)])
    for spec in obs_spec.values():
      self.assertIn(np.dtype(spec.dtype), acceptable_types)
class FakePhysics(control.Physics):
  """Minimal stand-in physics engine used to drive observation updates in tests."""

  def __init__(self):
    self._observables = {}
    self._step_counter = 0

  @property
  def observables(self):
    return self._observables

  def step(self, sub_steps=1):
    # Each call advances the fake clock by exactly one tick; `sub_steps` is ignored.
    self._step_counter = self._step_counter + 1

  def time(self):
    return self._step_counter

  def timestep(self):
    return 1.0

  def set_control(self, ctrl):
    pass

  def reset(self):
    self._step_counter = 0

  def after_reset(self):
    pass

  @contextlib.contextmanager
  def suppress_physics_errors(self):
    # Nothing to suppress in the fake; yield control unchanged.
    yield
class CastObservationsTest(absltest.TestCase):
  """Tests for base_task.CastObservable value preservation and dtype casting."""

  def testCastAggregatedObservable(self):
    # Verify that wrapping a buffered/aggregated/corrupted observable in
    # CastObservable keeps the values identical while casting float64->float32.
    physics = FakePhysics()
    # Raw observable: fake-physics time scaled by 10 (corruptor), buffered in
    # pairs and aggregated to [first, second, first + second].
    physics.observables['raw_value'] = observable.Generic(
        raw_observation_callable=lambda unused: np.float64(physics.time()),
        update_interval=1,
        buffer_size=2,
        aggregator=lambda arr: np.asarray([arr[0], arr[1], arr[0] + arr[1]]),
        corruptor=lambda value, random_state: value * 10.0)
    physics.observables['cast_value'] = base_task.CastObservable(
        physics.observables['raw_value'])
    for obs in physics.observables.values():
      obs.enabled = True
    physics.reset()
    physics_steps_per_control_step = 2
    observation_updater = updater.Updater(
        physics.observables,
        physics_steps_per_control_step,
        strip_singleton_buffer_dim=True)
    observation_updater.reset(physics=physics, random_state=None)
    # Run 3 control steps of 2 physics steps each, collecting both streams.
    raw_values, cast_values = [], []
    for unused_step in range(0, 3):
      observation_updater.prepare_for_next_control_step()
      for _ in range(physics_steps_per_control_step):
        physics.step()
        observation_updater.update()
      observation = observation_updater.get_observation()
      print(observation)
      raw_values.append(observation['raw_value'])
      cast_values.append(observation['cast_value'])
    # Values must be identical between raw and cast streams at every step.
    np.testing.assert_equal(raw_values[0], np.asarray([10.0, 20.0, 30.0]))
    np.testing.assert_equal(cast_values[0], np.asarray([10.0, 20.0, 30.0]))
    np.testing.assert_equal(raw_values[1], np.asarray([30.0, 40.0, 70.0]))
    np.testing.assert_equal(cast_values[1], np.asarray([30.0, 40.0, 70.0]))
    np.testing.assert_equal(raw_values[2], np.asarray([50.0, 60.0, 110.0]))
    np.testing.assert_equal(cast_values[2], np.asarray([50.0, 60.0, 110.0]))
    # Only the dtype should differ: float64 in, float32 out of the cast.
    self.assertEqual(raw_values[0].dtype, np.float64)
    self.assertEqual(cast_values[0].dtype, np.float32)
# Run the absl test suite when executed directly.
if __name__ == '__main__':
  absltest.main()
| [
"numpy.dtype",
"dm_robotics.moma.sensors.robot_tcp_sensor.RobotTCPSensor",
"dm_robotics.moma.models.end_effectors.robot_hands.robotiq_2f85.Robotiq2F85",
"dm_robotics.moma.effectors.arm_effector.ArmEffector",
"dm_robotics.moma.scene_initializer.CompositeSceneInitializer",
"dm_robotics.moma.sensors.robot_ar... | [((6122, 6137), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6135, 6137), False, 'from absl.testing import absltest\n'), ((1497, 1522), 'dm_robotics.moma.models.arenas.empty.Arena', 'empty.Arena', (['"""test_arena"""'], {}), "('test_arena')\n", (1508, 1522), False, 'from dm_robotics.moma.models.arenas import empty\n'), ((1534, 1568), 'dm_robotics.moma.models.robots.robot_arms.sawyer.Sawyer', 'sawyer.Sawyer', ([], {'with_pedestal': '(False)'}), '(with_pedestal=False)\n', (1547, 1568), False, 'from dm_robotics.moma.models.robots.robot_arms import sawyer\n'), ((1584, 1610), 'dm_robotics.moma.models.end_effectors.robot_hands.robotiq_2f85.Robotiq2F85', 'robotiq_2f85.Robotiq2F85', ([], {}), '()\n', (1608, 1610), False, 'from dm_robotics.moma.models.end_effectors.robot_hands import robotiq_2f85\n'), ((1616, 1701), 'dm_robotics.moma.robot.standard_compose', 'robot.standard_compose', ([], {'arm': 'arm', 'gripper': 'gripper', 'wrist_ft': 'None', 'wrist_cameras': '[]'}), '(arm=arm, gripper=gripper, wrist_ft=None,\n wrist_cameras=[])\n', (1638, 1701), False, 'from dm_robotics.moma import robot\n'), ((1933, 2026), 'dm_robotics.moma.effectors.arm_effector.ArmEffector', 'arm_effector_module.ArmEffector', ([], {'arm': 'arm', 'action_range_override': 'None', 'robot_name': '"""robot0"""'}), "(arm=arm, action_range_override=None,\n robot_name='robot0')\n", (1964, 2026), True, 'from dm_robotics.moma.effectors import arm_effector as arm_effector_module\n'), ((2043, 2271), 'dm_robotics.moma.robot.StandardRobot', 'robot.StandardRobot', ([], {'arm': 'arm', 'arm_base_site_name': '"""pedestal_attachment"""', 'gripper': 'gripper', 'wrist_ft': 'None', 'wrist_cameras': '[]', 'robot_sensors': 'robot_sensors', 'arm_effector': 'arm_effector', 'gripper_effector': 'None', 'name': '"""robot0"""'}), "(arm=arm, arm_base_site_name='pedestal_attachment',\n gripper=gripper, wrist_ft=None, wrist_cameras=[], robot_sensors=\n robot_sensors, 
arm_effector=arm_effector, gripper_effector=None, name=\n 'robot0')\n", (2062, 2271), False, 'from dm_robotics.moma import robot\n'), ((3286, 3349), 'dm_control.composer.Environment', 'composer.Environment', (['task'], {'strip_singleton_obs_buffer_dim': '(True)'}), '(task, strip_singleton_obs_buffer_dim=True)\n', (3306, 3349), False, 'from dm_control import composer\n'), ((4675, 4733), 'dm_robotics.moma.base_task.CastObservable', 'base_task.CastObservable', (["physics.observables['raw_value']"], {}), "(physics.observables['raw_value'])\n", (4699, 4733), False, 'from dm_robotics.moma import base_task\n'), ((4900, 5005), 'dm_control.composer.observation.updater.Updater', 'updater.Updater', (['physics.observables', 'physics_steps_per_control_step'], {'strip_singleton_buffer_dim': '(True)'}), '(physics.observables, physics_steps_per_control_step,\n strip_singleton_buffer_dim=True)\n', (4915, 5005), False, 'from dm_control.composer.observation import updater\n'), ((1738, 1824), 'dm_robotics.moma.sensors.robot_arm_sensor.RobotArmSensor', 'robot_arm_sensor.RobotArmSensor', ([], {'arm': 'arm', 'name': '"""robot0"""', 'have_torque_sensors': '(True)'}), "(arm=arm, name='robot0', have_torque_sensors\n =True)\n", (1769, 1824), False, 'from dm_robotics.moma.sensors import robot_arm_sensor\n'), ((1842, 1905), 'dm_robotics.moma.sensors.robot_tcp_sensor.RobotTCPSensor', 'robot_tcp_sensor.RobotTCPSensor', ([], {'gripper': 'gripper', 'name': '"""robot0"""'}), "(gripper=gripper, name='robot0')\n", (1873, 1905), False, 'from dm_robotics.moma.sensors import robot_tcp_sensor\n'), ((5570, 5600), 'numpy.asarray', 'np.asarray', (['[10.0, 20.0, 30.0]'], {}), '([10.0, 20.0, 30.0])\n', (5580, 5600), True, 'import numpy as np\n'), ((5646, 5676), 'numpy.asarray', 'np.asarray', (['[10.0, 20.0, 30.0]'], {}), '([10.0, 20.0, 30.0])\n', (5656, 5676), True, 'import numpy as np\n'), ((5721, 5751), 'numpy.asarray', 'np.asarray', (['[30.0, 40.0, 70.0]'], {}), '([30.0, 40.0, 70.0])\n', (5731, 5751), 
True, 'import numpy as np\n'), ((5797, 5827), 'numpy.asarray', 'np.asarray', (['[30.0, 40.0, 70.0]'], {}), '([30.0, 40.0, 70.0])\n', (5807, 5827), True, 'import numpy as np\n'), ((5872, 5903), 'numpy.asarray', 'np.asarray', (['[50.0, 60.0, 110.0]'], {}), '([50.0, 60.0, 110.0])\n', (5882, 5903), True, 'import numpy as np\n'), ((5949, 5980), 'numpy.asarray', 'np.asarray', (['[50.0, 60.0, 110.0]'], {}), '([50.0, 60.0, 110.0])\n', (5959, 5980), True, 'import numpy as np\n'), ((2553, 2600), 'dm_robotics.moma.scene_initializer.CompositeSceneInitializer', 'scene_initializer.CompositeSceneInitializer', (['[]'], {}), '([])\n', (2596, 2600), False, 'from dm_robotics.moma import scene_initializer\n'), ((2630, 2676), 'dm_robotics.moma.entity_initializer.TaskEntitiesInitializer', 'entity_initializer.TaskEntitiesInitializer', (['[]'], {}), '([])\n', (2672, 2676), False, 'from dm_robotics.moma import entity_initializer\n'), ((3426, 3444), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (3434, 3444), True, 'import numpy as np\n'), ((3455, 3473), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (3463, 3473), True, 'import numpy as np\n'), ((3484, 3504), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (3492, 3504), True, 'import numpy as np\n'), ((3563, 3583), 'numpy.dtype', 'np.dtype', (['spec.dtype'], {}), '(spec.dtype)\n', (3571, 3583), True, 'import numpy as np\n'), ((4528, 4573), 'numpy.asarray', 'np.asarray', (['[arr[0], arr[1], arr[0] + arr[1]]'], {}), '([arr[0], arr[1], arr[0] + arr[1]])\n', (4538, 4573), True, 'import numpy as np\n')] |
import cv2
import itertools, os, time
import numpy as np
from Model import get_Model
from parameter import letters
import argparse
from keras import backend as K
# 0 = test/inference phase, so training-only layer behavior is disabled.
K.set_learning_phase(0)
def decode_label(out):
    """CTC-style decode: argmax per timestep, collapse repeats, map to letters."""
    # out : (1, 32, 42)
    best_path = list(np.argmax(out[0, 2:], axis=1))  # get max index -> len = 32
    collapsed = [key for key, _ in itertools.groupby(best_path)]  # remove overlap value
    # Indices beyond the alphabet (e.g. the CTC blank) are skipped.
    return ''.join(letters[idx] for idx in collapsed if idx < len(letters))
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--weight", help="weight file directory",
                    type=str, default="Final_weight.hdf5")
parser.add_argument("-t", "--test_img", help="Test image directory",
                    type=str, default="./smallDB/train/")
args = parser.parse_args()

# Get CRNN model
model = get_Model(training=False)

try:
    model.load_weights(args.weight)
    print("...Previous weight data...")
except Exception as exc:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; chain the original error so the root cause stays visible.
    raise Exception("No weight file!") from exc

test_dir = args.test_img
test_imgs = os.listdir(args.test_img)
total = 0           # number of images evaluated
acc = 0             # exact full-string matches
letter_total = 0    # characters compared (max of pred/truth lengths)
letter_acc = 0      # per-character matches
start = time.time()
for test_img in test_imgs:
    img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
    # Normalize to [-1, 1] and transpose so width becomes the sequence axis.
    img_pred = img.astype(np.float32)
    img_pred = cv2.resize(img_pred, (256, 32))
    img_pred = (img_pred / 255.0) * 2.0 - 1.0
    img_pred = img_pred.T
    # Add channel and batch axes: (W, H) -> (1, W, H, 1).
    img_pred = np.expand_dims(img_pred, axis=-1)
    img_pred = np.expand_dims(img_pred, axis=0)

    net_out_value = model.predict(img_pred)
    pred_texts = decode_label(net_out_value)
    # Ground truth is the file name minus its 9-character suffix
    # (presumably "_NNNN.ext" -- confirm against the dataset naming scheme).
    truth = test_img[0:-9]
    for i in range(min(len(pred_texts), len(truth))):
        if pred_texts[i] == truth[i]:
            letter_acc += 1
    letter_total += max(len(pred_texts), len(truth))
    if pred_texts == truth:
        acc += 1
    total += 1
    print('Predicted: %s / True: %s' % (pred_texts, truth))
end = time.time()
total_time = end - start
# Guard against an empty test directory to avoid ZeroDivisionError.
if total == 0:
    print("No test images found in", test_dir)
else:
    print("Time : ", total_time / total)
    print("ACC : ", acc / total)
    print("letter ACC : ", letter_acc / letter_total)
| [
"os.listdir",
"itertools.groupby",
"argparse.ArgumentParser",
"Model.get_Model",
"numpy.argmax",
"numpy.expand_dims",
"time.time",
"cv2.resize",
"keras.backend.set_learning_phase",
"cv2.imread"
] | [((162, 185), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), '(0)\n', (182, 185), True, 'from keras import backend as K\n'), ((531, 556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (554, 556), False, 'import argparse\n'), ((864, 889), 'Model.get_Model', 'get_Model', ([], {'training': '(False)'}), '(training=False)\n', (873, 889), False, 'from Model import get_Model\n'), ((1073, 1098), 'os.listdir', 'os.listdir', (['args.test_img'], {}), '(args.test_img)\n', (1083, 1098), False, 'import itertools, os, time\n'), ((1157, 1168), 'time.time', 'time.time', ([], {}), '()\n', (1166, 1168), False, 'import itertools, os, time\n'), ((2207, 2218), 'time.time', 'time.time', ([], {}), '()\n', (2216, 2218), False, 'import itertools, os, time\n'), ((1206, 1259), 'cv2.imread', 'cv2.imread', (['(test_dir + test_img)', 'cv2.IMREAD_GRAYSCALE'], {}), '(test_dir + test_img, cv2.IMREAD_GRAYSCALE)\n', (1216, 1259), False, 'import cv2\n'), ((1314, 1345), 'cv2.resize', 'cv2.resize', (['img_pred', '(256, 32)'], {}), '(img_pred, (256, 32))\n', (1324, 1345), False, 'import cv2\n'), ((1432, 1465), 'numpy.expand_dims', 'np.expand_dims', (['img_pred'], {'axis': '(-1)'}), '(img_pred, axis=-1)\n', (1446, 1465), True, 'import numpy as np\n'), ((1481, 1513), 'numpy.expand_dims', 'np.expand_dims', (['img_pred'], {'axis': '(0)'}), '(img_pred, axis=0)\n', (1495, 1513), True, 'import numpy as np\n'), ((256, 285), 'numpy.argmax', 'np.argmax', (['out[0, 2:]'], {'axis': '(1)'}), '(out[0, 2:], axis=1)\n', (265, 285), True, 'import numpy as np\n'), ((346, 373), 'itertools.groupby', 'itertools.groupby', (['out_best'], {}), '(out_best)\n', (363, 373), False, 'import itertools, os, time\n')] |
from __future__ import annotations # noqa: F401
import re
import warnings
import numpy as np
import pandas as pd
import xarray as xr
from .config import config
from .grid import _wrf_grid_from_dataset
def _decode_times(ds: xr.Dataset) -> xr.Dataset:
    """
    Decode the time variable to datetime64.

    Parses the string-typed ``Times`` variable (trying the WRF underscore
    format first, then an ISO-like fallback with fractional seconds) and
    assigns the result as the ``Time`` coordinate.  If ``XTIME`` has already
    been decoded to datetime64, it is converted back to an offset from the
    reference time in its ``description`` attribute.
    """
    try:
        _time = pd.to_datetime(
            ds.Times.data.astype('str'), errors='raise', format='%Y-%m-%d_%H:%M:%S'
        )
    except ValueError:
        # Fallback: some files use a 'T' separator with fractional seconds.
        _time = pd.to_datetime(
            ds.Times.data.astype('str'), errors='raise', format='%Y-%m-%dT%H:%M:%S.%f'
        )
    ds = ds.assign_coords({'Time': _time})
    ds.Time.attrs = {'long_name': 'Time', 'standard_name': 'time'}
    # make XTIME be consistent with its description
    if 'XTIME' in ds.variables and np.issubdtype(ds.XTIME.dtype, np.datetime64):
        # Subtract the reference instant parsed from the description
        # ('minutes since YYYY-MM-DD HH:MM:SS') to recover the offset.
        ds['XTIME'].data = (
            ds.XTIME.data
            - pd.to_datetime(
                ds['XTIME'].description, format='minutes since %Y-%m-%d %H:%M:%S'
            ).to_datetime64()
        )
    return ds
def _clean_brackets_from_units(ds: xr.Dataset) -> xr.Dataset:
    """
    Remove configured bracket characters from every variable's ``units`` attribute.

    The characters come from the ``brackets_to_clean_from_units`` config entry
    and are matched as a single regex character class.
    """
    # Escape every character individually: the previous '\\'.join() trick left
    # the *first* configured character unescaped, which could corrupt the
    # character class for regex-special characters such as ']'.
    chars = ''.join(re.escape(c) for c in config.get('brackets_to_clean_from_units'))
    regex = re.compile(f'[{chars}]')
    for var in ds.variables:
        if 'units' in ds[var].attrs:
            ds[var].attrs['units'] = regex.sub('', ds[var].attrs['units'])
    return ds
def _make_units_pint_friendly(ds: xr.Dataset) -> xr.Dataset:
    """
    Harmonizes awkward WRF units into pint-friendly ones
    """
    ds = _clean_brackets_from_units(ds)
    # Invert the config's "new_unit -> [wrf_unit, ...]" lists into a flat
    # "wrf_unit -> new_unit" lookup table.
    lookup = {}
    for new_unit, wrf_unit_list in config.get('unit_harmonization_map').items():
        for wrf_unit in wrf_unit_list:
            lookup[wrf_unit] = new_unit
    for name in ds.data_vars:
        attrs = ds[name].attrs
        current = attrs.get('units')
        if current in lookup:
            replacement = lookup[current]
            if replacement == 'invalid':
                # 'invalid' marks units that should simply be dropped.
                attrs.pop('units', None)
            else:
                attrs['units'] = replacement
    return ds
def _modify_attrs_to_cf(ds: xr.Dataset) -> xr.Dataset:
    """Modify the attributes of the dataset to comply with CF conventions.

    Applies the unconditional ``cf_attribute_map`` config entries first, then
    the ``conditional_cf_attribute_map`` entries selected by the dataset's
    ``HYBRID_OPT`` global attribute.
    """
    # Universal updates
    vars_to_update = set(config.get('cf_attribute_map').keys()).intersection(set(ds.data_vars))
    for variable in vars_to_update:
        ds[variable].attrs.update(config.get(f'cf_attribute_map.{variable}'))

    # Conditional updates (right now just vertical coordinate type)
    # A missing HYBRID_OPT attribute is treated the same as HYBRID_OPT == 0.
    hybrid_opt_condition = 'HYBRID_OPT==0' if getattr(ds, 'HYBRID_OPT', 0) == 0 else 'HYBRID_OPT!=0'
    vars_to_update = set(
        config.get(f'conditional_cf_attribute_map.{hybrid_opt_condition}').keys()
    ).intersection(set(ds.data_vars))
    for variable in vars_to_update:
        ds[variable].attrs.update(
            config.get(f'conditional_cf_attribute_map.{hybrid_opt_condition}.{variable}')
        )
    return ds
def _collapse_time_dim(ds: xr.Dataset) -> xr.Dataset:
    """Promote configured lat/lon and vertical variables to coordinates and
    drop their redundant leading (assumed time) axis, keeping the first slice.
    """
    # This "time dimension collapsing" assumption is wrong with moving nests
    # and should be applied to static, nested domains.
    lat_lon_coords = set(config.get('latitude_coords') + config.get('longitude_coords'))
    vertical_coords = set(config.get('vertical_coords'))
    coords = set(ds.variables).intersection(lat_lon_coords.union(vertical_coords))
    ds = ds.set_coords(coords)
    for coord in ds.coords:
        data_to_reassign = None
        # 3-D horizontal / 2-D vertical coords carry the redundant leading axis.
        if coord in lat_lon_coords and ds[coord].ndim == 3:
            data_to_reassign = ds[coord].data[0, :, :]
        elif coord in vertical_coords and ds[coord].ndim == 2:
            data_to_reassign = ds[coord].data[0, :]
        if data_to_reassign is not None:
            # Preserve attrs/encoding across the coordinate reassignment,
            # which would otherwise discard them.
            attrs, encoding = ds[coord].attrs, ds[coord].encoding
            ds = ds.assign_coords({coord: (ds[coord].dims[1:], data_to_reassign)})
            ds[coord].attrs = attrs
            ds[coord].encoding = encoding
    return ds
def _include_projection_coordinates(ds: xr.Dataset) -> xr.Dataset:
    """Introduce projection dimension coordinate values and CRS."""
    try:
        grid_components = _wrf_grid_from_dataset(ds)
    except KeyError:
        # Best effort: without the required dims/attrs, warn and return unchanged.
        warnings.warn(
            'Unable to create coordinate values and CRS due to insufficient dimensions or '
            'projection metadata.'
        )
        return ds
    horizontal_dims = set(config.get('horizontal_dims')).intersection(set(ds.dims))

    # Include dimension coordinates
    for dim in horizontal_dims:
        ds[dim] = (dim, grid_components[dim], config.get(f'cf_attribute_map.{dim}'))

    # Include CRS as a dimensionless variable carrying the CF grid-mapping attrs.
    ds['wrf_projection'] = (tuple(), grid_components['crs'], grid_components['crs'].to_cf())
    # Point every spatially-dimensioned variable at the CRS via 'grid_mapping'.
    for varname in ds.data_vars:
        if any(dim in ds[varname].dims for dim in horizontal_dims):
            ds[varname].attrs['grid_mapping'] = 'wrf_projection'
    return ds
def _assign_coord_to_dim_of_different_name(ds: xr.Dataset) -> xr.Dataset:
    """Copy each configured variable onto its target dim name, then remove it.

    Entries whose source variable is absent are silently skipped.
    """
    for source_name, target_dim in config.get('assign_coord_to_dim_map').items():
        try:
            ds[target_dim] = ds[source_name]
        except KeyError:
            continue
        del ds[source_name]
    return ds
def _rename_dims(ds: xr.Dataset) -> xr.Dataset:
    """Rename dims for more consistent semantics."""
    # Restrict the configured mapping to dims actually present in the dataset.
    full_map = config.get('rename_dim_map')
    applicable = {old: new for old, new in full_map.items() if old in ds.dims}
    return ds.rename(applicable)
def _calc_base_diagnostics(ds: xr.Dataset, drop: bool = True) -> xr.Dataset:
    """Calculate the four basic fields that WRF does not have in physically meaningful form.

    Derives air_potential_temperature, air_pressure, geopotential, and
    geopotential_height from the raw perturbation/base component pairs.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset representing WRF data opened via normal backend, with chunking.
    drop : bool
        Decide whether to drop the components of origin after creating the diagnostic fields from
        them.

    Returns
    -------
    xarray.Dataset
        The dataset with the diagnostic fields added (and the source fields
        removed when ``drop`` is True).

    Notes
    -----
    This operation should be called before destaggering.
    """
    # Potential temperature: 'T' holds the perturbation from a 300 K base
    # state, as implied by the fixed +300 offset.
    if 'T' in ds.data_vars:
        ds['air_potential_temperature'] = ds['T'] + 300
        ds['air_potential_temperature'].attrs = {
            'units': 'K',
            'standard_name': 'air_potential_temperature',
        }
        if drop:
            del ds['T']
    # Pressure: perturbation 'P' plus base-state 'PB'.
    if 'P' in ds.data_vars and 'PB' in ds.data_vars:
        ds['air_pressure'] = ds['P'] + ds['PB']
        ds['air_pressure'].attrs = {
            'units': ds['P'].attrs.get('units', 'Pa'),
            'standard_name': 'air_pressure',
        }
        if drop:
            del ds['P'], ds['PB']
    # Geopotential and geopotential height: 'PH' (perturbation) + 'PHB' (base);
    # height is geopotential divided by g = 9.81 m s**-2.
    if 'PH' in ds.data_vars and 'PHB' in ds.data_vars:
        ds['geopotential'] = ds['PH'] + ds['PHB']
        ds['geopotential'].attrs = {
            'units': 'm**2 s**-2',
            'standard_name': 'geopotential',
            'stagger': ds['PH'].attrs.get('stagger', 'Z'),
        }
        ds['geopotential_height'] = ds['geopotential'] / 9.81
        ds['geopotential_height'].attrs = {
            'units': 'm',
            'standard_name': 'geopotential_height',
            'stagger': ds['PH'].attrs.get('stagger', 'Z'),
        }
        if drop:
            del ds['PH'], ds['PHB']
    return ds
| [
"warnings.warn",
"numpy.issubdtype",
"pandas.to_datetime"
] | [((800, 844), 'numpy.issubdtype', 'np.issubdtype', (['ds.XTIME.dtype', 'np.datetime64'], {}), '(ds.XTIME.dtype, np.datetime64)\n', (813, 844), True, 'import numpy as np\n'), ((4319, 4443), 'warnings.warn', 'warnings.warn', (['"""Unable to create coordinate values and CRS due to insufficient dimensions or projection metadata."""'], {}), "(\n 'Unable to create coordinate values and CRS due to insufficient dimensions or projection metadata.'\n )\n", (4332, 4443), False, 'import warnings\n'), ((915, 1001), 'pandas.to_datetime', 'pd.to_datetime', (["ds['XTIME'].description"], {'format': '"""minutes since %Y-%m-%d %H:%M:%S"""'}), "(ds['XTIME'].description, format=\n 'minutes since %Y-%m-%d %H:%M:%S')\n", (929, 1001), True, 'import pandas as pd\n')] |
import unittest
import cellpylib as cpl
import numpy as np
import os
# Directory containing this test file, used to locate fixture files under 'resources/'.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHopfieldNet(unittest.TestCase):
    """Tests cellpylib's Hopfield network against saved fixture data."""

    def test_hopfield_net(self):
        # Train on three 5-wide bitmap digits, then check that the net recalls
        # the full "2" pattern from a degraded (bottom-half-only) probe.
        np.random.seed(0)
        # patterns for training
        zero = [
            0, 1, 1, 1, 0,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            0, 1, 1, 1, 0,
            0, 0, 0, 0, 0]
        one = [
            0, 1, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 0, 0, 0]
        two = [
            1, 1, 1, 0, 0,
            0, 0, 0, 1, 0,
            0, 0, 0, 1, 0,
            0, 1, 1, 0, 0,
            1, 0, 0, 0, 0,
            1, 1, 1, 1, 1,
            0, 0, 0, 0, 0]
        # replace the zeroes with -1 to make these vectors bipolar instead of binary
        one = [-1 if x == 0 else x for x in one]
        two = [-1 if x == 0 else x for x in two]
        zero = [-1 if x == 0 else x for x in zero]
        P = [zero, one, two]
        # One cell per pixel of the 35-element patterns.
        hopfield_net = cpl.HopfieldNet(num_cells=35)
        hopfield_net.train(P)
        # Trained weight matrix must match the saved fixture exactly.
        expected_weights = self._convert_to_ndarray("hopfield_net_weights.txt")
        np.testing.assert_equal(expected_weights, hopfield_net.W)
        expected_activities = self._convert_to_ndarray("hopfield_net.ca")
        # Degraded probe: only the bottom rows of the "2" pattern are set.
        half_two = [
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 1, 1, 0, 0,
            1, 0, 0, 0, 0,
            1, 1, 1, 1, 1,
            0, 0, 0, 0, 0]
        half_two = [-1 if x == 0 else x for x in half_two]
        cellular_automaton = np.array([half_two])
        # Evolve for 155 steps and compare the full activity history.
        cellular_automaton = cpl.evolve(cellular_automaton, timesteps=155,
                                      apply_rule=hopfield_net.apply_rule, r=hopfield_net.r)
        np.testing.assert_equal(expected_activities, cellular_automaton)

    def _convert_to_ndarray(self, filename, dtype=int):
        """Parse a fixture file from 'resources/' into a 2-D ndarray.

        The files store text like ``[[1,2],[3,4]]``; brackets are stripped and
        rows are split on ``;`` after replacing the ``],`` row separators.
        """
        with open(os.path.join(THIS_DIR, 'resources', filename), 'r') as content_file:
            content = content_file.read()
        content = content.replace('[[', '')
        content = content.replace(']]', '')
        content = content.replace('[', '')
        content = content.replace('],', ';')
        content = [[dtype(i) for i in x.split(',')] for x in content.split(';')]
        return np.array(content)
"numpy.testing.assert_equal",
"cellpylib.HopfieldNet",
"cellpylib.evolve",
"os.path.join",
"numpy.array",
"numpy.random.seed",
"os.path.abspath"
] | [((97, 122), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import os\n'), ((210, 227), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (224, 227), True, 'import numpy as np\n'), ((1164, 1193), 'cellpylib.HopfieldNet', 'cpl.HopfieldNet', ([], {'num_cells': '(35)'}), '(num_cells=35)\n', (1179, 1193), True, 'import cellpylib as cpl\n'), ((1313, 1370), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_weights', 'hopfield_net.W'], {}), '(expected_weights, hopfield_net.W)\n', (1336, 1370), True, 'import numpy as np\n'), ((1746, 1766), 'numpy.array', 'np.array', (['[half_two]'], {}), '([half_two])\n', (1754, 1766), True, 'import numpy as np\n'), ((1797, 1901), 'cellpylib.evolve', 'cpl.evolve', (['cellular_automaton'], {'timesteps': '(155)', 'apply_rule': 'hopfield_net.apply_rule', 'r': 'hopfield_net.r'}), '(cellular_automaton, timesteps=155, apply_rule=hopfield_net.\n apply_rule, r=hopfield_net.r)\n', (1807, 1901), True, 'import cellpylib as cpl\n'), ((1946, 2010), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_activities', 'cellular_automaton'], {}), '(expected_activities, cellular_automaton)\n', (1969, 2010), True, 'import numpy as np\n'), ((2469, 2486), 'numpy.array', 'np.array', (['content'], {}), '(content)\n', (2477, 2486), True, 'import numpy as np\n'), ((2086, 2131), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""resources"""', 'filename'], {}), "(THIS_DIR, 'resources', filename)\n", (2098, 2131), False, 'import os\n')] |
# !/usr/bin/env python
# coding=utf-8
"""
Make a graph for lecture 2, hippo digestion
"""
from __future__ import print_function
import sys
import numpy as np
from scipy import interpolate
from common import make_fig, GOOD_RET
__author__ = 'hbmayes'
def graph_alg_eq():
    """Plot the hippo-digestion design equation C_F0/(-r_F) versus conversion."""
    # Conversion axis: stop just short of 1.0, where the denominator vanishes.
    x_vals = np.linspace(0.0, 0.999, 2001)
    # Design equation evaluated over the conversion range.
    y_vals = (1.00 + 16.5 * (1.00 - x_vals)) / (1.75 * (1 - x_vals))
    make_fig('lect2_hippo', x_vals, y_vals,
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{C_{F0}}{-r_F}$ (hr)',
             x_lima=0.0, x_limb=1.0, y_lima=0.0, y_limb=24.0,
             )
def graph_points():
    """
    Given a few points, makes a smooth curve
    :return: saves a file with the graph
    """
    fig_name = 'lect2_num_solv'
    # given data
    x = np.array([0.0, 0.4, 0.6, 0.8])
    ra = np.array([0.01, 0.0080, 0.005, 0.002])
    # Design-equation ordinate F_A0/(-r_A) with F_A0 = 2.0 (see y_label below).
    design_eq = np.divide(2.0, ra)
    print("Generic example design equation points: {}".format(["{:0.1f}".format(x) for x in design_eq]))
    # cubic spline
    # NOTE(review): despite the comment above, the interpolation below uses
    # kind='quadratic' -- confirm which order was intended.
    x_new = np.linspace(0.0, 0.8, 101)
    # alternately, from interpolation
    y_interp = interpolate.interp1d(x, design_eq, kind='quadratic')
    make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_interp(x_new),
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(L\right)$',
             x_lima=0.0, x_limb=0.8, y_lima=0.0, y_limb=1000,
             fig_width=4, color2='green',
             )
def graph_smooth_from_pts():
    """
    Given a few points, interpolates a smooth curve
    :return: saves a file with the graph
    """
    fig_name = 'lect2_isom'
    # given data
    x = np.array([0.0, 0.2, 0.4, 0.6, 0.65])
    ra = np.array([39.0, 53.0, 59.0, 38.0, 25.0])
    # Design-equation ordinate F_A0/(-r_A) with F_A0 = 50.0.
    design_eq = np.divide(50.0, ra)
    print("Isom example design equation points: {}".format(design_eq))
    # cubic spline
    tck = interpolate.splrep(x, design_eq, s=0)
    x_new = np.linspace(0.0, 0.7, 101)
    y_new = interpolate.splev(x_new, tck, der=0)
    # alternately, from interpolation
    # NOTE(review): the splrep curve (default k=3, i.e. cubic) is labeled
    # "quadratic" below, while the kind='quadratic' interp1d is labeled
    # "cubic" -- the legend labels look swapped; confirm intent.
    cubic_interp = interpolate.interp1d(x, design_eq, kind='quadratic', fill_value="extrapolate")
    make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_new,
             x3_array=x_new, y3_array=cubic_interp(x_new),
             y1_label="data", y2_label="quadratic", y3_label="cubic",
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(m^3\right)$',
             x_lima=0.0, x_limb=0.7, y_lima=0.0, y_limb=2.5,
             )
def main():
    """Generate all lecture-2 figures and report success."""
    # Preserve the original call order: algebraic plot, points plot, spline plot.
    for grapher in (graph_alg_eq, graph_points, graph_smooth_from_pts):
        grapher()
    return GOOD_RET  # success
# Script entry point: exit with the status code returned by main().
if __name__ == '__main__':
    status = main()
    sys.exit(status)
| [
"common.make_fig",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linspace",
"scipy.interpolate.splrep",
"scipy.interpolate.splev",
"sys.exit",
"numpy.divide"
] | [((578, 616), 'numpy.linspace', 'np.linspace', (['x_start', 'x_end', 'num_steps'], {}), '(x_start, x_end, num_steps)\n', (589, 616), True, 'import numpy as np\n'), ((647, 726), 'numpy.divide', 'np.divide', (['(1.0 + 16.5 * (1.0 - conversion_scale))', '(1.75 * (1 - conversion_scale))'], {}), '(1.0 + 16.5 * (1.0 - conversion_scale), 1.75 * (1 - conversion_scale))\n', (656, 726), True, 'import numpy as np\n'), ((726, 925), 'common.make_fig', 'make_fig', (['fig_name', 'conversion_scale', 'design_eq'], {'x_label': '"""conversion (X, unitless)"""', 'y_label': '"""$\\\\displaystyle\\\\frac{C_{F0}}{-r_F}$ (hr)"""', 'x_lima': '(0.0)', 'x_limb': '(1.0)', 'y_lima': '(0.0)', 'y_limb': '(24.0)'}), "(fig_name, conversion_scale, design_eq, x_label=\n 'conversion (X, unitless)', y_label=\n '$\\\\displaystyle\\\\frac{C_{F0}}{-r_F}$ (hr)', x_lima=0.0, x_limb=1.0,\n y_lima=0.0, y_limb=24.0)\n", (734, 925), False, 'from common import make_fig, GOOD_RET\n'), ((1135, 1165), 'numpy.array', 'np.array', (['[0.0, 0.4, 0.6, 0.8]'], {}), '([0.0, 0.4, 0.6, 0.8])\n', (1143, 1165), True, 'import numpy as np\n'), ((1175, 1212), 'numpy.array', 'np.array', (['[0.01, 0.008, 0.005, 0.002]'], {}), '([0.01, 0.008, 0.005, 0.002])\n', (1183, 1212), True, 'import numpy as np\n'), ((1230, 1248), 'numpy.divide', 'np.divide', (['(2.0)', 'ra'], {}), '(2.0, ra)\n', (1239, 1248), True, 'import numpy as np\n'), ((1386, 1412), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.8)', '(101)'], {}), '(0.0, 0.8, 101)\n', (1397, 1412), True, 'import numpy as np\n'), ((1466, 1518), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'design_eq'], {'kind': '"""quadratic"""'}), "(x, design_eq, kind='quadratic')\n", (1486, 1518), False, 'from scipy import interpolate\n'), ((2032, 2068), 'numpy.array', 'np.array', (['[0.0, 0.2, 0.4, 0.6, 0.65]'], {}), '([0.0, 0.2, 0.4, 0.6, 0.65])\n', (2040, 2068), True, 'import numpy as np\n'), ((2078, 2118), 'numpy.array', 'np.array', (['[39.0, 53.0, 59.0, 38.0, 25.0]'], {}), 
'([39.0, 53.0, 59.0, 38.0, 25.0])\n', (2086, 2118), True, 'import numpy as np\n'), ((2135, 2154), 'numpy.divide', 'np.divide', (['(50.0)', 'ra'], {}), '(50.0, ra)\n', (2144, 2154), True, 'import numpy as np\n'), ((2256, 2293), 'scipy.interpolate.splrep', 'interpolate.splrep', (['x', 'design_eq'], {'s': '(0)'}), '(x, design_eq, s=0)\n', (2274, 2293), False, 'from scipy import interpolate\n'), ((2306, 2332), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.7)', '(101)'], {}), '(0.0, 0.7, 101)\n', (2317, 2332), True, 'import numpy as np\n'), ((2345, 2381), 'scipy.interpolate.splev', 'interpolate.splev', (['x_new', 'tck'], {'der': '(0)'}), '(x_new, tck, der=0)\n', (2362, 2381), False, 'from scipy import interpolate\n'), ((2439, 2517), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'design_eq'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "(x, design_eq, kind='quadratic', fill_value='extrapolate')\n", (2459, 2517), False, 'from scipy import interpolate\n'), ((3119, 3135), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (3127, 3135), False, 'import sys\n')] |
import numpy as np
from astropy.io import fits
def stitch_all_images(all_hdus,date):
    """Group HDUs by (camera, filenum, imtype) and stitch each group's four
    opamp quadrants into a single HDU.

    Returns a dict keyed on (camera, filenum, imtype, None) mapping to the
    stitched HDU; an ``add_history`` card records the stitch date.
    """
    # First pass: bucket the per-opamp HDUs by exposure.
    grouped = {}
    for (camera, filenum, imtype, opamp), hdu in all_hdus.items():
        exposure_key = (camera, filenum, imtype)
        grouped.setdefault(exposure_key, {})[opamp] = hdu
    # Second pass: stitch each bucket and tag it with a history card.
    stitched = {}
    for (camera, filenum, imtype), opamp_hdus in grouped.items():
        merged_hdu = stitch_these_camera_data(opamp_hdus)
        merged_hdu.header.add_history("Stitched 4 opamps by quickreduce on {}".format(date))
        stitched[(camera, filenum, imtype, None)] = merged_hdu
    return stitched
def stitch_these_camera_data(hdudict):
    """Stitch the four opamp quadrant images of one exposure into one HDU.

    ``hdudict`` maps opamp number -> HDU; each header must carry CHOFFX /
    CHOFFY chip offsets whose signs locate the quadrant.  Returns a
    PrimaryHDU with the merged image, flipped so wavelength increases with
    pixel number, and a header adapted from opamp 1.
    """
    # Sign of the chip offset -> quadrant letter (x: left/right, y: bottom/up).
    xorients = {-1: 'l', 1: 'r'}
    yorients = {-1: 'b', 1: 'u'}
    img = {}
    for opamp,hdu in hdudict.items():
        header = hdu.header
        xsign = np.sign(header['CHOFFX'])
        ysign = np.sign(header['CHOFFY'])
        # Two-letter quadrant code, e.g. 'bl' = bottom-left.
        location = yorients[ysign] + xorients[xsign]
        #print("Imtype: {} In filenum: {} Camera: {} Opamp: {} located at {}".format(imtype, filenum, camera, opamp,
        #                                                                          location))
        img[location] = hdu.data
    trans = {}
    ## Transform opamps to the correct directions
    ## (mirror each quadrant so all read in the same orientation as 'bl').
    trans['bl'] = img['bl']
    trans['br'] = np.fliplr(img['br'])
    trans['ul'] = np.flipud(img['ul'])
    trans['ur'] = np.fliplr(np.flipud(img['ur']))
    y_bl, x_bl = trans['bl'].shape
    y_ul, x_ul = trans['ul'].shape
    y_br, x_br = trans['br'].shape
    y_ur, x_ur = trans['ur'].shape
    ## Make sure the shapes work (warn only; stitching proceeds regardless)
    if y_bl != y_br or y_ul != y_ur:
        print("yr and yl not the same")
    if x_bl != x_ul or x_br != x_ur:
        print("xb and xu not the same")
    ## Create the full-sized image array
    merged = np.ndarray(shape=(y_bl + y_ul, x_bl + x_br))
    ## Assign the opamps to the correct region of the array
    ## bl
    merged[:y_bl, :x_bl] = trans['bl']
    ## ul
    merged[y_bl:, :x_bl] = trans['ul']
    ## br
    merged[:y_bl, x_bl:] = trans['br']
    ## ur
    merged[y_bl:, x_bl:] = trans['ur']
    ## This returns the image with wavelength increasing to the left
    ## For simplicity, flip image so wavelength increases with pixel number
    merged = np.fliplr(merged)
    ## Update header (base it on opamp 1's header)
    opamp = 1
    header = hdudict[opamp].header.copy()
    # NOTE(review): NAXIS1 is conventionally the x (column) length, but it is
    # set here from the summed y sizes (and NAXIS2 from the x sizes); this is
    # only consistent for square mosaics -- confirm intended.
    header['NAXIS1'] = int(y_bl + y_ul)
    header['NAXIS2'] = int(x_bl + x_br)
    header['DATASEC'] = '[1:{},1:{}]'.format(header['NAXIS1'],header['NAXIS2'])
    header['TRIMSEC'] = header['DATASEC']
    # Offsets no longer apply to the merged frame.
    header['CHOFFX'] = 0
    header['CHOFFY'] = 0
    # Strip the per-opamp suffix (e.g. 'c1...') from the file name.
    header['FILENAME'] = header['FILENAME'].split('c{}'.format(opamp))[0]
    for key in ['OPAMP']:#, 'CHOFFX', 'CHOFFY']:
        header.remove(key)
    ## Save data and header to working memory
    outhdu = fits.PrimaryHDU(data=merged, header=header)
return outhdu | [
"astropy.io.fits.PrimaryHDU",
"numpy.flipud",
"numpy.fliplr",
"numpy.ndarray",
"numpy.sign"
] | [((1388, 1408), 'numpy.fliplr', 'np.fliplr', (["img['br']"], {}), "(img['br'])\n", (1397, 1408), True, 'import numpy as np\n'), ((1427, 1447), 'numpy.flipud', 'np.flipud', (["img['ul']"], {}), "(img['ul'])\n", (1436, 1447), True, 'import numpy as np\n'), ((1882, 1926), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(y_bl + y_ul, x_bl + x_br)'}), '(shape=(y_bl + y_ul, x_bl + x_br))\n', (1892, 1926), True, 'import numpy as np\n'), ((2343, 2360), 'numpy.fliplr', 'np.fliplr', (['merged'], {}), '(merged)\n', (2352, 2360), True, 'import numpy as np\n'), ((2902, 2945), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'merged', 'header': 'header'}), '(data=merged, header=header)\n', (2917, 2945), False, 'from astropy.io import fits\n'), ((908, 933), 'numpy.sign', 'np.sign', (["header['CHOFFX']"], {}), "(header['CHOFFX'])\n", (915, 933), True, 'import numpy as np\n'), ((950, 975), 'numpy.sign', 'np.sign', (["header['CHOFFY']"], {}), "(header['CHOFFY'])\n", (957, 975), True, 'import numpy as np\n'), ((1476, 1496), 'numpy.flipud', 'np.flipud', (["img['ur']"], {}), "(img['ur'])\n", (1485, 1496), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from ssvm.utils import item_2_idc
class Test_item_2_idc(unittest.TestCase):
    """Unit tests for ``ssvm.utils.item_2_idc``."""

    def test_correctness(self):
        # Label lists per example; in X, example s occurs len(l_Y[s]) times
        # with the value s + 1.
        label_lists = [[], ["A", "B", "C"], ["A"], ["D", "E"], ["D"], [], ["A", "X", "Z", "Y"], []]
        example_ids = np.array([2, 2, 2, 3, 4, 4, 5, 7, 7, 7, 7])
        index_sets = item_2_idc(label_lists)
        for example, labels in enumerate(label_lists):
            selected = example_ids[index_sets[example]]
            # Every selected entry belongs to this example ...
            self.assertTrue(np.all(selected == (example + 1)))
            # ... and exactly one entry exists per label.
            self.assertEqual(len(labels), len(selected))
if __name__ == '__main__':
    # Discover and run the tests when executed as a script.
    unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.all",
"ssvm.utils.item_2_idc"
] | [((506, 521), 'unittest.main', 'unittest.main', ([], {}), '()\n', (519, 521), False, 'import unittest\n'), ((250, 293), 'numpy.array', 'np.array', (['[2, 2, 2, 3, 4, 4, 5, 7, 7, 7, 7]'], {}), '([2, 2, 2, 3, 4, 4, 5, 7, 7, 7, 7])\n', (258, 293), True, 'import numpy as np\n'), ((309, 324), 'ssvm.utils.item_2_idc', 'item_2_idc', (['l_Y'], {}), '(l_Y)\n', (319, 324), False, 'from ssvm.utils import item_2_idc\n'), ((390, 416), 'numpy.all', 'np.all', (['(X[out[s]] == s + 1)'], {}), '(X[out[s]] == s + 1)\n', (396, 416), True, 'import numpy as np\n')] |
"""
DeepLabCut Toolbox
https://github.com/AlexEMG/DeepLabCut
<NAME>, <EMAIL>
<NAME>, <EMAIL>
This script analyzes videos based on a trained network (as specified in myconfig_analysis.py)
You need tensorflow for evaluation. Run by:
CUDA_VISIBLE_DEVICES=0 python3 AnalyzeVideos.py
"""
####################################################
# Dependencies
####################################################
import os
import sys
# Add some folders to the python path
path_to_this_script = os.path.abspath(__file__)
path_to_delectable_folder = os.path.dirname(path_to_this_script)
dlc_folder_path = os.path.join(path_to_delectable_folder, "dlc")
sys.path.append(os.path.join(dlc_folder_path, "pose-tensorflow"))
sys.path.append(os.path.join(dlc_folder_path, "Generating_a_Training_Set"))
#sys.path.append(model_folder_path)
import dlct
#from myconfig_analysis import cropping, Task, date, \
# trainingsFraction, resnet, snapshotindex, shuffle,x1, x2, y1, y2, videotype, storedata_as_csv
# Deep-cut dependencies
from config import load_config
from nnet import predict
from dataset.pose_dataset import data_to_input
# Dependencies for video:
import pickle
# import matplotlib.pyplot as plt
import imageio
#imageio.plugins.ffmpeg.download()
from skimage.util import img_as_ubyte
from moviepy.editor import VideoFileClip
import skimage
import skimage.color
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
def getpose(image, cfg, outputs, outall=False):
    """Run the pose CNN on one frame.  Adapted from DeeperCut (see the
    pose-tensorflow folder).

    Returns ``pose``; with ``outall=True`` also returns the score map and
    location-refinement arrays.  Relies on the module-level ``sess`` and
    ``inputs`` created by ``predict.setup_pose_prediction``.
    """
    # Frames may arrive grayscale; the network expects 3 channels.
    rgb_frame = skimage.color.gray2rgb(image)
    batch = data_to_input(rgb_frame)
    raw_outputs = sess.run(outputs, feed_dict={inputs: batch})
    scmap, locref = predict.extract_cnn_output(raw_outputs, cfg)
    pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
    return (scmap, locref, pose) if outall else pose
def output_file_path_from_input_paths(model_folder_path, video_file_path, output_folder_path) :
    """Build the default output path: '<video stem>-<model folder name>.h5'
    inside ``output_folder_path``.

    (Removed an unused local that held the video's folder path.)
    """
    model_folder_name = dlct.file_name_without_extension_from_path(model_folder_path)
    video_stem = dlct.file_name_without_extension_from_path(video_file_path)
    output_file_name = video_stem + '-' + model_folder_name + '.h5'
    return os.path.join(output_folder_path, output_file_name)
# Get the command-line args:
#   argv[1] = model folder, argv[2] = video file, optional argv[3] = output path.
model_folder_path = os.path.abspath(sys.argv[1])
video_file_path = os.path.abspath(sys.argv[2])
if len(sys.argv) >= 4:
    output_file_path = os.path.abspath(sys.argv[3])
else:
    # Default output lives in the current working directory.
    output_file_path = output_file_path_from_input_paths(model_folder_path, video_file_path, os.getcwd())

# Load the configuration file shipped alongside the model.
configuration_file_name = 'myconfig.py'
configuration_file_path = os.path.join(model_folder_path, configuration_file_name)
# For backwards-compatibility: older models shipped 'myconfig_analysis.py'.
if not os.path.exists(configuration_file_path):
    configuration_file_name = 'myconfig_analysis.py'
    configuration_file_path = os.path.join(model_folder_path, configuration_file_name)
configuration = dlct.load_configuration_file(configuration_file_path)

Task = configuration['Task']
date = configuration['date']
#trainingsFraction = configuration['trainingsFraction']
resnet = configuration['resnet']
snapshotindex = configuration['snapshotindex']
#shuffle = configuration['shuffle']
cropping = configuration['cropping']
x1 = configuration['x1']
x2 = configuration['x2']
y1 = configuration['y1']
y2 = configuration['y2']
#videotype = configuration['videotype']
#storedata_as_csv = configuration['storedata_as_csv']

# Do some things to accomodate myconfig_analysis.py files:
# fall back to the singular/legacy keys when the list-valued ones are absent.
try:
    trainingFractionList = configuration['TrainingFraction']
except KeyError:
    trainingFractionList = [ configuration['trainingsFraction'] ]
try:
    shuffleList = configuration['Shuffles']
except KeyError:
    shuffleList = [1]
# These things are in myconfig_analysis.py in raw DeepLabCut.
# Only the first fraction/shuffle entry is used here.
trainingFraction = trainingFractionList[0]
shuffle = shuffleList[0]
storedata_as_csv = True
####################################################
# Loading data, and defining model folder
####################################################
#basefolder = os.path.join('..','pose-tensorflow','models')
#basefolder = os.path.join(network_file_path, 'network')
# Model directory name encodes task, date, training fraction and shuffle.
modelfolder = os.path.join(model_folder_path,
                           Task + str(date) + '-trainset' + str(int(trainingFraction * 100)) + 'shuffle' + str(shuffle))
cfg = load_config(os.path.join(modelfolder , 'test' ,"pose_cfg.yaml"))
##################################################
# Load and setup CNN part detector
##################################################
# Check which snapshots are available and sort them by # iterations
Snapshots = np.array([
    fn.split('.')[0]
    for fn in os.listdir(os.path.join(modelfolder , 'train'))
    if "index" in fn
])
# Sort snapshots numerically by the iteration count after the '-'.
increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]
print(modelfolder)
print(Snapshots)
##################################################
# Compute predictions over images
##################################################
# Check if data already was generated:
cfg['init_weights'] = os.path.join(modelfolder , 'train', Snapshots[snapshotindex])
# Name for scorer: derive the iteration count from the snapshot file name.
trainingsiterations = (cfg['init_weights'].split('/')[-1]).split('-')[-1]
# Name for scorer:
scorer = 'DeepCut' + "_resnet" + str(resnet) + "_" + Task + str(
    date) + 'shuffle' + str(shuffle) + '_' + str(trainingsiterations)
# NOTE(review): this repeats the identical init_weights assignment above --
# looks redundant; confirm before removing.
cfg['init_weights'] = os.path.join(modelfolder , 'train', Snapshots[snapshotindex])
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Column index for the output table: (scorer, bodypart, x/y/likelihood).
pdindex = pd.MultiIndex.from_product(
    [[scorer], cfg['all_joints_names'], ['x', 'y', 'likelihood']],
    names=['scorer', 'bodyparts', 'coords'])
##################################################
# Datafolder
##################################################
# videofolder='../videos/' #where your folder with videos is.
frame_buffer = 10
#os.chdir(videofolder)
#videos = np.sort([fn for fn in os.listdir(os.curdir) if (videotype in fn)])
#print("Starting ", video_file_path)
#for video in videos:
video = video_file_path
#dataname = video.split('.')[0] + scorer + '.h5'
print("Loading ", video)
clip = VideoFileClip(video)
# NOTE(review): moviepy documents clip.size as (width, height); the comment
# and the ny/nx naming here suggest (height, width) -- confirm which is meant.
ny, nx = clip.size # dimensions of frame (height, width)
fps = clip.fps
#nframes = np.sum(1 for j in clip.iter_frames()) #this is slow (but accurate)
nframes_approx = int(np.ceil(clip.duration * clip.fps) + frame_buffer)
# this will overestimage number of frames (see https://github.com/AlexEMG/DeepLabCut/issues/9) This is especially a problem
# for high frame rates and long durations due to rounding errors (as <NAME> found). Later we crop the result (line 187)
if cropping:
    clip = clip.crop(
        y1=y1, y2=y2, x1=x1, x2=x2) # one might want to adjust
print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
      "fps!")
print("Overall # of frames: ", nframes_approx,"with cropped frame dimensions: ", clip.size)
start = time.time()
# One row per (over-estimated) frame; 3 columns (x, y, likelihood) per joint.
PredicteData = np.zeros((nframes_approx, 3 * len(cfg['all_joints_names'])))
clip.reader.initialize()
print("Starting to extract posture")
highest_index_with_valid_frame = -1
#for image in clip.iter_frames():
for index in tqdm(range(nframes_approx)):
    image = img_as_ubyte(clip.reader.read_frame())
    # Thanks to <NAME> for the following snipplet:
    # if close to end of video, start checking whether two adjacent frames are identical
    # this should only happen when moviepy has reached the final frame
    # if two adjacent frames are identical, terminate the loop
    if index==int(nframes_approx-frame_buffer*2):
        last_image = image
    elif index>int(nframes_approx-frame_buffer*2):
        if (image==last_image).all():
            #nframes = index
            #print("Detected frames: ", nframes)
            break
        else:
            last_image = image
    highest_index_with_valid_frame = index
    pose = getpose(image, cfg, outputs)
    PredicteData[index, :] = pose.flatten() # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!
# Actual frame count = last index that held a real (non-duplicate) frame + 1.
nframes = highest_index_with_valid_frame + 1
print("Detected frames: ", nframes)
stop = time.time()

# Run metadata; 'metadata' is assembled but not persisted in this script
# (see the commented-out pickle block below).
dictionary = {
    "start": start,
    "stop": stop,
    "run_duration": stop - start,
    "Scorer": scorer,
    "config file": cfg,
    "fps": fps,
    "frame_dimensions": (ny, nx),
    "nframes": nframes
}
metadata = {'data': dictionary}

print("Saving results...")
DataMachine = pd.DataFrame(PredicteData[:nframes,:], columns=pdindex, index=range(nframes)) #slice pose data to have same # as # of frames.
DataMachine.to_hdf(output_file_path, 'df_with_missing', format='table', mode='w')
if storedata_as_csv :
    csv_file_path = dlct.replace_extension(output_file_path, ".csv")
    DataMachine.to_csv(csv_file_path)
#with open(dataname.split('.')[0] + 'includingmetadata.pickle',
# 'wb') as f:
# pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL)
| [
"dlct.load_configuration_file",
"pandas.MultiIndex.from_product",
"os.path.exists",
"numpy.ceil",
"dlct.file_name_without_extension_from_path",
"nnet.predict.setup_pose_prediction",
"nnet.predict.extract_cnn_output",
"os.path.join",
"dlct.replace_extension",
"os.path.split",
"os.path.dirname",
... | [((493, 518), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (508, 518), False, 'import os\n'), ((547, 583), 'os.path.dirname', 'os.path.dirname', (['path_to_this_script'], {}), '(path_to_this_script)\n', (562, 583), False, 'import os\n'), ((602, 648), 'os.path.join', 'os.path.join', (['path_to_delectable_folder', '"""dlc"""'], {}), "(path_to_delectable_folder, 'dlc')\n", (614, 648), False, 'import os\n'), ((2437, 2465), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2452, 2465), False, 'import os\n'), ((2484, 2512), 'os.path.abspath', 'os.path.abspath', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (2499, 2512), False, 'import os\n'), ((2797, 2853), 'os.path.join', 'os.path.join', (['model_folder_path', 'configuration_file_name'], {}), '(model_folder_path, configuration_file_name)\n', (2809, 2853), False, 'import os\n'), ((3089, 3142), 'dlct.load_configuration_file', 'dlct.load_configuration_file', (['configuration_file_path'], {}), '(configuration_file_path)\n', (3117, 3142), False, 'import dlct\n'), ((5254, 5314), 'os.path.join', 'os.path.join', (['modelfolder', '"""train"""', 'Snapshots[snapshotindex]'], {}), "(modelfolder, 'train', Snapshots[snapshotindex])\n", (5266, 5314), False, 'import os\n'), ((5589, 5649), 'os.path.join', 'os.path.join', (['modelfolder', '"""train"""', 'Snapshots[snapshotindex]'], {}), "(modelfolder, 'train', Snapshots[snapshotindex])\n", (5601, 5649), False, 'import os\n'), ((5675, 5709), 'nnet.predict.setup_pose_prediction', 'predict.setup_pose_prediction', (['cfg'], {}), '(cfg)\n', (5704, 5709), False, 'from nnet import predict\n'), ((5720, 5854), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[[scorer], cfg['all_joints_names'], ['x', 'y', 'likelihood']]"], {'names': "['scorer', 'bodyparts', 'coords']"}), "([[scorer], cfg['all_joints_names'], ['x', 'y',\n 'likelihood']], names=['scorer', 'bodyparts', 'coords'])\n", (5746, 5854), True, 'import pandas as 
pd\n'), ((6323, 6343), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['video'], {}), '(video)\n', (6336, 6343), False, 'from moviepy.editor import VideoFileClip\n'), ((7099, 7110), 'time.time', 'time.time', ([], {}), '()\n', (7108, 7110), False, 'import time\n'), ((8292, 8303), 'time.time', 'time.time', ([], {}), '()\n', (8301, 8303), False, 'import time\n'), ((665, 713), 'os.path.join', 'os.path.join', (['dlc_folder_path', '"""pose-tensorflow"""'], {}), "(dlc_folder_path, 'pose-tensorflow')\n", (677, 713), False, 'import os\n'), ((731, 789), 'os.path.join', 'os.path.join', (['dlc_folder_path', '"""Generating_a_Training_Set"""'], {}), "(dlc_folder_path, 'Generating_a_Training_Set')\n", (743, 789), False, 'import os\n'), ((1698, 1741), 'nnet.predict.extract_cnn_output', 'predict.extract_cnn_output', (['outputs_np', 'cfg'], {}), '(outputs_np, cfg)\n', (1724, 1741), False, 'from nnet import predict\n'), ((1753, 1807), 'nnet.predict.argmax_pose_predict', 'predict.argmax_pose_predict', (['scmap', 'locref', 'cfg.stride'], {}), '(scmap, locref, cfg.stride)\n', (1780, 1807), False, 'from nnet import predict\n'), ((2009, 2070), 'dlct.file_name_without_extension_from_path', 'dlct.file_name_without_extension_from_path', (['model_folder_path'], {}), '(model_folder_path)\n', (2051, 2070), False, 'import dlct\n'), ((2174, 2233), 'dlct.file_name_without_extension_from_path', 'dlct.file_name_without_extension_from_path', (['video_file_path'], {}), '(video_file_path)\n', (2216, 2233), False, 'import dlct\n'), ((2336, 2386), 'os.path.join', 'os.path.join', (['output_folder_path', 'output_file_name'], {}), '(output_folder_path, output_file_name)\n', (2348, 2386), False, 'import os\n'), ((2559, 2587), 'os.path.abspath', 'os.path.abspath', (['sys.argv[3]'], {}), '(sys.argv[3])\n', (2574, 2587), False, 'import os\n'), ((2892, 2931), 'os.path.exists', 'os.path.exists', (['configuration_file_path'], {}), '(configuration_file_path)\n', (2906, 2931), False, 'import os\n'), ((3016, 
3072), 'os.path.join', 'os.path.join', (['model_folder_path', 'configuration_file_name'], {}), '(model_folder_path, configuration_file_name)\n', (3028, 3072), False, 'import os\n'), ((4511, 4561), 'os.path.join', 'os.path.join', (['modelfolder', '"""test"""', '"""pose_cfg.yaml"""'], {}), "(modelfolder, 'test', 'pose_cfg.yaml')\n", (4523, 4561), False, 'import os\n'), ((8838, 8886), 'dlct.replace_extension', 'dlct.replace_extension', (['output_file_path', '""".csv"""'], {}), "(output_file_path, '.csv')\n", (8860, 8886), False, 'import dlct\n'), ((1579, 1608), 'skimage.color.gray2rgb', 'skimage.color.gray2rgb', (['image'], {}), '(image)\n', (1601, 1608), False, 'import skimage\n'), ((2100, 2130), 'os.path.split', 'os.path.split', (['video_file_path'], {}), '(video_file_path)\n', (2113, 2130), False, 'import os\n'), ((2687, 2698), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2696, 2698), False, 'import os\n'), ((6516, 6549), 'numpy.ceil', 'np.ceil', (['(clip.duration * clip.fps)'], {}), '(clip.duration * clip.fps)\n', (6523, 6549), True, 'import numpy as np\n'), ((4840, 4874), 'os.path.join', 'os.path.join', (['modelfolder', '"""train"""'], {}), "(modelfolder, 'train')\n", (4852, 4874), False, 'import os\n')] |
#!/usr/bin/python
# make code as python 3 compatible as possible
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import ast
import json
import logging
import sys
from io import BytesIO
import numpy
LOGGER = logging.getLogger()
def get_names(expr):
    """Parse *expr* (python source text) and return the set of names it uses."""
    return get_names_rec(ast.parse(expr))
def union(sets):
    """Return the union of an iterable of sets (empty set for no sets)."""
    sets = list(sets)
    return set.union(*sets) if sets else set()

def get_names_rec(node):
    """Recursively collect the variable names referenced below *node*.

    Raises ValueError for AST node types that are not handled.
    """
    if isinstance(node, ast.Call):
        # BUG FIX: previously this mapped get_names (which re-parses source
        # *text*) over AST argument nodes, so any call with positional
        # arguments raised; recurse with get_names_rec instead.  Keyword
        # argument values are now also visited.
        arg_names = map(get_names_rec, node.args)
        kwarg_names = (get_names_rec(kw.value) for kw in node.keywords)
        return get_names_rec(node.func) | union(arg_names) | union(kwarg_names)
    elif isinstance(node, ast.GeneratorExp):
        return set()
    elif isinstance(node, ast.Lambda):
        # this is incorrect
        return set()
    elif isinstance(node, ast.Module):
        return union(map(get_names_rec, node.body))
    elif isinstance(node, ast.Expr):
        return get_names_rec(node.value)
    elif isinstance(node, ast.Assign):
        return get_names_rec(node.value)
    elif isinstance(node, ast.Index):
        return get_names_rec(node.value)
    elif isinstance(node, ast.Subscript):
        return get_names_rec(node.value) | get_names_rec(node.slice)
    elif isinstance(node, ast.BinOp):
        return get_names_rec(node.left) | get_names_rec(node.right)
    elif isinstance(node, ast.Compare):
        return get_names_rec(node.left) | union(map(get_names_rec, node.comparators))
    elif isinstance(node, ast.UnaryOp):
        LOGGER.debug(dir(node.operand))
        return get_names_rec(node.operand)
    elif hasattr(ast, 'Constant') and isinstance(node, ast.Constant):
        # Python 3.8+ parses all literals as ast.Constant (ast.Str/ast.Num
        # are deprecated aliases); literals contribute no names.
        return set()
    elif isinstance(node, getattr(ast, 'Str', ())):
        return set()
    elif isinstance(node, ast.Name):
        return set([node.id])
    elif isinstance(node, ast.Attribute):
        return get_names_rec(node.value)
    elif isinstance(node, getattr(ast, 'Num', ())):
        return set()
    elif isinstance(node, ast.Tuple):
        return union(map(get_names_rec, node.elts))
    elif isinstance(node, ast.List):
        return union(map(get_names_rec, node.elts))
    elif isinstance(node, ast.comprehension):
        return (union(map(get_names_rec, node.ifs)) | get_names_rec(node.iter) | get_names_rec(node.target))
    elif isinstance(node, ast.ListComp):
        return get_names_rec(node.elt) | union(map(get_names_rec, node.generators))
    elif isinstance(node, ast.Slice):
        children = (node.lower, node.step, node.upper)
        true_children = [x for x in children if x is not None]
        return union(map(get_names_rec, true_children)) if true_children else set()
    elif isinstance(node, ast.ExtSlice):
        return union(map(get_names_rec, node.dims)) if node.dims else set()
    else:
        raise ValueError(node) #pragma: no cover
def uses_stdin(expr):
    """True when *expr* references the stdin-bound names ``d`` or ``data``."""
    return bool({'d', 'data'} & get_names(expr))
def build_parser():
    """Build the npcli argument parser.

    Positional 'expr' plus repeatable '--expr/-e' expressions are chained;
    output-format options are mutually exclusive.
    """
    parser = argparse.ArgumentParser(prog='npcli', description='Interact with numpy from the command line')
    parser.add_argument('expr', type=str, help='Expression involving d, a numpy array')
    # Extra expressions are appended under a different dest so the
    # positional 'expr' is not clobbered.
    parser.add_argument(
        '--expr',
        '-e',
        type=str,
        help='Expression involving d, a numpy array. Multipe expressions get chained',
        dest='more_expressions',
        action='append',
        metavar='EXPR')
    parser.add_argument('--code', action='store_true', default=False, help='Produce python code rather than running')
    parser.add_argument('--debug', action='store_true', help='Print debug output')
    parser.add_argument('data_sources', type=str, nargs='*', help='Files to read data from. Stored in d1, d2 etc')
    # choices is the dict returned by data_input(); argparse treats it as
    # its keys (the valid format names).
    parser.add_argument('--input-format', '-I', help='Dtype of the data read in', choices=data_input(), default='default')
    parser.add_argument('--kitchen-sink', '-K', action='store_true', help='Import a lot of useful things into the execution scope')
    parser.add_argument('--name', '-N', nargs=2, action='append', type=str, help='A named data source')
    format_group = parser.add_mutually_exclusive_group()
    format_group.add_argument(
        '--output-format',
        '-O',
        type=str,
        help='Output data in this format. "str" for a string, "json" for json, "json-pretty" for pretty printed json with sorted keys')
    format_group.add_argument('--raw', action='store_true', help='Result is a string that should be written to standard out')
    format_group.add_argument('--repr', '-D', action='store_true', help='Output a repr of the result. Often used for _D_ebug')
    format_group.add_argument('--no-result', '-n', action='store_true', help="Discard result")
    # NOTE(review): this help string looks copy-pasted from --raw -- confirm.
    parser.add_argument(
        '--module', '-m',
        action='append',
        help='Result is a string that should be written to standard out')
    parser.add_argument('-f', metavar='data_source', action='append', dest='flag_data_sources')
    return parser
def parse_named_source(format, name_and_source):
    """Load one ``--name NAME FILE`` data source; returns (name, parsed data)."""
    name, path = name_and_source
    with open(path) as stream:
        return name, read_data(format, stream)
def run(stdin_stream, args):
    """Parse *args*, evaluate the expressions and yield output chunks.

    Data sources are bound as d/data (stdin), d1/data1... (files) and any
    --name entries; each expression's result is rebound to 'd' for the next.
    Returns a tuple of byte strings to write to stdout (except for some
    --output-format branches, see NOTE below).
    """
    parser = build_parser()
    args = parser.parse_args(args)
    args.module = args.module or []
    if args.debug:
        logging.basicConfig(level=logging.DEBUG) # pragma: no cover
    module_dict = dict()
    for m in args.module:
        module_dict.update(**imp(m))
    if args.kitchen_sink:
        # Lazy because these may not be installed
        import pandas
        import pylab
        # NOTE(review): duplicate 'import pandas' below -- harmless but odd.
        import pandas
        for x in [pandas, numpy, pylab]:
            module_dict.update(imp_all(x))
        module_dict['numpy'] = numpy
        module_dict['pylab'] = pylab
        module_dict['pandas'] = pandas
        module_dict['pd'] = pandas
        module_dict['np'] = numpy
        module_dict['numpy'] = numpy
    LOGGER.debug('Module dict: %r', module_dict)
    context = module_dict.copy()
    expressions = ([args.expr] if args.expr else []) + (args.more_expressions or [])
    if not expressions:
        raise ValueError('No expressions') # pragma: no cover
    if args.data_sources and args.flag_data_sources:
        raise ValueError("Either use -f for every source or None")
    if args.flag_data_sources:
        args.data_sources = args.flag_data_sources
    if args.code:
        # --code: emit (optionally autopep8-formatted) source instead of running.
        # Lazy import because this is big
        import autopep8
        program = '\n'.join(expressions) + '\n'
        if sys.version_info[0] == 3:
            result = autopep8.fix_code(program).encode('utf8')
        else:
            result = program
        return result,
    # Only read stdin when the first expression actually references d/data.
    if uses_stdin(expressions[0]):
        data = read_data(args.input_format, stdin_stream)
        context.update(data=data, d=data)
    else:
        LOGGER.debug('takes no data')
    for index, source in enumerate(args.data_sources):
        with open(source) as stream:
            data = read_data(args.input_format, stream)
            name1 = 'd{}'.format(index + 1)
            name2 = 'data{}'.format(index + 1)
            context.update({name1: data, name2: data})
    named_sources = dict([parse_named_source(args.input_format, name_spec) for name_spec in args.name]) if args.name is not None else dict()
    context.update(named_sources)
    # NOTE(review): logs module_dict under the label 'context' -- confirm.
    LOGGER.debug('context: %r', module_dict)
    # Chain the expressions: each result becomes 'd' for the next one.
    for expr in expressions:
        context['d'] = multiline_eval(expr, context)
    result = context['d']
    if isinstance(result, (float, int, numpy.number)):
        result = numpy.array([result])
    try:
        LOGGER.debug('Result length: %f ', len(result))
    except TypeError:
        LOGGER.debug('Result has no length length: %r! ', result)
    if args.no_result:
        return tuple()
    elif args.raw:
        return (result,)
    elif args.output_format:
        # NOTE(review): the "str"/"json"/"json-pretty" branches return the
        # value itself rather than a 1-tuple like the other branches --
        # main() then iterates it element-wise; confirm intended.
        if args.output_format == "str":
            return result
        elif args.output_format == "json":
            return json.dumps(result)
        elif args.output_format == "json-pretty":
            return json.dumps(result, indent=4, sort_keys=True)
        else:
            return (numpy.array(result, dtype=args.output_format),)
    elif args.repr:
        return (repr(result).encode('utf8'),)
    else:
        output = BytesIO()
        numpy.savetxt(output, result, fmt=b'%s')
        return (output.getvalue(),)
def main():
    """Command-line entry point: evaluate and stream each chunk to stdout."""
    chunks = run(sys.stdin, sys.argv[1:])
    for chunk in chunks:
        sys.stdout.write(chunk)
        sys.stdout.flush()
def multiline_eval(expr, context):
    """Evaluate several lines of input, returning the result of the last line.

    All statements but the last are exec'd in *context*; if the last
    statement is an expression its value is returned, otherwise it is also
    exec'd and None is returned.

    Fixes over the previous version:
    - ast.Module(body) alone no longer compiles on Python 3.8+ (missing
      required type_ignores field); we reuse a parsed Module instead.
    - ast.Expression(tree.body[-1].value) was built unconditionally and
      raised AttributeError when the last statement had no .value
      (e.g. a for loop); it is now built only when needed.
    """
    tree = ast.parse(expr)
    is_eval = isinstance(tree.body[-1], ast.Expr)

    def as_module(statements):
        # Reuse a freshly parsed (empty) Module so all required fields are
        # populated regardless of Python version.
        module = ast.parse('')
        module.body = statements
        return module

    exec(compile(as_module(tree.body[:-1]), 'file', 'exec'), context) #pylint: disable=exec-used
    if is_eval:
        eval_expr = ast.Expression(tree.body[-1].value)
        return eval(compile(eval_expr, 'file', 'eval'), context) #pylint: disable=eval-used
    exec(compile(as_module([tree.body[-1]]), 'file', 'exec'), context) #pylint: disable=exec-used
    return None
def maybe_float(x):
    """Convert *x* to float when possible, otherwise return it unchanged."""
    try:
        converted = float(x)
    except ValueError:
        return x
    return converted
def data_input():
    """Map each input-format name to a callable that reads that format
    from a stream."""
    return {
        'lines': lambda stream: [x.decode('utf8').strip('\n') for x in stream.readlines()],
        'str': lambda x: x.read(),
        'json': lambda x: json.loads(x.read()),
        'csv': lambda x: numpy.genfromtxt(x, delimiter=','),
        'pandas': read_pandas_csv,
        'pandas_json': read_pandas_json,
        'default': read_default,
        'eval': lambda x: eval(x.read()),
    }
def read_pandas_json(stream):
    """Read a JSON document from *stream* into a pandas DataFrame."""
    import pandas
    # pandas.io.json.read_json is an internal location that newer pandas
    # versions no longer expose; pandas.read_json is the stable public API.
    return pandas.read_json(stream)
def read_pandas_csv(stream):
    """Read CSV from *stream* into a pandas DataFrame (first column as index)."""
    import pandas
    # DataFrame.from_csv was deprecated in pandas 0.21 and removed in 1.0;
    # read_csv with these options reproduces its defaults.
    return pandas.read_csv(stream, index_col=0, parse_dates=True)
def read_data(input_format, stream):
    """Parse *stream* with the reader registered for *input_format*."""
    reader = data_input()[input_format]
    return reader(stream)
def read_default(stream):
    """Default reader: whitespace-separated values, one row per line;
    fields are converted to float where possible (see maybe_float)."""
    data = numpy.array([list(map(maybe_float, line.split())) for line in stream.read().splitlines()])
    if len(data.shape) > 1 and data.shape[1] == 1:
        # Treat a stream of numbers a 1-D array
        data = data.flatten()
    LOGGER.debug('Data length: %s ', len(data))
    if hasattr(data, 'shape'):
        LOGGER.debug('Data shape: %s ', data.shape)
    else:
        LOGGER.debug('Data shape: None')
    LOGGER.debug('data: %r', data)
    return data
def imp(s):
    """Import dotted module path *s*; return {top-level name: top-level module}."""
    top_level = s.split('.', 1)[0]
    return {top_level: __import__(s)}
def imp_all(s):
    """Return a name->object mapping of the public contents of *s*.

    *s* may be a module object or a dotted module path string.  Uses
    ``__all__`` when the module defines it, otherwise the whole namespace.
    """
    if isinstance(s, str):
        # BUG FIX: the original iterated over the *characters* of the
        # top-level name instead of the remaining dotted components, so the
        # string branch always raised AttributeError (e.g. getattr(os, 's')
        # for 'os.path').
        parts = s.split('.')
        obj = __import__(s)
        for part in parts[1:]:
            obj = getattr(obj, part)
    else:
        obj = s
    if hasattr(obj, '__all__'):
        return dict((k, getattr(obj, k)) for k in obj.__all__)
    else:
        return dict(vars(obj))
| [
"logging.getLogger",
"logging.basicConfig",
"pandas.io.json.read_json",
"argparse.ArgumentParser",
"pandas.DataFrame.from_csv",
"io.BytesIO",
"json.dumps",
"ast.Module",
"numpy.array",
"autopep8.fix_code",
"numpy.savetxt",
"ast.parse",
"sys.stdout.flush",
"numpy.genfromtxt",
"ast.Express... | [((288, 307), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (305, 307), False, 'import logging\n'), ((341, 356), 'ast.parse', 'ast.parse', (['expr'], {}), '(expr)\n', (350, 356), False, 'import ast\n'), ((2857, 2956), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""npcli"""', 'description': '"""Interact with numpy from the command line"""'}), "(prog='npcli', description=\n 'Interact with numpy from the command line')\n", (2880, 2956), False, 'import argparse\n'), ((8503, 8518), 'ast.parse', 'ast.parse', (['expr'], {}), '(expr)\n', (8512, 8518), False, 'import ast\n'), ((8586, 8621), 'ast.Expression', 'ast.Expression', (['tree.body[-1].value'], {}), '(tree.body[-1].value)\n', (8600, 8621), False, 'import ast\n'), ((8638, 8664), 'ast.Module', 'ast.Module', (['tree.body[:-1]'], {}), '(tree.body[:-1])\n', (8648, 8664), False, 'import ast\n'), ((9687, 9719), 'pandas.io.json.read_json', 'pandas.io.json.read_json', (['stream'], {}), '(stream)\n', (9711, 9719), False, 'import pandas\n'), ((9779, 9812), 'pandas.DataFrame.from_csv', 'pandas.DataFrame.from_csv', (['stream'], {}), '(stream)\n', (9804, 9812), False, 'import pandas\n'), ((5203, 5243), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (5222, 5243), False, 'import logging\n'), ((7435, 7456), 'numpy.array', 'numpy.array', (['[result]'], {}), '([result])\n', (7446, 7456), False, 'import numpy\n'), ((8329, 8351), 'sys.stdout.write', 'sys.stdout.write', (['part'], {}), '(part)\n', (8345, 8351), False, 'import sys\n'), ((8360, 8378), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8376, 8378), False, 'import sys\n'), ((8708, 8743), 'ast.Expression', 'ast.Expression', (['tree.body[-1].value'], {}), '(tree.body[-1].value)\n', (8722, 8743), False, 'import ast\n'), ((8780, 8807), 'ast.Module', 'ast.Module', (['[tree.body[-1]]'], {}), '([tree.body[-1]])\n', (8790, 8807), False, 'import ast\n'), ((9439, 
9473), 'numpy.genfromtxt', 'numpy.genfromtxt', (['x'], {'delimiter': '""","""'}), "(x, delimiter=',')\n", (9455, 9473), False, 'import numpy\n'), ((6443, 6469), 'autopep8.fix_code', 'autopep8.fix_code', (['program'], {}), '(program)\n', (6460, 6469), False, 'import autopep8\n'), ((8167, 8176), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8174, 8176), False, 'from io import BytesIO\n'), ((8185, 8225), 'numpy.savetxt', 'numpy.savetxt', (['output', 'result'], {'fmt': "b'%s'"}), "(output, result, fmt=b'%s')\n", (8198, 8225), False, 'import numpy\n'), ((7859, 7877), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (7869, 7877), False, 'import json\n'), ((7947, 7991), 'json.dumps', 'json.dumps', (['result'], {'indent': '(4)', 'sort_keys': '(True)'}), '(result, indent=4, sort_keys=True)\n', (7957, 7991), False, 'import json\n'), ((8026, 8071), 'numpy.array', 'numpy.array', (['result'], {'dtype': 'args.output_format'}), '(result, dtype=args.output_format)\n', (8037, 8071), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

# Sample a cubic trend and corrupt it with unit Gaussian noise.
# (RNG call order preserved: uniform first, then normal.)
x_samples = np.random.uniform(-5, 5, 100)
clean_y = 0.1 * x_samples ** 3 + 3
noisy_y = clean_y + np.random.normal(size=100)

# Scatter the clean values (blue) against the noisy observations (red).
figure = plt.figure()
axes = figure.add_subplot(111)
axes.scatter(x_samples, clean_y, c='blue')
axes.scatter(x_samples, noisy_y, c='red')
figure.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.figure",
"numpy.random.uniform"
] | [((112, 141), 'numpy.random.uniform', 'np.random.uniform', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (129, 141), True, 'import numpy as np\n'), ((188, 214), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (204, 214), True, 'import numpy as np\n'), ((274, 286), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (284, 286), True, 'import matplotlib.pyplot as plt\n')] |
"""
Functionality to perform inference. Acts as runner between image queue and
GPU cluster containing trained models.
inference_runner.py
"""
import os
import os.path as op
import time
import base64
import json
import tempfile
from io import BytesIO
from functools import partial
import logging
import requests
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from google.cloud import pubsub_v1
from google.oauth2 import service_account
from skimage.transform import rescale, resize
from PIL import Image as PIL_Image
import rasterio
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from absl import app, logging, flags
from osgeo import gdal
from divdet.inference.utils_inference import (
iter_grouper, get_slice_bounds, windowed_reads_numpy,
calculate_region_grad, calculate_shape_props,
poly_non_max_suppression, convert_mask_to_polygon,
geospatial_polygon_transform)
from divdet.surface_feature import Crater, Image, Base
# Command-line flags (absl); values are read through the module-level FLAGS.
flags.DEFINE_string('gcp_project', None, 'Google cloud project ID.')
flags.DEFINE_string('pubsub_subscription_name', None, 'Google cloud pubsub subscription name for queue.')
flags.DEFINE_string('database_uri', None, 'Address of database to store prediction results.')
flags.DEFINE_integer('max_outstanding_messages', 1, 'Number of messages to have in local backlog.')
flags.DEFINE_string('service_account_fpath', None, 'Filepath to service account with pubsub access.')

FLAGS = flags.FLAGS

# Known errors that occur during image download step; matching one of these
# strings marks the failure as endpoint overload (worth retrying) rather than
# a bad URL.
overload_errors = ['<urlopen error [Errno 60] ETIMEDOUT>',
                   '<urlopen error [Errno 60] Operation timed out>',
                   '<urlopen error [Errno 2] Lookup timed out>',
                   '<urlopen error [Errno 8] Name or service not known>']
def download_url_to_file(url, directory='/tmp', clobber=False, repeat_tries=5,
                         chunk_size=2097152):
    """Download a URL to disk, retrying on failure.

    Parameters
    ----------
    url: string
        URL to file to be downloaded.
    directory: string
        Local directory to save file to. Default is '/tmp'.
    clobber: bool
        If file exists, should program overwrite it. Default is False.
    repeat_tries: int
        Number of times to attempt the URL download. Default is 5.
    chunk_size: int
        Size of the download chunk size in bytes. Default is 2 MiB.

    Returns
    -------
    save_fpath: string or None
        Filepath to downloaded file on disk, or None if all attempts failed.

    Raises
    ------
    ValueError
        If the destination file already exists and `clobber` is False.
    """
    save_fpath = op.join(directory, url.split('/')[-1])

    if op.exists(save_fpath) and not clobber:
        raise ValueError(f'File exists at {save_fpath}')

    # BUG FIX: the original decremented `repeat_tries` without any loop, so a
    # failed download was never actually retried. The `try` now also wraps
    # `requests.get` itself, so connection errors are retried too.
    while repeat_tries > 0:
        try:
            # Use `stream=True` for large files
            with requests.get(url, stream=True, allow_redirects=True, timeout=10) as req:
                req.raise_for_status()
                with open(save_fpath, 'wb') as write_file:
                    pbar = tqdm(total=int(req.headers['Content-Length']))
                    for chunk in req.iter_content(chunk_size=chunk_size):
                        if chunk:  # filter out keep-alive new chunks
                            write_file.write(chunk)
                            pbar.update(len(chunk))
                logging.info(f'Downloaded file from URL: {url}')
                return save_fpath

        # Handle some known exceptions
        except requests.exceptions.HTTPError as http_e:
            logging.error(f'HTTPError: {http_e}')
        except requests.exceptions.InvalidURL as url_e:
            logging.error(f'\nReceived invalid url error {url_e}')
            if str(url_e) in overload_errors:
                logging.error('Known load error, retrying')
        except Exception as err:
            logging.error(f'Other error on {url}: {err}')

        # Download was unsuccessful; consume one retry and loop again
        repeat_tries -= 1

    # All attempts failed; remove any partial file and return None implicitly
    logging.info(f'Too many repeats, stopping on {url}')
    if op.exists(save_fpath):
        os.remove(save_fpath)
def arr_to_b64(numpy_arr, ensure_RGB=True):
    """Encode a numpy image array as a base64 PNG string.

    Parameters
    ----------
    numpy_arr: np.ndarray
        Image data to encode; converted via PIL (`tobytes` handles
        non-contiguous arrays internally).
    ensure_RGB: bool
        If True, convert the image to RGB mode before encoding.

    Returns
    -------
    str
        Base64-encoded PNG representation of the array.
    """
    pil_img = PIL_Image.fromarray(numpy_arr)
    if ensure_RGB:
        pil_img = pil_img.convert('RGB')

    # Render to an in-memory PNG, then base64-encode the raw bytes.
    with BytesIO() as buffer:
        pil_img.save(buffer, format='PNG')
        encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return encoded
def pred_generator(generator, endpoint):
    """Send images from a generator to an inference endpoint, one at a time.

    Parameters
    ----------
    generator: iterable of dict
        Each dict must contain an 'image_data' numpy array; prediction
        fields are added to the same dict before it is yielded.
    endpoint: string
        URL of the TF Serving prediction endpoint.

    Yields
    ------
    dict
        The input dict augmented with 'detection_scores',
        'detection_masks', and 'proposal_boxes'.

    Raises
    ------
    RuntimeError
        If the endpoint response contains no 'outputs' key.
    """
    for image_dict in tqdm(generator, desc='Making inference requests.'):
        b64_image = arr_to_b64(image_dict['image_data'])

        # KF Serving request format example:
        # https://github.com/kubeflow/kfserving/blob/master/docs/samples/tensorflow/input.json
        instances = [{'b64': b64_image}]

        # Run prediction using the TF Serving "column" payload format
        payload = json.dumps({"inputs": {"input_tensor": instances}})
        resp = requests.post(endpoint, data=payload)
        resp_json = json.loads(resp.content)

        if 'outputs' not in resp_json:
            # BUG FIX: the original only logged here and then crashed with a
            # NameError on the unbound `resp_outputs`; fail loudly instead.
            logging.error(f'Error in prediction step. Raw json: {resp_json}')
            raise RuntimeError(f'Prediction endpoint returned no outputs: {resp_json}')
        resp_outputs = resp_json['outputs']

        # Only keep prediction indices with confidences > 0 (some may have
        # been filtered out by the OD API config)
        n_good_inds = int(np.sum(np.array(resp_outputs['detection_scores'][0]) > 0))
        image_dict['detection_scores'] = resp_outputs['detection_scores'][0][:n_good_inds]  # Probability each proposal corresponds to an object
        image_dict['detection_masks'] = resp_outputs['detection_masks'][0][:n_good_inds]  # Mask for each proposal. Needs to be resized from (33, 33)
        image_dict['proposal_boxes'] = resp_outputs['proposal_boxes'][0][:n_good_inds]  # Box coordinates for each object in orig image coords

        yield image_dict
def pred_generator_batched(generator, endpoint, batch_size=1):
    """Send batches of images from a generator to an inference endpoint.

    Parameters
    ----------
    generator: iterable of dict
        Each dict must contain an 'image_data' numpy array.
    endpoint: string
        URL of the TF Serving prediction endpoint.
    batch_size: int
        Number of images sent per request. Default is 1.

    Yields
    ------
    list of dict
        One list per batch; each dict is augmented with
        'detection_scores', 'detection_masks', and 'proposal_boxes'.
    """
    for image_batch in tqdm(iter_grouper(generator, batch_size)):
        pred_batch = []  # Image metadata dicts that will receive predictions
        instances = []  # b64-encoded images to send to the endpoint
        for image_dict in image_batch:
            if image_dict is None:  # iter_grouper pads the final batch with None
                continue
            b64_image = arr_to_b64(image_dict['image_data'])
            pred_batch.append(image_dict)
            # KF Serving request format example:
            # https://github.com/kubeflow/kfserving/blob/master/docs/samples/tensorflow/input.json
            instances.append({"b64": b64_image})

        # TF Serving "column" format payload
        payload = json.dumps({"inputs": {"input_tensor": instances}})

        # Sometimes, the prediction endpoint chokes. Allow for multiple retries.
        retries = 5
        while retries:
            try:
                resp = requests.post(endpoint, data=payload, timeout=10)
                resp_outputs = json.loads(resp.content)['outputs']
                break
            except Exception:
                # BUG FIX: the old handler referenced `resp` before it was
                # necessarily assigned (NameError when the POST itself raised),
                # masking the real error. A bare `raise` re-raises it intact.
                retries -= 1
                if retries == 0:
                    logging.error('Problem in prediction step.')
                    raise
                time.sleep(1)

        # Store the predictions on each image dict and yield the whole batch
        for pi, pred_dict in enumerate(pred_batch):
            # Throw out any 0-confidence predictions
            n_good_inds = int(np.sum(np.array(resp_outputs['detection_scores'][pi]) > 0))
            pred_dict['detection_scores'] = resp_outputs['detection_scores'][pi][:n_good_inds]  # Probability each proposal corresponds to an object
            pred_dict['detection_masks'] = resp_outputs['detection_masks'][pi][:n_good_inds]  # Mask for each proposal. Needs to be resized from (33, 33)
            pred_dict['proposal_boxes'] = resp_outputs['proposal_boxes'][pi][:n_good_inds]  # Box coordinates for each object in orig image coords
        yield pred_batch
def proc_message_debug(message, session, endpoint=None):
    """Debug callback: log the raw pubsub message and acknowledge it."""
    msg_text = '\nReceived message: {}'.format(message)
    logging.info(msg_text)
    message.ack()
def proc_message(message, session):
    """Callback to process a single pubsub image message end-to-end.

    Downloads the image referenced in the message attributes, optionally
    reprojects it, slices it into overlapping windows, runs crater
    predictions at each requested scale against the remote inference
    endpoint, de-duplicates detections with non-max suppression, and
    persists the image record plus crater polygons to the database before
    acknowledging the message.

    Parameters
    ----------
    message: pubsub message
        Message whose `attributes` mapping carries the image URL and all
        processing parameters (window size, overlap, scales, endpoint, ...).
    session: sqlalchemy.orm.Session
        Open database session used to store the image and crater rows.
    """
    start_time = time.time()
    logging.info('\nReceived message: {}'.format(message.data))
    # Copy attributes into a mutable dict; pubsub attribute values are strings
    msg_dict = dict(message.attributes)
    if not message.attributes['scales']:
        msg_dict['scales'] = [1]  # Default: process only at native resolution
    else:
        msg_dict['scales'] = json.loads(msg_dict['scales'])
    # Check if image predictions already exists:
    skip_run_check = session.query(Image.id).\
        filter(Image.pds_id == msg_dict['pds_id']).first()
    if skip_run_check:
        logging.info(f'Image {msg_dict["pds_id"]} already exists in DB. Acknowledging message and skipping.')
        message.ack()
        return
    # Coerce the numeric message attributes from their string form
    msg_dict['window_size'] = int(msg_dict['window_size'])
    msg_dict['min_window_overlap'] = int(msg_dict['min_window_overlap'])
    msg_dict['center_latitude'] = float(msg_dict['center_latitude'])
    msg_dict['center_longitude'] = float(msg_dict['center_longitude'])
    msg_dict['sub_solar_azimuth'] = float(msg_dict['sub_solar_azimuth'])
    msg_dict['batch_size'] = int(msg_dict['batch_size'])
    # Use temp directory so everything is deleted after image processing completes
    with tempfile.TemporaryDirectory() as tmp_dir:
        ##############################################
        # Download data and reproject if needed
        ##############################################
        # TODO: Move back to more generic url key name
        logging.info('\nDownloading image: {}'.format(msg_dict['url']))
        image_fpath_orig = download_url_to_file(msg_dict['url'],
                                                  directory=tmp_dir)
        if image_fpath_orig is None:
            # Download failed after retries; nack so the message is redelivered
            logging.error('Image download errored.')
            message.nack()
            return
        try:
            # Extend the ack deadline so long processing doesn't cause redelivery
            message.modify_ack_deadline(600)
            logging.info('Updating acknowledgement deadline.')
        except:
            # NOTE(review): bare except deliberately treats deadline extension
            # as best-effort, but it also swallows KeyboardInterrupt
            logging.info('Mod to ack deadline failed. Skipping')
        '''
        if msg_dict['projection_url']:
            logging.info('Downloading projection.')
            proj_fpath = download_url_to_file(msg_dict['projection_url'],
                                              directory=tmp_dir)
        '''
        if msg_dict['center_reproject'] == 'True':
            # Warp to an equirectangular projection centered on the image
            image_fpath = op.splitext(image_fpath_orig)[0] + '_warp' + \
                op.splitext(image_fpath_orig)[1]
            logging.info(f'Reprojecting image and saving to {image_fpath}.')
            with rasterio.open(image_fpath_orig) as temp_img:
                #center_lat = temp_img.lnglat()[1] # Raster center
                center_lat = msg_dict['center_latitude']
                if msg_dict['instrument_host_id'] == 'CTX':
                    # Mars projection
                    eqc_proj = f'+proj=eqc +lat_ts={center_lat} +lat_0=0 +lon_0=180 +x_0=0 +y_0=0 +a=3396190 +b=3396190 +units=m +no_defs'
                    gdal.Warp(image_fpath, image_fpath_orig, dstSRS=eqc_proj)
                    logging.info('Reprojection complete.')
                elif msg_dict['instrument_host_id'] == 'LRO':
                    # Moon projection
                    # Could also match the latitude to the images center here
                    eqc_proj = '+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=180 +x_0=0 +y_0=0 +a=1737400 +b=1737400 +units=m +no_defs'
                    gdal.Warp(image_fpath, image_fpath_orig, dstSRS=eqc_proj)
                    logging.info('Reprojection complete.')
        else:
            image_fpath = image_fpath_orig
        ###########################################################
        # Slice image appropriately and pass to prediction endpoint
        ###########################################################
        # Accumulators for detections gathered across all scales
        preds = {'detection_scores': [], 'detection_masks': [], 'proposal_boxes': [],
                 'polygons': [], 'resized_masks': []}
        #slices = []
        with rasterio.open(image_fpath) as dataset:
            image = dataset.read(1) # Read data from first band
            if image.dtype == np.uint16:
                # Rescale 16-bit data into 8-bit range for the model
                image = (image / 65535. * 255).astype(np.uint8)
            if dataset.transform == rasterio.Affine.identity():
                affine_transform = rasterio.Affine(1, 0, 0, 0, -1, 0) # If not mapped, use this transform for correct matching of image/crater coords
            else:
                affine_transform = dataset.transform
            for scale in msg_dict['scales']:
                logging.info('Processing at scale %s', scale)
                try:
                    message.modify_ack_deadline(600)
                    logging.info('Updated acknowledgement deadline.')
                except:
                    logging.info('Mod to ack deadline failed. Skipping')
                # Rescale image, round, and convert back to orig datatype
                scaled_image = rescale(image, scale, mode='edge', preserve_range=True,
                                        anti_aliasing=True).round().astype(image.dtype)
                # Calculate slice bounds and create generator
                slice_bounds = get_slice_bounds(
                    scaled_image.shape,
                    slice_size=(msg_dict['window_size'], msg_dict['window_size']),
                    min_window_overlap=(msg_dict['min_window_overlap'],
                                        msg_dict['min_window_overlap']))
                if not slice_bounds:
                    continue
                logging.info('Created %s slices. Running predictions at scale %s',
                             (len(slice_bounds)), scale)
                slice_batch = windowed_reads_numpy(scaled_image, slice_bounds)
                # Generate predictions. Use batched prediction if desired
                if msg_dict['batch_size'] > 1:
                    pred_gen = pred_generator_batched(slice_batch,
                                                      msg_dict['prediction_endpoint'],
                                                      msg_dict['batch_size'])
                    start_inf_time = time.time()
                    # Flatten the list-of-batches into a flat list of slice dicts
                    pred_batch = [item for sublist in pred_gen for item in sublist]
                    elapsed_time = time.time() - start_inf_time
                    avg_sec = elapsed_time / len(pred_batch)
                    logging.info(f'Avg image processing ({len(pred_batch)} images): {avg_sec:0.3f}s')
                else:
                    pred_gen = pred_generator(slice_batch,
                                              msg_dict['prediction_endpoint'])
                    pred_batch = list(pred_gen)
                logging.info('Crater predictions at scale %s complete.', (scale))
                # Convert predictions to polygon in orig image coordinate frame
                for pred_set, slice_set in tqdm(zip(pred_batch, slice_bounds),
                                                 desc='\tConvert slice pred batch masks to polygons'):
                    # Slice origin mapped back into native-resolution pixels
                    y_offset_img = np.int(slice_set[0] / scale)
                    x_offset_img = np.int(slice_set[1] / scale)
                    pred_set['polygons'] = []
                    pred_set['resized_masks'] = []
                    new_proposal_boxes = []
                    for mask, box in zip(pred_set['detection_masks'], pred_set['proposal_boxes']):
                        # Get width/height of image and compute offset of slice
                        width = np.int(np.max((np.around((box[3] - box[1]) / scale), 1)))
                        height = np.int(np.max((np.around((box[2] - box[0]) / scale), 1)))
                        x_offset_box = box[1] / scale
                        y_offset_box = box[0] / scale
                        x_offset = x_offset_img + x_offset_box
                        y_offset = y_offset_img + y_offset_box
                        box = [y_offset, x_offset, y_offset+height, x_offset+width] # Update proposal_boxes
                        new_proposal_boxes.append(box)
                        # Don't resize if scale is 1
                        if scale != 1:
                            mask_resized = resize(np.array(mask), (height, width),
                                                  mode='edge', anti_aliasing=True)
                        else:
                            mask_resized = np.array(mask)
                        mask_binary = mask_resized > 0.5 # Must be binary
                        pred_set['resized_masks'].append(mask_binary.astype(np.int))
                        # Generate polygon (with geospatial offset) from mask
                        mask_poly = convert_mask_to_polygon(mask_binary, (x_offset, y_offset))
                        pred_set['polygons'].append(mask_poly) # polygon in whole-image pixel coordinates
                    pred_set['proposal_boxes'] = new_proposal_boxes
                    # Merge this slice's detections into the cross-scale accumulators
                    for key in ['detection_scores', 'detection_masks',
                                'proposal_boxes', 'polygons', 'resized_masks']:
                        preds[key].extend(pred_set[key])
                logging.info(f'Finished processing at scale {scale}')
        ###########################
        # Run non-max suppression to remove duplicates in multiple scales of one image
        logging.info(f"Found {len(preds['polygons'])} polygon predictions. Starting bbox-based NMS.")
        if len(preds['polygons']):
            # Non-max suppression for bounding boxes only
            selected_inds = tf.image.non_max_suppression(preds['proposal_boxes'],
                                                      preds['detection_scores'],
                                                      iou_threshold=0.2,
                                                      max_output_size=len(preds['detection_scores'])).numpy()
            # Select data from TF Serving column format
            for key in ['detection_scores', 'detection_masks', 'proposal_boxes', 'resized_masks']:
                preds[key] = [preds[key][ind] for ind in selected_inds]
            # Convert polygons from pixel coords to geospatial coords
            preds['polygons'] = [geospatial_polygon_transform(preds['polygons'][ind], affine_transform)
                                 for ind in selected_inds]
        logging.info(f"After NMS, {len(preds['polygons'])} predictions remain.")
        try:
            message.modify_ack_deadline(600)
            logging.info('Updating acknowledgement deadline.')
        except:
            logging.info('Mod to ack deadline failed. Skipping')
        ###########################
        # Save image and craters to DB
        logging.info("Inserting %s polygon predictions.", len(preds['polygons']))
        # Save image first to get the image ID
        image_obj = Image(lon=msg_dict['center_longitude'],
                          lat=msg_dict['center_latitude'],
                          instrument_host_id=msg_dict.get('instrument_host_id', 'None'),
                          instrument_id=msg_dict['instrument_id'],
                          pds_id=msg_dict['pds_id'],
                          pds_version_id=msg_dict.get('pds_version_id', 'None'),
                          sub_solar_azimuth=msg_dict['sub_solar_azimuth'])
        session.add(image_obj)
        session.commit()
        # Loop over predicted craters, determine properties, and store
        for pi in range(len(preds['detection_scores'])):
            # Resize mask to original prediction bbox dimensions
            # TODO: will need to bring along original image for gradient calcs
            #grad_h, grad_v = calculate_region_grad(preds['resized_masks'][pi].astype(np.bool))
            shape_props = calculate_shape_props(preds['resized_masks'][pi])
            export_geom = preds['polygons'][pi].ExportToWkt()
            session.add(Crater(geometry=export_geom,
                               confidence=preds['detection_scores'][pi],
                               eccentricity=shape_props['eccentricity'],
                               gradient_angle=-1,
                               image_id=image_obj.id))
        session.commit()
    # All results are persisted; acknowledge so pubsub stops redelivering
    message.ack()
    elapsed_time = time.time() - start_time
    logging.info('***Processing complete *** %s', msg_dict["url"])
    logging.info('Total processing time: %s',
                 time.strftime('%H:%M:%S', time.gmtime(elapsed_time)))
def _get_session(db_uri, use_batch_mode=True, echo=False):
    """Helper to get an SQLAlchemy DB session.

    Parameters
    ----------
    db_uri: string
        Database connection URI.
    use_batch_mode: bool
        Retained for backward compatibility; the experimental
        `use_batch_mode` engine flag is no longer passed to `create_engine`.
    echo: bool
        If True, SQLAlchemy logs all emitted SQL statements.

    Returns
    -------
    session: sqlalchemy.orm.Session
        An open session bound to the database.

    Raises
    ------
    RuntimeError
        If a connection to the database cannot be established; the
        underlying driver exception is chained as the cause.
    """
    engine = create_engine(db_uri, echo=echo)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # Force a round trip to verify connectivity before returning
        session.connection()
    except Exception as exc:  # Narrowed from bare `except:`; keep the cause
        raise RuntimeError(f"Couldn't connect to db: {db_uri}") from exc
    logging.info('Successfully connected to database.')
    return session
def main(_):
    """Entry point: pull pubsub messages and process each image.

    Connects to the results database, subscribes to the configured pubsub
    subscription (with flow control to bound the local backlog), and
    dispatches each received message to `proc_message`.
    """
    # Wait a bit to give DB proxy container time to start
    time.sleep(10)

    # Setup pubsub subscriber client (explicit credentials if provided)
    if FLAGS.service_account_fpath:
        credentials = service_account.Credentials.from_service_account_file(
            FLAGS.service_account_fpath,
            scopes=["https://www.googleapis.com/auth/cloud-platform"])
        subscriber = pubsub_v1.SubscriberClient(credentials=credentials)
    else:
        subscriber = pubsub_v1.SubscriberClient()

    # Set relevant subscription params
    subscription_path = subscriber.subscription_path(FLAGS.gcp_project,
                                                     FLAGS.pubsub_subscription_name)

    # Add flow control to keep from downloading too many messages at once
    flow_control = pubsub_v1.types.FlowControl(
        max_messages=FLAGS.max_outstanding_messages)

    # Get DB session
    session = _get_session(FLAGS.database_uri)

    # Set callback for processing each message and begin pulling messages
    callback = partial(proc_message, session=session)
    streaming_pull_future = subscriber.subscribe(subscription_path,
                                                 callback=callback,
                                                 flow_control=flow_control)
    with subscriber:
        try:
            streaming_pull_future.result()
        except TimeoutError:
            streaming_pull_future.cancel()
            # BUG FIX: the original string was missing its f-prefix, so the
            # literal '{subscription_path}' was logged instead of the path.
            logging.info('Timeout error on %s', subscription_path)

    logging.info('\nFinished inference.')


if __name__ == '__main__':
    app.run(main)
| [
"requests.post",
"rasterio.Affine.identity",
"io.BytesIO",
"absl.logging.info",
"time.sleep",
"numpy.array",
"os.remove",
"os.path.exists",
"sqlalchemy.orm.sessionmaker",
"google.oauth2.service_account.Credentials.from_service_account_file",
"osgeo.gdal.Warp",
"divdet.inference.utils_inference... | [((990, 1058), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""Google cloud project ID."""'], {}), "('gcp_project', None, 'Google cloud project ID.')\n", (1009, 1058), False, 'from absl import app, logging, flags\n'), ((1059, 1168), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pubsub_subscription_name"""', 'None', '"""Google cloud pubsub subscription name for queue."""'], {}), "('pubsub_subscription_name', None,\n 'Google cloud pubsub subscription name for queue.')\n", (1078, 1168), False, 'from absl import app, logging, flags\n'), ((1165, 1262), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""database_uri"""', 'None', '"""Address of database to store prediction results."""'], {}), "('database_uri', None,\n 'Address of database to store prediction results.')\n", (1184, 1262), False, 'from absl import app, logging, flags\n'), ((1259, 1362), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_outstanding_messages"""', '(1)', '"""Number of messages to have in local backlog."""'], {}), "('max_outstanding_messages', 1,\n 'Number of messages to have in local backlog.')\n", (1279, 1362), False, 'from absl import app, logging, flags\n'), ((1359, 1464), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""service_account_fpath"""', 'None', '"""Filepath to service account with pubsub access."""'], {}), "('service_account_fpath', None,\n 'Filepath to service account with pubsub access.')\n", (1378, 1464), False, 'from absl import app, logging, flags\n'), ((4163, 4193), 'PIL.Image.fromarray', 'PIL_Image.fromarray', (['numpy_arr'], {}), '(numpy_arr)\n', (4182, 4193), True, 'from PIL import Image as PIL_Image\n'), ((4273, 4282), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4280, 4282), False, 'from io import BytesIO\n'), ((4921, 4971), 'tqdm.tqdm', 'tqdm', (['generator'], {'desc': '"""Making inference requests."""'}), "(generator, desc='Making inference requests.')\n", 
(4925, 4971), False, 'from tqdm import tqdm\n'), ((9037, 9048), 'time.time', 'time.time', ([], {}), '()\n', (9046, 9048), False, 'import time\n'), ((21178, 21240), 'absl.logging.info', 'logging.info', (['"""***Processing complete *** %s"""', "msg_dict['url']"], {}), "('***Processing complete *** %s', msg_dict['url'])\n", (21190, 21240), False, 'from absl import app, logging, flags\n'), ((21638, 21670), 'sqlalchemy.create_engine', 'create_engine', (['db_uri'], {'echo': 'echo'}), '(db_uri, echo=echo)\n', (21651, 21670), False, 'from sqlalchemy import create_engine\n'), ((21675, 21707), 'divdet.surface_feature.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (21699, 21707), False, 'from divdet.surface_feature import Crater, Image, Base\n'), ((21722, 21747), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (21734, 21747), False, 'from sqlalchemy.orm import sessionmaker\n'), ((22030, 22044), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (22040, 22044), False, 'import time\n'), ((22786, 22858), 'google.cloud.pubsub_v1.types.FlowControl', 'pubsub_v1.types.FlowControl', ([], {'max_messages': 'FLAGS.max_outstanding_messages'}), '(max_messages=FLAGS.max_outstanding_messages)\n', (22813, 22858), False, 'from google.cloud import pubsub_v1\n'), ((23027, 23065), 'functools.partial', 'partial', (['proc_message'], {'session': 'session'}), '(proc_message, session=session)\n', (23034, 23065), False, 'from functools import partial\n'), ((23497, 23537), 'absl.logging.info', 'logging.info', (['"""\nFinished inference."""'], {}), '("""\nFinished inference.""")\n', (23509, 23537), False, 'from absl import app, logging, flags\n'), ((23568, 23581), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (23575, 23581), False, 'from absl import app, logging, flags\n'), ((2533, 2554), 'os.path.exists', 'op.exists', (['save_fpath'], {}), '(save_fpath)\n', (2542, 2554), True, 'import os.path as op\n'), 
((2679, 2743), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'allow_redirects': '(True)', 'timeout': '(10)'}), '(url, stream=True, allow_redirects=True, timeout=10)\n', (2691, 2743), False, 'import requests\n'), ((3908, 3929), 'os.path.exists', 'op.exists', (['save_fpath'], {}), '(save_fpath)\n', (3917, 3929), True, 'import os.path as op\n'), ((5279, 5330), 'json.dumps', 'json.dumps', (["{'inputs': {'input_tensor': instances}}"], {}), "({'inputs': {'input_tensor': instances}})\n", (5289, 5330), False, 'import json\n'), ((5366, 5403), 'requests.post', 'requests.post', (['endpoint'], {'data': 'payload'}), '(endpoint, data=payload)\n', (5379, 5403), False, 'import requests\n'), ((5424, 5448), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (5434, 5448), False, 'import json\n'), ((6605, 6640), 'divdet.inference.utils_inference.iter_grouper', 'iter_grouper', (['generator', 'batch_size'], {}), '(generator, batch_size)\n', (6617, 6640), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((7411, 7462), 'json.dumps', 'json.dumps', (["{'inputs': {'input_tensor': instances}}"], {}), "({'inputs': {'input_tensor': instances}})\n", (7421, 7462), False, 'import json\n'), ((9267, 9297), 'json.loads', 'json.loads', (["msg_dict['scales']"], {}), "(msg_dict['scales'])\n", (9277, 9297), False, 'import json\n'), ((9502, 9613), 'absl.logging.info', 'logging.info', (['f"""Image {msg_dict[\'pds_id\']} already exists in DB. Acknowledging message and skipping."""'], {}), '(\n f"Image {msg_dict[\'pds_id\']} already exists in DB. 
Acknowledging message and skipping."\n )\n', (9514, 9613), False, 'from absl import app, logging, flags\n'), ((10137, 10166), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10164, 10166), False, 'import tempfile\n'), ((21149, 21160), 'time.time', 'time.time', ([], {}), '()\n', (21158, 21160), False, 'import time\n'), ((21832, 21883), 'absl.logging.info', 'logging.info', (['"""Successfully connected to database."""'], {}), "('Successfully connected to database.')\n", (21844, 21883), False, 'from absl import app, logging, flags\n'), ((22196, 22347), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['FLAGS.service_account_fpath'], {'scopes': "['https://www.googleapis.com/auth/cloud-platform']"}), "(FLAGS.\n service_account_fpath, scopes=[\n 'https://www.googleapis.com/auth/cloud-platform'])\n", (22249, 22347), False, 'from google.oauth2 import service_account\n'), ((22384, 22435), 'google.cloud.pubsub_v1.SubscriberClient', 'pubsub_v1.SubscriberClient', ([], {'credentials': 'credentials'}), '(credentials=credentials)\n', (22410, 22435), False, 'from google.cloud import pubsub_v1\n'), ((22467, 22495), 'google.cloud.pubsub_v1.SubscriberClient', 'pubsub_v1.SubscriberClient', ([], {}), '()\n', (22493, 22495), False, 'from google.cloud import pubsub_v1\n'), ((3169, 3217), 'absl.logging.info', 'logging.info', (['f"""Downloaded file from URL: {url}"""'], {}), "(f'Downloaded file from URL: {url}')\n", (3181, 3217), False, 'from absl import app, logging, flags\n'), ((3843, 3895), 'absl.logging.info', 'logging.info', (['f"""Too many repeats, stopping on {url}"""'], {}), "(f'Too many repeats, stopping on {url}')\n", (3855, 3895), False, 'from absl import app, logging, flags\n'), ((3943, 3964), 'os.remove', 'os.remove', (['save_fpath'], {}), '(save_fpath)\n', (3952, 3964), False, 'import os\n'), ((5565, 5630), 'absl.logging.error', 'logging.error', (['f"""Error in 
prediction step. Raw json: {resp_json}"""'], {}), "(f'Error in prediction step. Raw json: {resp_json}')\n", (5578, 5630), False, 'from absl import app, logging, flags\n'), ((10645, 10685), 'absl.logging.error', 'logging.error', (['"""Image download errored."""'], {}), "('Image download errored.')\n", (10658, 10685), False, 'from absl import app, logging, flags\n'), ((10803, 10853), 'absl.logging.info', 'logging.info', (['"""Updating acknowledgement deadline."""'], {}), "('Updating acknowledgement deadline.')\n", (10815, 10853), False, 'from absl import app, logging, flags\n'), ((11375, 11439), 'absl.logging.info', 'logging.info', (['f"""Reprojecting image and saving to {image_fpath}."""'], {}), "(f'Reprojecting image and saving to {image_fpath}.')\n", (11387, 11439), False, 'from absl import app, logging, flags\n'), ((12837, 12863), 'rasterio.open', 'rasterio.open', (['image_fpath'], {}), '(image_fpath)\n', (12850, 12863), False, 'import rasterio\n'), ((19392, 19442), 'absl.logging.info', 'logging.info', (['"""Updating acknowledgement deadline."""'], {}), "('Updating acknowledgement deadline.')\n", (19404, 19442), False, 'from absl import app, logging, flags\n'), ((20667, 20716), 'divdet.inference.utils_inference.calculate_shape_props', 'calculate_shape_props', (["preds['resized_masks'][pi]"], {}), "(preds['resized_masks'][pi])\n", (20688, 20716), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((21330, 21355), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (21341, 21355), False, 'import time\n'), ((3356, 3393), 'absl.logging.error', 'logging.error', (['f"""HTTPError: {http_e}"""'], {}), "(f'HTTPError: {http_e}')\n", (3369, 3393), False, 'from absl import app, logging, flags\n'), ((3463, 3520), 'absl.logging.error', 'logging.error', (['f"""\nReceived 
invalid url error {url_e}"""'], {}), '(f"""\nReceived invalid url error {url_e}""")\n', (3476, 3520), False, 'from absl import app, logging, flags\n'), ((3670, 3715), 'absl.logging.error', 'logging.error', (['f"""Other error on {url}: {err}"""'], {}), "(f'Other error on {url}: {err}')\n", (3683, 3715), False, 'from absl import app, logging, flags\n'), ((7646, 7695), 'requests.post', 'requests.post', (['endpoint'], {'data': 'payload', 'timeout': '(10)'}), '(endpoint, data=payload, timeout=10)\n', (7659, 7695), False, 'import requests\n'), ((10882, 10934), 'absl.logging.info', 'logging.info', (['"""Mod to ack deadline failed. Skipping"""'], {}), "('Mod to ack deadline failed. Skipping')\n", (10894, 10934), False, 'from absl import app, logging, flags\n'), ((11458, 11489), 'rasterio.open', 'rasterio.open', (['image_fpath_orig'], {}), '(image_fpath_orig)\n', (11471, 11489), False, 'import rasterio\n'), ((11869, 11926), 'osgeo.gdal.Warp', 'gdal.Warp', (['image_fpath', 'image_fpath_orig'], {'dstSRS': 'eqc_proj'}), '(image_fpath, image_fpath_orig, dstSRS=eqc_proj)\n', (11878, 11926), False, 'from osgeo import gdal\n'), ((11943, 11981), 'absl.logging.info', 'logging.info', (['"""Reprojection complete."""'], {}), "('Reprojection complete.')\n", (11955, 11981), False, 'from absl import app, logging, flags\n'), ((13082, 13108), 'rasterio.Affine.identity', 'rasterio.Affine.identity', ([], {}), '()\n', (13106, 13108), False, 'import rasterio\n'), ((13145, 13179), 'rasterio.Affine', 'rasterio.Affine', (['(1)', '(0)', '(0)', '(0)', '(-1)', '(0)'], {}), '(1, 0, 0, 0, -1, 0)\n', (13160, 13179), False, 'import rasterio\n'), ((13395, 13440), 'absl.logging.info', 'logging.info', (['"""Processing at scale %s"""', 'scale'], {}), "('Processing at scale %s', scale)\n", (13407, 13440), False, 'from absl import app, logging, flags\n'), ((14026, 14219), 'divdet.inference.utils_inference.get_slice_bounds', 'get_slice_bounds', (['scaled_image.shape'], {'slice_size': "(msg_dict['window_size'], 
msg_dict['window_size'])", 'min_window_overlap': "(msg_dict['min_window_overlap'], msg_dict['min_window_overlap'])"}), "(scaled_image.shape, slice_size=(msg_dict['window_size'],\n msg_dict['window_size']), min_window_overlap=(msg_dict[\n 'min_window_overlap'], msg_dict['min_window_overlap']))\n", (14042, 14219), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((14549, 14597), 'divdet.inference.utils_inference.windowed_reads_numpy', 'windowed_reads_numpy', (['scaled_image', 'slice_bounds'], {}), '(scaled_image, slice_bounds)\n', (14569, 14597), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((15538, 15601), 'absl.logging.info', 'logging.info', (['"""Crater predictions at scale %s complete."""', 'scale'], {}), "('Crater predictions at scale %s complete.', scale)\n", (15550, 15601), False, 'from absl import app, logging, flags\n'), ((17980, 18033), 'absl.logging.info', 'logging.info', (['f"""Finished processing at scale {scale}"""'], {}), "(f'Finished processing at scale {scale}')\n", (17992, 18033), False, 'from absl import app, logging, flags\n'), ((19471, 19523), 'absl.logging.info', 'logging.info', (['"""Mod to ack deadline failed. Skipping"""'], {}), "('Mod to ack deadline failed. 
Skipping')\n", (19483, 19523), False, 'from absl import app, logging, flags\n'), ((20804, 20967), 'divdet.surface_feature.Crater', 'Crater', ([], {'geometry': 'export_geom', 'confidence': "preds['detection_scores'][pi]", 'eccentricity': "shape_props['eccentricity']", 'gradient_angle': '(-1)', 'image_id': 'image_obj.id'}), "(geometry=export_geom, confidence=preds['detection_scores'][pi],\n eccentricity=shape_props['eccentricity'], gradient_angle=-1, image_id=\n image_obj.id)\n", (20810, 20967), False, 'from divdet.surface_feature import Crater, Image, Base\n'), ((23439, 23491), 'absl.logging.info', 'logging.info', (['"""Timeout error on {subscription_path}"""'], {}), "('Timeout error on {subscription_path}')\n", (23451, 23491), False, 'from absl import app, logging, flags\n'), ((3580, 3623), 'absl.logging.error', 'logging.error', (['"""Known load error, retrying"""'], {}), "('Known load error, retrying')\n", (3593, 3623), False, 'from absl import app, logging, flags\n'), ((5850, 5895), 'numpy.array', 'np.array', (["resp_outputs['detection_scores'][0]"], {}), "(resp_outputs['detection_scores'][0])\n", (5858, 5895), True, 'import numpy as np\n'), ((7727, 7751), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (7737, 7751), False, 'import json\n'), ((8054, 8067), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8064, 8067), False, 'import time\n'), ((11330, 11359), 'os.path.splitext', 'op.splitext', (['image_fpath_orig'], {}), '(image_fpath_orig)\n', (11341, 11359), True, 'import os.path as op\n'), ((12287, 12344), 'osgeo.gdal.Warp', 'gdal.Warp', (['image_fpath', 'image_fpath_orig'], {'dstSRS': 'eqc_proj'}), '(image_fpath, image_fpath_orig, dstSRS=eqc_proj)\n', (12296, 12344), False, 'from osgeo import gdal\n'), ((12361, 12399), 'absl.logging.info', 'logging.info', (['"""Reprojection complete."""'], {}), "('Reprojection complete.')\n", (12373, 12399), False, 'from absl import app, logging, flags\n'), ((13535, 13584), 'absl.logging.info', 
'logging.info', (['"""Updated acknowledgement deadline."""'], {}), "('Updated acknowledgement deadline.')\n", (13547, 13584), False, 'from absl import app, logging, flags\n'), ((14990, 15001), 'time.time', 'time.time', ([], {}), '()\n', (14999, 15001), False, 'import time\n'), ((15901, 15929), 'numpy.int', 'np.int', (['(slice_set[0] / scale)'], {}), '(slice_set[0] / scale)\n', (15907, 15929), True, 'import numpy as np\n'), ((15965, 15993), 'numpy.int', 'np.int', (['(slice_set[1] / scale)'], {}), '(slice_set[1] / scale)\n', (15971, 15993), True, 'import numpy as np\n'), ((19098, 19168), 'divdet.inference.utils_inference.geospatial_polygon_transform', 'geospatial_polygon_transform', (["preds['polygons'][ind]", 'affine_transform'], {}), "(preds['polygons'][ind], affine_transform)\n", (19126, 19168), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((7903, 7947), 'absl.logging.error', 'logging.error', (['"""Problem in prediction step."""'], {}), "('Problem in prediction step.')\n", (7916, 7947), False, 'from absl import app, logging, flags\n'), ((8234, 8280), 'numpy.array', 'np.array', (["resp_outputs['detection_scores'][pi]"], {}), "(resp_outputs['detection_scores'][pi])\n", (8242, 8280), True, 'import numpy as np\n'), ((11267, 11296), 'os.path.splitext', 'op.splitext', (['image_fpath_orig'], {}), '(image_fpath_orig)\n', (11278, 11296), True, 'import os.path as op\n'), ((13629, 13681), 'absl.logging.info', 'logging.info', (['"""Mod to ack deadline failed. Skipping"""'], {}), "('Mod to ack deadline failed. 
Skipping')\n", (13641, 13681), False, 'from absl import app, logging, flags\n'), ((15121, 15132), 'time.time', 'time.time', ([], {}), '()\n', (15130, 15132), False, 'import time\n'), ((17520, 17578), 'divdet.inference.utils_inference.convert_mask_to_polygon', 'convert_mask_to_polygon', (['mask_binary', '(x_offset, y_offset)'], {}), '(mask_binary, (x_offset, y_offset))\n', (17543, 17578), False, 'from divdet.inference.utils_inference import iter_grouper, get_slice_bounds, windowed_reads_numpy, calculate_region_grad, calculate_shape_props, poly_non_max_suppression, convert_mask_to_polygon, geospatial_polygon_transform\n'), ((17230, 17244), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (17238, 17244), True, 'import numpy as np\n'), ((13789, 13864), 'skimage.transform.rescale', 'rescale', (['image', 'scale'], {'mode': '"""edge"""', 'preserve_range': '(True)', 'anti_aliasing': '(True)'}), "(image, scale, mode='edge', preserve_range=True, anti_aliasing=True)\n", (13796, 13864), False, 'from skimage.transform import rescale, resize\n'), ((17041, 17055), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (17049, 17055), True, 'import numpy as np\n'), ((16364, 16400), 'numpy.around', 'np.around', (['((box[3] - box[1]) / scale)'], {}), '((box[3] - box[1]) / scale)\n', (16373, 16400), True, 'import numpy as np\n'), ((16455, 16491), 'numpy.around', 'np.around', (['((box[2] - box[0]) / scale)'], {}), '((box[2] - box[0]) / scale)\n', (16464, 16491), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os, io, random
import string
import numpy as np
from Bio.Seq import Seq
from Bio.Align import MultipleSeqAlignment
from Bio import AlignIO, SeqIO
from io import StringIO
import panel as pn
import panel.widgets as pnw
import pandas as pd
pn.extension()
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Plot, Grid, Range1d
from bokeh.models.glyphs import Text, Rect
from bokeh.layouts import gridplot
# from bokeh.palettes import mpl
from bokeh.palettes import brewer
import random
from Bio import pairwise2
from bokeh.io import export_svgs, export_png
from tqdm import tqdm
from Bio.Align.Applications import MuscleCommandline
# Number of natural sequences written into each FASTA file alongside the
# generated one (top rows of the per-sample score table).
msa_size = 10
# CSV of functional generated signal peptides; its 'seq' column is read below.
func_filename='functional_gen_sps_200129.csv'
def view_alignment(aln, fontsize="9pt", plot_width=800):
    """Bokeh sequence alignment view.

    Renders a multiple sequence alignment as a colour-coded residue grid
    with the residue letters drawn on top, scrollable along the x axis.

    Parameters
    ----------
    aln : iterable of SeqRecord-like objects
        Alignment rows; each record must expose ``id`` and ``seq``.
    fontsize : str
        CSS font size for the residue letters.
    plot_width : int
        Plot width in pixels.

    Returns
    -------
    Bokeh gridplot containing the detailed (scrollable) alignment view.
    """
    # make sequence and id lists from the aln object
    ids = [rec.id for rec in aln]
    seqs = [rec.seq for rec in aln]
    # sort rows for a deterministic display order (reverse-lexicographic by id)
    ids, seqs = zip(*sorted(zip(ids, seqs), reverse=True))
    text = [i for s in list(seqs) for i in s]
    colors = get_colors(seqs)
    N = len(seqs[0])  # alignment length (columns)
    S = len(seqs)     # number of sequences (rows)
    x = np.arange(1, N + 1)
    y = np.arange(0, S, 1)
    # creates a 2D grid of coords from the 1D arrays
    xx, yy = np.meshgrid(x, y)
    # flattens the arrays
    gx = xx.ravel()
    gy = yy.flatten()
    # use recty for rect coords with an offset
    recty = gy + .5
    # now we can create the ColumnDataSource with all the arrays
    source = ColumnDataSource(dict(x=gx, y=gy, recty=recty, text=text, colors=colors))
    plot_height = len(seqs)*15 + 50
    x_range = Range1d(0.5, N + 1.5, bounds='auto')
    if N > 100:
        viewlen = 100
    else:
        viewlen = N
    # view_range is for the close up view
    view_range = (0.5, viewlen + 0.5)
    tools = "xpan, xwheel_zoom, reset, save"
    # entire sequence overview (no text, with zoom); built for parity with the
    # original code but not part of the returned layout
    p = figure(title=None, plot_width=plot_width, plot_height=50,
               x_range=x_range, y_range=(0, S), tools=tools,
               min_border=0, toolbar_location='below')
    rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors",
                 line_color=None, fill_alpha=0.6)
    p.add_glyph(source, rects)
    p.yaxis.visible = False
    p.grid.visible = False
    # sequence text view with ability to scroll along x axis
    p1 = figure(title=None, plot_width=plot_width, plot_height=plot_height,
                x_range=view_range, y_range=ids, tools="xpan,reset, previewsave",
                min_border=0, toolbar_location='below', output_backend="svg")
    rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors",
                 line_color=None, fill_alpha=0.5)
    glyph = Text(x="x", y="y", text="text", text_align='center', text_color="black",
                 text_font="monospace", text_font_size=fontsize)
    p1.add_glyph(source, glyph)
    p1.add_glyph(source, rects)
    p1.grid.visible = False
    p1.xaxis.major_label_text_font_style = "bold"
    p1.yaxis.minor_tick_line_width = 0
    p1.yaxis.major_tick_line_width = 0
    # NOTE(review): the original also built gridplot([[p], [p1]]) and then
    # discarded it; that dead statement (and the unused locals `width`, `h`)
    # is removed. Only the detailed view is returned, exactly as before.
    return gridplot([[p1]])
def get_colors(seqs):
    """make colors for bases in sequence

    Returns one colour per character across all sequences (flattened row by
    row), using ClustalW-like amino-acid groupings:
    http://www.jalview.org/help/html/colourSchemes/clustal.html

    Fixes vs. the original: the brewer palette is copied before being sorted
    and shuffled (the original mutated the shared bokeh.palettes list in
    place), and a private Random(26) instance is used instead of seeding the
    global `random` module. The seed and shuffle algorithm are unchanged, so
    the colour assignment is identical.
    """
    text = [i for s in list(seqs) for i in s]
    clrs = {}
    # copy — brewer returns a shared list; sorting it in place would corrupt
    # the palette for every other user in this process
    ref_colors = list(brewer['Paired'][8])  # https://docs.bokeh.org/en/latest/docs/reference/palettes.html
    ref_colors.sort()
    rng = random.Random(26)  # same sequence as random.seed(26) + random.shuffle
    rng.shuffle(ref_colors)
    for aa in list('AILMFWV'):
        clrs.update({aa: ref_colors[0]})
    for aa in list('KR'):
        clrs.update({aa: ref_colors[1]})
    for aa in list('DE'):
        clrs.update({aa: ref_colors[2]})
    for aa in list('NQST'):
        clrs.update({aa: ref_colors[3]})
    clrs.update({'C': ref_colors[4]})
    clrs.update({'G': ref_colors[5]})
    clrs.update({'P': ref_colors[6]})
    clrs.update({'H': ref_colors[7]})
    clrs.update({'Y': ref_colors[7]})
    clrs.update({'-': 'white'})
    colors = [clrs[i] for i in text]
    return colors
# Score table: rows pair a 'generated' sequence with a 'natural' candidate,
# ordered so the best match for a sample comes first (used via _df.iloc[0]).
df = pd.read_csv('sp_top_100_scores.csv')
df_func = pd.read_csv(func_filename)
# Unique functional generated sequences to process.
func_sps = list(set(df_func['seq'].values))
closest_matches = []
closest_identities = []
for sample_sp in tqdm(func_sps):
    # Rows whose generated sequence matches the current sample.
    _df = df[df['generated'] == sample_sp]
    # NOTE(review): bare expression below is a no-op outside a notebook.
    _df
    # Generate .fasta input
    gen_sp = sample_sp
    fasta_filename = 'fasta/' + gen_sp + '.fasta'
    aln_filename = 'alns/' + gen_sp + '.aln'
    with open(fasta_filename, 'w+') as f:
        f.write('>gen_sp\n')
        f.write(gen_sp + '\n')
        # Top `msa_size` natural matches go into the same FASTA file.
        sps = _df[:msa_size]['natural'].values
        for i, sp in enumerate(sps):
            f.write(f'>nat_sp{i}\n')
            f.write(f'{sp}\n')
    # Make MUSCLE alignment
    # http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc81
    # NOTE(review): redundant — MuscleCommandline is already imported at the top.
    from Bio.Align.Applications import MuscleCommandline
    muscle_exe = r"/home/wuzachar/bin/muscle3.8.31_i86linux64"
    muscle_cline = MuscleCommandline(muscle_exe, input=fasta_filename, out=aln_filename)
    stdout, stderr = muscle_cline()
    # aln = list(AlignIO.parse(aln_filename, "fasta"))
    # Get percent identity
    # Identity is computed against the single top-ranked natural match
    # (first row of _df), not against the whole MSA.
    gen_sp = _df.iloc[0]['generated']
    nat_sp = _df.iloc[0]['natural']
    actual_sp = gen_sp.replace('-','')
    actual_length = len(actual_sp)
    # Global pairwise alignment: match=2, mismatch=-1, gap open/extend=-1.
    alignments = pairwise2.align.globalms(gen_sp, nat_sp, 2, -1, -1, -1)
    aln_gen, aln_nat, score, _, _ = alignments[0]
    match_count = 0
    # Count aligned positions where a non-gap generated residue equals the
    # natural residue.
    for i in range(len(aln_gen)):
        if aln_gen[i] != '-':
            if aln_gen[i] == aln_nat[i]:
                match_count += 1
    print(aln_gen)
    print(aln_nat)
    # Normalised by the ungapped generated-sequence length.
    percent_identity = match_count / actual_length
    print(match_count, actual_length, f"{percent_identity*100:0.2f} % identity")
    # Export alignments
    # aln = AlignIO.read('alns/sample_aln.fasta','fasta')
    aln = AlignIO.read(aln_filename,'fasta')
    p = view_alignment(aln, plot_width=800)
    export_svgs(p, filename="figs/" + sample_sp + ".svg")
    # export_png(p, filename="figs/" + sample_sp + ".png")
    # pn.pane.Bokeh(p)
    # export_svgs(p, filename="figs/" + sample_sp + ".svg")
    closest_matches.append(aln_nat.replace('-',''))
    closest_identities.append(percent_identity)
# write final file
df_func['closest_match'] = closest_matches
df_func['percent_identity'] = closest_identities
df_func.to_csv('alns/func_sps_matches.csv')
| [
"Bio.pairwise2.align.globalms",
"Bio.AlignIO.read",
"bokeh.plotting.figure",
"pandas.read_csv",
"random.shuffle",
"tqdm.tqdm",
"bokeh.models.Range1d",
"random.seed",
"panel.extension",
"bokeh.layouts.gridplot",
"Bio.Align.Applications.MuscleCommandline",
"bokeh.io.export_svgs",
"bokeh.models... | [((270, 284), 'panel.extension', 'pn.extension', ([], {}), '()\n', (282, 284), True, 'import panel as pn\n'), ((4193, 4229), 'pandas.read_csv', 'pd.read_csv', (['"""sp_top_100_scores.csv"""'], {}), "('sp_top_100_scores.csv')\n", (4204, 4229), True, 'import pandas as pd\n'), ((4241, 4267), 'pandas.read_csv', 'pd.read_csv', (['func_filename'], {}), '(func_filename)\n', (4252, 4267), True, 'import pandas as pd\n'), ((4376, 4390), 'tqdm.tqdm', 'tqdm', (['func_sps'], {}), '(func_sps)\n', (4380, 4390), False, 'from tqdm import tqdm\n'), ((1190, 1209), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {}), '(1, N + 1)\n', (1199, 1209), True, 'import numpy as np\n'), ((1215, 1233), 'numpy.arange', 'np.arange', (['(0)', 'S', '(1)'], {}), '(0, S, 1)\n', (1224, 1233), True, 'import numpy as np\n'), ((1297, 1314), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1308, 1314), True, 'import numpy as np\n'), ((1656, 1692), 'bokeh.models.Range1d', 'Range1d', (['(0.5)', '(N + 1.5)'], {'bounds': '"""auto"""'}), "(0.5, N + 1.5, bounds='auto')\n", (1663, 1692), False, 'from bokeh.models import ColumnDataSource, Plot, Grid, Range1d\n'), ((1929, 2076), 'bokeh.plotting.figure', 'figure', ([], {'title': 'None', 'plot_width': 'plot_width', 'plot_height': '(50)', 'x_range': 'x_range', 'y_range': '(0, S)', 'tools': 'tools', 'min_border': '(0)', 'toolbar_location': '"""below"""'}), "(title=None, plot_width=plot_width, plot_height=50, x_range=x_range,\n y_range=(0, S), tools=tools, min_border=0, toolbar_location='below')\n", (1935, 2076), False, 'from bokeh.plotting import figure\n'), ((2115, 2215), 'bokeh.models.glyphs.Rect', 'Rect', ([], {'x': '"""x"""', 'y': '"""recty"""', 'width': '(1)', 'height': '(1)', 'fill_color': '"""colors"""', 'line_color': 'None', 'fill_alpha': '(0.6)'}), "(x='x', y='recty', width=1, height=1, fill_color='colors', line_color=\n None, fill_alpha=0.6)\n", (2119, 2215), False, 'from bokeh.models.glyphs import Text, Rect\n'), ((2385, 
2588), 'bokeh.plotting.figure', 'figure', ([], {'title': 'None', 'plot_width': 'plot_width', 'plot_height': 'plot_height', 'x_range': 'view_range', 'y_range': 'ids', 'tools': '"""xpan,reset, previewsave"""', 'min_border': '(0)', 'toolbar_location': '"""below"""', 'output_backend': '"""svg"""'}), "(title=None, plot_width=plot_width, plot_height=plot_height, x_range=\n view_range, y_range=ids, tools='xpan,reset, previewsave', min_border=0,\n toolbar_location='below', output_backend='svg')\n", (2391, 2588), False, 'from bokeh.plotting import figure\n'), ((2640, 2740), 'bokeh.models.glyphs.Rect', 'Rect', ([], {'x': '"""x"""', 'y': '"""recty"""', 'width': '(1)', 'height': '(1)', 'fill_color': '"""colors"""', 'line_color': 'None', 'fill_alpha': '(0.5)'}), "(x='x', y='recty', width=1, height=1, fill_color='colors', line_color=\n None, fill_alpha=0.5)\n", (2644, 2740), False, 'from bokeh.models.glyphs import Text, Rect\n'), ((2765, 2889), 'bokeh.models.glyphs.Text', 'Text', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""text"""', 'text_align': '"""center"""', 'text_color': '"""black"""', 'text_font': '"""monospace"""', 'text_font_size': 'fontsize'}), "(x='x', y='y', text='text', text_align='center', text_color='black',\n text_font='monospace', text_font_size=fontsize)\n", (2769, 2889), False, 'from bokeh.models.glyphs import Text, Rect\n'), ((3130, 3177), 'bokeh.layouts.gridplot', 'gridplot', (['[[p], [p1]]'], {'toolbar_location': '"""below"""'}), "([[p], [p1]], toolbar_location='below')\n", (3138, 3177), False, 'from bokeh.layouts import gridplot\n'), ((3188, 3204), 'bokeh.layouts.gridplot', 'gridplot', (['[[p1]]'], {}), '([[p1]])\n', (3196, 3204), False, 'from bokeh.layouts import gridplot\n'), ((3596, 3611), 'random.seed', 'random.seed', (['(26)'], {}), '(26)\n', (3607, 3611), False, 'import random\n'), ((3616, 3642), 'random.shuffle', 'random.shuffle', (['ref_colors'], {}), '(ref_colors)\n', (3630, 3642), False, 'import random\n'), ((5081, 5150), 
'Bio.Align.Applications.MuscleCommandline', 'MuscleCommandline', (['muscle_exe'], {'input': 'fasta_filename', 'out': 'aln_filename'}), '(muscle_exe, input=fasta_filename, out=aln_filename)\n', (5098, 5150), False, 'from Bio.Align.Applications import MuscleCommandline\n'), ((5437, 5492), 'Bio.pairwise2.align.globalms', 'pairwise2.align.globalms', (['gen_sp', 'nat_sp', '(2)', '(-1)', '(-1)', '(-1)'], {}), '(gen_sp, nat_sp, 2, -1, -1, -1)\n', (5461, 5492), False, 'from Bio import pairwise2\n'), ((5966, 6001), 'Bio.AlignIO.read', 'AlignIO.read', (['aln_filename', '"""fasta"""'], {}), "(aln_filename, 'fasta')\n", (5978, 6001), False, 'from Bio import AlignIO, SeqIO\n'), ((6049, 6102), 'bokeh.io.export_svgs', 'export_svgs', (['p'], {'filename': "('figs/' + sample_sp + '.svg')"}), "(p, filename='figs/' + sample_sp + '.svg')\n", (6060, 6102), False, 'from bokeh.io import export_svgs, export_png\n')] |
import numpy as np
from rlscore.learner import LeaveOneOutRLS
from rlscore.measure import sqerror
from housing_data import load_housing
def train_rls():
    """Jointly tune the Gaussian-kernel width and the regularisation parameter.

    For every candidate gamma a LeaveOneOutRLS learner is fitted; it evaluates
    every regparam via leave-one-out cross-validation internally. The
    (gamma, regparam) pair with the lowest LOO error is kept and its learner
    is evaluated on the held-out test set.
    """
    X_train, Y_train, X_test, Y_test = load_housing()
    regparams = [2.**i for i in range(-15, 16)]
    gammas = regparams
    best_regparam = None
    best_gamma = None
    best_error = float("inf")
    best_learner = None
    for candidate_gamma in gammas:
        # A fresh learner is fitted per kernel parameter; LOO errors for all
        # regparams are returned in cv_performances.
        candidate = LeaveOneOutRLS(X_train, Y_train, kernel="GaussianKernel",
                                 gamma=candidate_gamma, regparams=regparams)
        loo_error = np.min(candidate.cv_performances)
        if loo_error < best_error:
            best_error = loo_error
            best_regparam = candidate.regparam
            best_gamma = candidate_gamma
            best_learner = candidate
    P_test = best_learner.predict(X_test)
    print("best parameters gamma %f regparam %f" %(best_gamma, best_regparam))
    print("best leave-one-out error %f" %best_error)
    print("test error %f" %sqerror(Y_test, P_test))
if __name__=="__main__":
    # Run the hyper-parameter search when executed as a script.
    train_rls()
| [
"rlscore.measure.sqerror",
"housing_data.load_housing",
"rlscore.learner.LeaveOneOutRLS",
"numpy.min"
] | [((278, 292), 'housing_data.load_housing', 'load_housing', ([], {}), '()\n', (290, 292), False, 'from housing_data import load_housing\n'), ((566, 661), 'rlscore.learner.LeaveOneOutRLS', 'LeaveOneOutRLS', (['X_train', 'Y_train'], {'kernel': '"""GaussianKernel"""', 'gamma': 'gamma', 'regparams': 'regparams'}), "(X_train, Y_train, kernel='GaussianKernel', gamma=gamma,\n regparams=regparams)\n", (580, 661), False, 'from rlscore.learner import LeaveOneOutRLS\n'), ((670, 701), 'numpy.min', 'np.min', (['learner.cv_performances'], {}), '(learner.cv_performances)\n', (676, 701), True, 'import numpy as np\n'), ((1068, 1091), 'rlscore.measure.sqerror', 'sqerror', (['Y_test', 'P_test'], {}), '(Y_test, P_test)\n', (1075, 1091), False, 'from rlscore.measure import sqerror\n')] |
"""
Gaussian Transformation with Scikit-learn
- Scikit-learn has recently released transformers to do Gaussian mappings as they call the variable transformations.
The PowerTransformer allows to do Box-Cox and Yeo-Johnson transformation.
With the FunctionTransformer, we can specify any function we want.
- The transformers per say, do not allow to select columns, but we can do so using a third transformer,
the ColumnTransformer
- Another thing to keep in mind is that Scikit-learn transformers return NumPy arrays, and not dataframes,
so we need to be mindful of the order of the columns not to mess up with our features.
Important
- Box-Cox and Yeo-Johnson transformations need to learn their parameters from the data.
Therefore, as always, before attempting any transformation it is important to divide the dataset into train
and test set.
- In this example, I will not do so for simplicity, but when using this transformation in your pipelines,
please make sure you do so.
In this example
We will see how to implement variable transformations using Scikit-learn and the House Prices dataset.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from sklearn.preprocessing import FunctionTransformer, PowerTransformer
# load the data
data = pd.read_csv('dataset/house-prices-advanced-regression-techniques/train.csv')
# NOTE(review): head() is a no-op in a plain script — it only displays output
# in a notebook/REPL.
data.head()
"""
Id MSSubClass MSZoning LotFrontage LotArea Street Alley LotShape LandContour Utilities ... PoolArea PoolQC Fence MiscFeature MiscVal MoSold YrSold SaleType SaleCondition SalePrice
0 1 60 RL 65.0 8450 Pave NaN Reg Lvl AllPub ... 0 NaN NaN NaN 0 2 2008 WD Normal 208500
1 2 20 RL 80.0 9600 Pave NaN Reg Lvl AllPub ... 0 NaN NaN NaN 0 5 2007 WD Normal 181500
2 3 60 RL 68.0 11250 Pave NaN IR1 Lvl AllPub ... 0 NaN NaN NaN 0 9 2008 WD Normal 223500
3 4 70 RL 60.0 9550 Pave NaN IR1 Lvl AllPub ... 0 NaN NaN NaN 0 2 2006 WD Abnorml 140000
4 5 60 RL 84.0 14260 Pave NaN IR1 Lvl AllPub ... 0 NaN NaN NaN 0 12 2008 WD Normal 250000 """
"""
- Let's select the numerical and positive variables in the dataset for this example.
As most of the transformations require the variables to be positive. """
# Keep the numerical, strictly-positive columns (Id excluded): the
# transformations below (log, reciprocal, Box-Cox) require positive values.
cols = []
for col in data.columns:
    if data[col].dtypes != 'O' and col != 'Id':  # if the variable is numerical
        # Positive check without the np.where/np.sum round-trip. NaN
        # comparisons evaluate to False, so NaN entries do not disqualify a
        # column — same behaviour as the original count-based test.
        if not (data[col] <= 0).any():  # if the variable is positive
            cols.append(col)  # append variable to the list
cols
""" ['MSSubClass',
'LotFrontage',
'LotArea',
'OverallQual',
'OverallCond',
'YearBuilt',
'YearRemodAdd',
'1stFlrSF',
'GrLivArea',
'TotRmsAbvGrd',
'GarageYrBlt',
'MoSold',
'YrSold',
'SalePrice'] """
# let's explore the distribution of the numerical variables
# (one histogram per selected column on a shared 20x20-inch canvas)
data[cols].hist(figsize=(20,20))
plt.show()
"""
Plots to assess normality
- To visualise the distribution of the variables, we plot a histogram and a Q-Q plot.
In the Q-Q pLots, if the variable is normally distributed, the values of the variable should fall in a
45 degree line when plotted against the theoretical quantiles.
"""
# plot the histograms to have a quick look at the variable distribution
# histogram and Q-Q plots
def diagnostic_plots(df, variable):
    """Draw a histogram (left) and a normal Q-Q plot (right) for one column.

    Parameters
    ----------
    df : pandas.DataFrame
        Data holding the column to inspect.
    variable : str
        Name of the column whose distribution is plotted.
    """
    plt.figure(figsize=(15, 6))

    # Left panel: histogram of the raw values.
    plt.subplot(1, 2, 1)
    df[variable].hist(bins=30)

    # Right panel: Q-Q plot against theoretical normal quantiles — points on
    # the 45-degree line indicate a normal distribution.
    plt.subplot(1, 2, 2)
    stats.probplot(df[variable], dist="norm", plot=plt)

    plt.show()
# Logarithmic transformation
# create a log transformer
transformer = FunctionTransformer(np.log, validate=True)
# transform all the numerical and positive variables
# (NaNs are filled with 1, which maps to log(1) = 0)
data_t = transformer.transform(data[cols].fillna(1))
# Scikit-learn returns NumPy arrays, so capture in dataframe
# note that Scikit-learn will return an array with
# only the columns indicated in cols
data_t = pd.DataFrame(data_t, columns = cols)
# original distribution
diagnostic_plots(data, 'GrLivArea')
# transformed distribution
diagnostic_plots(data_t, 'GrLivArea')
# original distribution
diagnostic_plots(data, 'MSSubClass')
# transformed distribution
diagnostic_plots(data_t, 'MSSubClass')
# Reciprocal transformation
# create the transformer
transformer = FunctionTransformer(lambda x: 1/x, validate=True)
# also # transformer = FunctionTransformer(np.reciprocal, validate=True)
# transform the positive variables
data_t = transformer.transform(data[cols].fillna(1))
# re-capture in a dataframe
data_t = pd.DataFrame(data_t, columns = cols)
# transformed variable
diagnostic_plots(data_t, 'GrLivArea')
# transformed variable
diagnostic_plots(data_t, 'MSSubClass')
# Square root transformation
transformer = FunctionTransformer(lambda x: x**(1/2), validate=True)
# also
# transformer = FunctionTransformer(np.sqrt, validate=True)
data_t = transformer.transform(data[cols].fillna(1))
data_t = pd.DataFrame(data_t, columns = cols)
diagnostic_plots(data_t, 'GrLivArea')
diagnostic_plots(data_t, 'MSSubClass')
# Exponential
# (exponent 1/1.2 is a mild power transform, not e**x)
transformer = FunctionTransformer(lambda x: x**(1/1.2), validate=True)
data_t = transformer.transform(data[cols].fillna(1))
data_t = pd.DataFrame(data_t, columns = cols)
diagnostic_plots(data_t, 'GrLivArea')
diagnostic_plots(data_t, 'MSSubClass')
# Box-Cox transformation
# create the transformer
transformer = PowerTransformer(method='box-cox', standardize=False)
# find the optimal lambda using the train set
# (Box-Cox requires strictly positive input, hence the positive-only cols)
transformer.fit(data[cols].fillna(1))
# transform the data
data_t = transformer.transform(data[cols].fillna(1))
# capture data in a dataframe
data_t = pd.DataFrame(data_t, columns = cols)
diagnostic_plots(data_t, 'GrLivArea')
diagnostic_plots(data_t, 'MSSubClass')
"""
Yeo-Johnson
- Yeo-Johnson is an adaptation of Box-Cox that can also be used in negative value variables.
So let's expand the list of variables for the example, to include those that contain zero and negative
values as well.
"""
# Expanded column list for Yeo-Johnson: may include zero/negative variables.
cols = [
    'MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual',
    'OverallCond', 'MasVnrArea', 'BsmtFinSF1',
    'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF',
    'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath',
    'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd',
    'Fireplaces', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'WoodDeckSF',
    'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea',
    'MiscVal', 'SalePrice'
]
# call the transformer
transformer = PowerTransformer(method='yeo-johnson', standardize=False)
# learn the lambda from the train set
transformer.fit(data[cols].fillna(1))
# transform the data
data_t = transformer.transform(data[cols].fillna(1))
# capture data in a dataframe
data_t = pd.DataFrame(data_t, columns = cols)
diagnostic_plots(data_t, 'GrLivArea')
diagnostic_plots(data_t, 'MSSubClass')
| [
"pandas.read_csv",
"numpy.where",
"sklearn.preprocessing.PowerTransformer",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"sklearn.preprocessing.FunctionTransformer",
"scipy.stats.probplot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1400, 1476), 'pandas.read_csv', 'pd.read_csv', (['"""dataset/house-prices-advanced-regression-techniques/train.csv"""'], {}), "('dataset/house-prices-advanced-regression-techniques/train.csv')\n", (1411, 1476), True, 'import pandas as pd\n'), ((3426, 3436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3434, 3436), True, 'import matplotlib.pyplot as plt\n'), ((4254, 4296), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['np.log'], {'validate': '(True)'}), '(np.log, validate=True)\n', (4273, 4296), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((4571, 4605), 'pandas.DataFrame', 'pd.DataFrame', (['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (4583, 4605), True, 'import pandas as pd\n'), ((4948, 4999), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda x: 1 / x)'], {'validate': '(True)'}), '(lambda x: 1 / x, validate=True)\n', (4967, 4999), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((5207, 5241), 'pandas.DataFrame', 'pd.DataFrame', (['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (5219, 5241), True, 'import pandas as pd\n'), ((5422, 5480), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda x: x ** (1 / 2))'], {'validate': '(True)'}), '(lambda x: x ** (1 / 2), validate=True)\n', (5441, 5480), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((5616, 5650), 'pandas.DataFrame', 'pd.DataFrame', (['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (5628, 5650), True, 'import pandas as pd\n'), ((5768, 5828), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda x: x ** (1 / 1.2))'], {'validate': '(True)'}), '(lambda x: x ** (1 / 1.2), validate=True)\n', (5787, 5828), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((5893, 5927), 'pandas.DataFrame', 'pd.DataFrame', 
(['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (5905, 5927), True, 'import pandas as pd\n'), ((6080, 6133), 'sklearn.preprocessing.PowerTransformer', 'PowerTransformer', ([], {'method': '"""box-cox"""', 'standardize': '(False)'}), "(method='box-cox', standardize=False)\n", (6096, 6133), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((6343, 6377), 'pandas.DataFrame', 'pd.DataFrame', (['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (6355, 6377), True, 'import pandas as pd\n'), ((7278, 7335), 'sklearn.preprocessing.PowerTransformer', 'PowerTransformer', ([], {'method': '"""yeo-johnson"""', 'standardize': '(False)'}), "(method='yeo-johnson', standardize=False)\n", (7294, 7335), False, 'from sklearn.preprocessing import FunctionTransformer, PowerTransformer\n'), ((7537, 7571), 'pandas.DataFrame', 'pd.DataFrame', (['data_t'], {'columns': 'cols'}), '(data_t, columns=cols)\n', (7549, 7571), True, 'import pandas as pd\n'), ((3993, 4020), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (4003, 4020), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4045), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4036, 4045), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4103), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4094, 4103), True, 'import matplotlib.pyplot as plt\n'), ((4109, 4160), 'scipy.stats.probplot', 'stats.probplot', (['df[variable]'], {'dist': '"""norm"""', 'plot': 'plt'}), "(df[variable], dist='norm', plot=plt)\n", (4123, 4160), True, 'import scipy.stats as stats\n'), ((4166, 4176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4174, 4176), True, 'import matplotlib.pyplot as plt\n'), ((2966, 2996), 'numpy.where', 'np.where', (['(data[col] <= 0)', '(1)', '(0)'], {}), '(data[col] <= 0, 1, 0)\n', (2974, 2996), True, 'import numpy as np\n')] |
"""
.. versionadded:: 0.4
This function generates Uniform white noise series. This function uses
`numpy.random.uniform`.
Function Documentation
======================================
"""
import numpy as np
def uniform_white_noise(n, minimum=-1, maximum=1):
    """
    Generate uniformly distributed random noise.

    **Args:**

    * `n` - number of samples in the output (int)

    **Kwargs:**

    * `minimum` - lower bound of the distribution (float)

    * `maximum` - upper bound of the distribution (float)

    **Returns:**

    * 1d array of `n` noise values drawn from U(minimum, maximum)
    """
    noise = np.random.uniform(minimum, maximum, n)
    return noise
| [
"numpy.random.uniform"
] | [((632, 684), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'minimum', 'high': 'maximum', 'size': 'n'}), '(low=minimum, high=maximum, size=n)\n', (649, 684), True, 'import numpy as np\n')] |
import json
import numpy as np
import itertools
import sys
# Command line: <dir> <subdir> <num_classes> <num_layers>
path_name = sys.argv[1] + "/" + sys.argv[2]
# Per-example prediction matrix saved by an earlier step.
# NOTE(review): usage below indicates one column per layer plus two trailing
# columns (compared as model prediction and label) — confirm against the
# producer of pred_labels_valid.npy.
pred_labels = np.load("./tmp/" + path_name + "/pred_labels_valid.npy")
num_classes = int(sys.argv[3])
num_layers = int(sys.argv[4])
# All candidate layer indices [0, num_layers).
total_layers = [x for x in range(num_layers)]
print("path_name: {}, num_classes: {}, num_layers: {}".format(path_name, num_classes, num_layers))
def calculate_accuracy(layers, pred_label_idx):
    """Majority-vote accuracy of the chosen layers on the selected examples.

    For each row index in `pred_label_idx`, every layer in `layers` casts one
    vote for its predicted class; the argmax of the votes is compared with the
    last column of the module-level `pred_labels` array (used as the label
    throughout this script).

    Args:
        layers: iterable of layer indices participating in the vote.
        pred_label_idx: 1-D array of row indices into `pred_labels`.

    Returns:
        float: fraction of rows whose vote matches the last column.
    """
    kde_preds = np.zeros([pred_label_idx.shape[0], num_classes])
    # enumerate replaces the manually-maintained count_idx of the original
    for row, idx in enumerate(pred_label_idx):
        for layer_idx in layers:
            kde_preds[row][int(pred_labels[idx][layer_idx])] += 1
    kde_pred = np.argmax(kde_preds, axis=1)
    kde_accuracy = np.mean(kde_pred == pred_labels[pred_label_idx].T[-1])
    return kde_accuracy
def selected_layer_condition(idx_in_origin):
    """Exhaustively search layer subsets for the best voting accuracy.

    Tries every non-empty combination of layers (2**num_layers - 1 subsets)
    and keeps the one with the highest majority-vote accuracy on the given
    rows. Ties are resolved in favour of the combination visited last
    (>= comparison), matching the original search order.

    Args:
        idx_in_origin: 1-D array of row indices into `pred_labels`.

    Returns:
        tuple: (selected_layers, kde_acc) — the best layer tuple and its
        accuracy.
    """
    max_acc = 0
    selected_layers = None
    for count in range(1, num_layers+1):
        for layers in itertools.combinations(total_layers, count):
            acc = calculate_accuracy(layers, idx_in_origin)
            if acc >= max_acc:
                max_acc = acc
                selected_layers = layers
    # The winning accuracy is already known — the original redundantly
    # re-ran calculate_accuracy on selected_layers here.
    kde_acc = max_acc
    # Baseline: the model's own accuracy on the same rows
    # (column -2 = model prediction, column -1 = label).
    model_acc = np.mean(pred_labels[idx_in_origin].T[-2] == pred_labels[idx_in_origin].T[-1])
    print("selected layers: {}, acc: {}".format(selected_layers, kde_acc))
    print("model acc: {}\n".format(model_acc))
    return selected_layers, kde_acc
def selected_layer_for_label(label, selected_layers_dict, weights_dict):
    """For all samples the model predicts as ``label``, flag suspicious
    (likely misclassified) samples via layer agreement, then search the best
    layer subset per true-label condition and store it (plus a weight) in
    the two output dicts (keyed by str(label) + str(label_con))."""
    # split dataset into subset according to their predictions
    pred_label_idx = np.where(pred_labels.T[-2] == label)[0]
    # count the number of layers that agree with the final prediction
    num_selected_layers = len(layers_agree[str(label)])
    pred_count = np.zeros([pred_labels[pred_label_idx].shape[0], num_selected_layers])
    misbh_count = np.zeros([pred_labels[pred_label_idx].shape[0], num_selected_layers])
    for idx in range(pred_labels[pred_label_idx].shape[0]):
        count_idx = 0
        # for each monitored layer: does it agree (pred_count) or disagree
        # (misbh_count) with the model's final prediction in column -2?
        for layer_idx in layers_agree[str(label)]:
            if pred_labels[pred_label_idx[idx]][layer_idx] == pred_labels[pred_label_idx[idx]][-2]:
                pred_count[idx][count_idx] = 1
            if pred_labels[pred_label_idx[idx]][layer_idx] != pred_labels[pred_label_idx[idx]][-2]:
                misbh_count[idx][count_idx] = 1
            count_idx += 1
    # calculate confidence
    sum_pred_example = np.sum(pred_count, axis=1)
    sum_misbh_example = np.sum(misbh_count, axis=1)
    # a sample is "positive" (suspected misbehaviour) when at least as many
    # layers disagree with the final prediction as agree with it
    pos_indexes = np.where(sum_misbh_example >= sum_pred_example)[0]
    KdePredPositive = sum_misbh_example >= sum_pred_example
    TrueMisBehaviour = pred_labels[pred_label_idx].T[-2] != pred_labels[pred_label_idx].T[-1]
    # FP: samples flagged positive although the model was actually correct
    FP = np.sum(~TrueMisBehaviour & KdePredPositive)
    # searches for the best layer combination where the model predicts the input with label 'label_con' as 'label'
    for label_con in range(num_classes):
        pos_indexes_label = np.where(pred_labels[pred_label_idx[pos_indexes]].T[-1] == label_con)[0]
        print("label: {}, total_len: {}, label_con: {}, len: {}".format(label, pos_indexes.shape[0], label_con,
                                                                        pos_indexes_label.shape[0]))
        if pos_indexes_label.shape[0] == 0:
            print("check!")
            continue
        selected_layers_dict[str(label) + str(label_con)], kde_acc = selected_layer_condition(pred_label_idx[pos_indexes[pos_indexes_label]])
        # weight = fraction of positives covered, scaled by subset accuracy;
        # off-diagonal conditions are normalised without the false positives
        if label_con == label:
            weights_dict[str(label) + str(label_con)] = pos_indexes_label.shape[0] * kde_acc / pos_indexes.shape[0]
        else:
            weights_dict[str(label) + str(label_con)] = pos_indexes_label.shape[0] * kde_acc / (pos_indexes.shape[0] - FP)
# Output accumulators; filled by selected_layer_for_label().
selected_layers_dict = {}
weights_dict = {}
# # single-thread version
# for label in range(num_classes):
#     # load selected layers for alarm
#     filename = "./tmp/" + path_name + "/selected_layers_agree_" + str(label) + ".json"
#     with open(filename, "r") as json_file:
#         layers_agree = json.load(json_file)
#         json_file.close()
#
#     print("label: {}".format(label))
#     # generate selected layers per class
#     selected_layer_for_label(label, selected_layers_dict, weights_dict)
#     print("\n")
#
#     # save the index of selected layers per class
#     filename = "./tmp/" + path_name + "/selected_layers_accuracy_" + str(label) + ".json"
#     with open(filename, 'w') as json_file:
#         json.dump(selected_layers_dict, json_file, ensure_ascii=False)
#         json_file.close()
#     filename = "./tmp/" + path_name + "/weights_" + str(label) + ".json"
#     with open(filename, 'w') as json_file:
#         json.dump(weights_dict, json_file, ensure_ascii=False)
#         json_file.close()
# multi-thread version: each process handles one label, passed as argv[5]
label = int(sys.argv[5])
print("label: {}".format(label))
# load selected layers for alarm
filename = "./tmp/" + path_name + "/selected_layers_agree_" + str(label) + ".json"
with open(filename, "r") as json_file:
    layers_agree = json.load(json_file)
    # NOTE(review): close() inside the 'with' block is redundant; the
    # context manager already closes the file.
    json_file.close()
# generate selected layers per class
selected_layer_for_label(label, selected_layers_dict, weights_dict)
# save the index of selected layers per class
filename = "./tmp/" + path_name + "/selected_layers_accuracy_" + str(label) + ".json"
with open(filename, 'w') as json_file:
    json.dump(selected_layers_dict, json_file, ensure_ascii=False)
    json_file.close()
filename = "./tmp/" + path_name + "/weights_" + str(label) + ".json"
with open(filename, 'w') as json_file:
    json.dump(weights_dict, json_file, ensure_ascii=False)
    json_file.close()
"numpy.mean",
"numpy.where",
"numpy.argmax",
"itertools.combinations",
"numpy.sum",
"numpy.zeros",
"json.load",
"numpy.load",
"json.dump"
] | [((119, 175), 'numpy.load', 'np.load', (["('./tmp/' + path_name + '/pred_labels_valid.npy')"], {}), "('./tmp/' + path_name + '/pred_labels_valid.npy')\n", (126, 175), True, 'import numpy as np\n'), ((449, 497), 'numpy.zeros', 'np.zeros', (['[pred_label_idx.shape[0], num_classes]'], {}), '([pred_label_idx.shape[0], num_classes])\n', (457, 497), True, 'import numpy as np\n'), ((691, 719), 'numpy.argmax', 'np.argmax', (['kde_preds'], {'axis': '(1)'}), '(kde_preds, axis=1)\n', (700, 719), True, 'import numpy as np\n'), ((739, 793), 'numpy.mean', 'np.mean', (['(kde_pred == pred_labels[pred_label_idx].T[-1])'], {}), '(kde_pred == pred_labels[pred_label_idx].T[-1])\n', (746, 793), True, 'import numpy as np\n'), ((1262, 1339), 'numpy.mean', 'np.mean', (['(pred_labels[idx_in_origin].T[-2] == pred_labels[idx_in_origin].T[-1])'], {}), '(pred_labels[idx_in_origin].T[-2] == pred_labels[idx_in_origin].T[-1])\n', (1269, 1339), True, 'import numpy as np\n'), ((1841, 1910), 'numpy.zeros', 'np.zeros', (['[pred_labels[pred_label_idx].shape[0], num_selected_layers]'], {}), '([pred_labels[pred_label_idx].shape[0], num_selected_layers])\n', (1849, 1910), True, 'import numpy as np\n'), ((1929, 1998), 'numpy.zeros', 'np.zeros', (['[pred_labels[pred_label_idx].shape[0], num_selected_layers]'], {}), '([pred_labels[pred_label_idx].shape[0], num_selected_layers])\n', (1937, 1998), True, 'import numpy as np\n'), ((2504, 2530), 'numpy.sum', 'np.sum', (['pred_count'], {'axis': '(1)'}), '(pred_count, axis=1)\n', (2510, 2530), True, 'import numpy as np\n'), ((2555, 2582), 'numpy.sum', 'np.sum', (['misbh_count'], {'axis': '(1)'}), '(misbh_count, axis=1)\n', (2561, 2582), True, 'import numpy as np\n'), ((2817, 2860), 'numpy.sum', 'np.sum', (['(~TrueMisBehaviour & KdePredPositive)'], {}), '(~TrueMisBehaviour & KdePredPositive)\n', (2823, 2860), True, 'import numpy as np\n'), ((5133, 5153), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5142, 5153), False, 'import json\n'), 
((5454, 5516), 'json.dump', 'json.dump', (['selected_layers_dict', 'json_file'], {'ensure_ascii': '(False)'}), '(selected_layers_dict, json_file, ensure_ascii=False)\n', (5463, 5516), False, 'import json\n'), ((5647, 5701), 'json.dump', 'json.dump', (['weights_dict', 'json_file'], {'ensure_ascii': '(False)'}), '(weights_dict, json_file, ensure_ascii=False)\n', (5656, 5701), False, 'import json\n'), ((972, 1015), 'itertools.combinations', 'itertools.combinations', (['total_layers', 'count'], {}), '(total_layers, count)\n', (994, 1015), False, 'import itertools\n'), ((1657, 1693), 'numpy.where', 'np.where', (['(pred_labels.T[-2] == label)'], {}), '(pred_labels.T[-2] == label)\n', (1665, 1693), True, 'import numpy as np\n'), ((2601, 2648), 'numpy.where', 'np.where', (['(sum_misbh_example >= sum_pred_example)'], {}), '(sum_misbh_example >= sum_pred_example)\n', (2609, 2648), True, 'import numpy as np\n'), ((3046, 3115), 'numpy.where', 'np.where', (['(pred_labels[pred_label_idx[pos_indexes]].T[-1] == label_con)'], {}), '(pred_labels[pred_label_idx[pos_indexes]].T[-1] == label_con)\n', (3054, 3115), True, 'import numpy as np\n')] |
from typing import Optional
import numpy as np
import skimage.draw as skdraw
from gdsfactory.component import Component
from gdsfactory.types import Floats, Layers
def to_np(
    component: Component,
    nm_per_pixel: int = 20,
    layers: Layers = ((1, 0),),
    values: Optional[Floats] = None,
    pad_width: int = 1,
) -> np.ndarray:
    """Returns a pixelated numpy array from Component polygons.

    Args:
        component: Component
        nm_per_pixel: you can go from 20 (coarse) to 4 (fine)
        layers: to convert. Order matters (latter overwrite former)
        values: associated to each layer (defaults to 1)
        pad_width: padding pixels around the image
    """
    # keep the exact original expression so float rounding is unchanged
    px_per_um = (1 / nm_per_pixel) * 1e3
    (xmin, ymin), (xmax, ymax) = component.bbox
    canvas_shape = (
        int(np.ceil(xmax - xmin) * px_per_um),
        int(np.ceil(ymax - ymin) * px_per_um),
    )
    canvas = np.zeros(canvas_shape, dtype=float)
    polygons_by_layer = component.get_polygons(by_spec=True, depth=None)
    if not values:
        values = [1] * len(layers)
    # later layers overwrite earlier ones where polygons overlap
    for layer, fill_value in zip(layers, values):
        for poly in polygons_by_layer.get(layer, []):
            rows, cols = skdraw.polygon(
                (poly[:, 0] - xmin) * px_per_um,
                (poly[:, 1] - ymin) * px_per_um,
                shape=canvas_shape,
            )
            canvas[rows, cols] = fill_value
    return np.pad(canvas, pad_width=pad_width)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import gdsfactory as gf
    # Demo: rasterize a circular bend (with cladding) and display it.
    c = gf.c.straight()
    c = gf.c.bend_circular(layers_cladding=[gf.LAYER.WGCLAD], cladding_offset=3.0)
    # i = to_np(c, nm_per_pixel=250)
    i = to_np(c, nm_per_pixel=20)
    c.show()
    # transpose: to_np returns (x-pixels, y-pixels); imshow expects rows = y
    plt.imshow(i.transpose(), origin="lower")
    plt.colorbar()
    plt.show()
| [
"numpy.ceil",
"matplotlib.pyplot.show",
"gdsfactory.c.bend_circular",
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"skimage.draw.polygon",
"numpy.pad",
"gdsfactory.c.straight"
] | [((941, 969), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (949, 969), True, 'import numpy as np\n'), ((1526, 1558), 'numpy.pad', 'np.pad', (['img'], {'pad_width': 'pad_width'}), '(img, pad_width=pad_width)\n', (1532, 1558), True, 'import numpy as np\n'), ((1690, 1705), 'gdsfactory.c.straight', 'gf.c.straight', ([], {}), '()\n', (1703, 1705), True, 'import gdsfactory as gf\n'), ((1714, 1788), 'gdsfactory.c.bend_circular', 'gf.c.bend_circular', ([], {'layers_cladding': '[gf.LAYER.WGCLAD]', 'cladding_offset': '(3.0)'}), '(layers_cladding=[gf.LAYER.WGCLAD], cladding_offset=3.0)\n', (1732, 1788), True, 'import gdsfactory as gf\n'), ((1923, 1937), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1935, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1950, 1952), True, 'import matplotlib.pyplot as plt\n'), ((835, 855), 'numpy.ceil', 'np.ceil', (['(xmax - xmin)'], {}), '(xmax - xmin)\n', (842, 855), True, 'import numpy as np\n'), ((886, 906), 'numpy.ceil', 'np.ceil', (['(ymax - ymin)'], {}), '(ymax - ymin)\n', (893, 906), True, 'import numpy as np\n'), ((1362, 1427), 'skimage.draw.polygon', 'skdraw.polygon', (['(r * pixels_per_um)', '(c * pixels_per_um)'], {'shape': 'shape'}), '(r * pixels_per_um, c * pixels_per_um, shape=shape)\n', (1376, 1427), True, 'import skimage.draw as skdraw\n')] |
#!/usr/bin/env python3
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :train.py
# @author :jvo
# @contact :<EMAIL>
# @created :07/08/2019
# @version :1.0
# @python_version :3.6.8
"""
Continual learning of splitMNIST with hypernetworks.
-----------------------------------------------------
The module :mod:`mnist.train_splitMNIST` implements all training logic
for the MNIST experiments (splitMNIST, permutedMNIST).
See :ref:`README <mnist-readme-reference-label>`
for an overview how to use this script.
"""
# Do not delete the following import for all executable scripts!
import matplotlib
matplotlib.use('Agg')
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import os
import copy
from mnist.replay.train_gan import sample as sample_gan, train_gan_one_t
from mnist.replay.train_replay import run as replay_model
from mnist.replay.train_replay import train_vae_one_t, init_plotting_embedding
from mnist.replay.train_replay import sample as sample_vae
from mnist.train_args_default import _set_default
from mnist import train_utils
from mnist import train_args
from mnist.plotting import _plotImages
import mnist.hp_search_splitMNIST as hpsearch
from mnets.classifier_interface import Classifier
from utils import misc
import utils.optim_step as opstep
import utils.hnet_regularizer as hreg
def _save_performance_summary(config, train_iter=None):
    """Save a summary of the test results achieved so far in an easy to parse
    file (for humans and subsequent programs).

    Args:
        config: Command-line arguments. Also carries the result attributes
            (e.g. ``overall_acc_list``, ``acc_mean``) produced during testing.
        train_iter: (optional) The current training iteration. Defaults to
            ``config.n_iter``. Though, the results written in the file
            correspond have there own training iteration assigned.
    """
    if train_iter is None:
        train_iter = config.n_iter
    tp = dict()
    # Replay-network statistics are meaningless for the upper bound and the
    # known-task-id CL1 setup, so zero them out before writing.
    if config.upper_bound or (config.infer_task_id and config.cl_scenario == 1):
        config.num_weights_rp_net = 0
        config.num_weights_rp_hyper_net = 0
        config.compression_ratio_rp = 0
    tp["acc_after_list"] = misc.list_to_str(config.overall_acc_list)
    tp["acc_during_list"] = misc.list_to_str(config.during_accs_final)
    tp["acc_after_mean"] = config.acc_mean
    tp["acc_during_mean"] = sum(config.during_accs_final) / config.num_tasks
    tp["num_weights_class_net"] = config.num_weights_class_net
    tp["num_weights_rp_net"] = config.num_weights_rp_net
    tp["num_weights_rp_hyper_net"] = config.num_weights_rp_hyper_net
    tp["num_weights_class_hyper_net"] = config.num_weights_class_hyper_net
    tp["compression_ratio_rp"] = config.compression_ratio_rp
    tp["compression_ratio_class"] = config.compression_ratio_class
    tp["overall_task_infer_accuracy_list"] = \
        misc.list_to_str(config.overall_task_infer_accuracy_list)
    tp["acc_task_infer_mean"] = config.acc_task_infer_mean
    # Note, the keywords of this dictionary are defined by the array:
    # hpsearch._SUMMARY_KEYWORDS
    with open(os.path.join(config.out_dir,
                           hpsearch._SUMMARY_FILENAME), 'w') as f:
        assert ('num_train_iter' in hpsearch._SUMMARY_KEYWORDS)
        for kw in hpsearch._SUMMARY_KEYWORDS:
            if kw == 'num_train_iter':
                f.write('%s %d\n' % ('num_train_iter', train_iter))
                continue
            if kw == 'finished':
                continue
            try:
                f.write('%s %f\n' % (kw, tp[kw]))
            except TypeError:
                # List-valued entries were pre-rendered to strings by
                # misc.list_to_str and cannot be formatted with %f.
                # (Was a bare "except:", which also hid genuine errors
                # such as a KeyError for an unexpected keyword.)
                f.write('%s %s\n' % (kw, tp[kw]))
def test(dhandlers, class_nets, infer_net, device, config, writer,
         task_id=None):
    """ Test continual learning experiments on MNIST dataset. This can either
    be splitMNIST or permutedMNIST.
    Depending on the method and cl scenario used, this method manages
    to measure the test accuracy of a given task or all tasks after
    training. In order to do so, correct targets need to be constructed
    and output heads need to be set (or inferred).
    Furthermore, this method distinguishes between classification accuracy
    on a task and the accuracy to infer task id's if applicable.
    Args:
        (....): See docstring of function :func:`train_tasks`.
        task_id: (optional) If not None, the method will compute and return
            test acc for the the given task id, not all tasks.
    Returns:
        Scalar representing the test accuracy for the given task id.
        If ``task_id`` is None, the accuracy of the last task of the cl
        experiment is returned.
    """
    # get hnet if this option is given
    if class_nets is not None:
        if config.training_with_hnet:
            c_net_hnet = class_nets[1]
            c_net = class_nets[0]
            c_net.eval()
            c_net_hnet.eval()
        else:
            c_net = class_nets
    if infer_net is not None:
        infer_net.eval()
    with torch.no_grad():
        overall_acc = 0
        overall_acc_list = []
        overall_task_infer_accuracy = 0
        overall_task_infer_accuracy_list = []
        # choose tasks to test
        if task_id is not None:
            task_range = range(task_id, task_id + 1)
        else:
            task_range = range(config.num_tasks)
        # iterate through all old tasks
        for t in task_range:
            print("Testing task: ", t)
            # reset data
            if task_id is not None:
                dhandler = dhandlers[0]
            else:
                dhandler = dhandlers[t]
            # create some variables
            N_processed = 0
            test_size = dhandler.num_test_samples
            # is task id has to be inferred, for every x we have to do that
            # and therefore have one h(e) = W per data point - this is only
            # possible with batch size one, for now
            if (config.infer_task_id and infer_net is not None) or \
                    config.infer_with_entropy:
                curr_bs = 1
            else:
                curr_bs = config.test_batch_size
            classifier_accuracy = 0
            task_infer_accuracy = 0
            Y_hat_all = []
            T_all = []
            # go through test set
            while N_processed < test_size:
                # test size of tasks might be "arbitrary"
                if N_processed + curr_bs > test_size:
                    curr_bs = test_size - N_processed
                N_processed += curr_bs
                # get data
                real_batch = dhandler.next_test_batch(curr_bs)
                X_real = dhandler.input_to_torch_tensor(real_batch[0], device,
                                                        mode='inference')
                T_real = dhandler.output_to_torch_tensor(real_batch[1], device,
                                                         mode='inference')
                # get short version of output dim
                od = config.out_dim
                #######################################
                # SET THE OUTPUT HEAD / COMPUTE TARGETS
                #######################################
                # get dummy for easy access to the output dim of our main
                # network as a dummy, only needed for the first iteration
                if class_nets is not None:
                    if config.training_with_hnet:
                        weights_dummy = c_net_hnet.forward(0)
                        Y_dummies = c_net.forward(X_real, weights_dummy)
                    else:
                        Y_dummies = c_net.forward(X_real)
                else:
                    Y_dummies = infer_net.forward(X_real)
                # build one hots if this option was chosen
                # here we build targets if only have one neuron per task
                # which we set to 1
                if config.class_incremental:
                    task_out = [0, config.num_tasks]
                    T_real = torch.zeros((Y_dummies.shape[0],
                                          config.num_tasks)).to(device)
                    T_real[:, t] = 1
                # compute targets - this is a bit unelegant, cl 3 requires hacks
                elif config.cl_scenario == 1 or config.cl_scenario == 2:
                    if config.cl_scenario == 1:
                        # take the task specific output neuron
                        task_out = [t * od, t * od + od]
                    else:
                        # always all output neurons (only one head is used)
                        task_out = [0, od]
                else:
                    # This here is the classic CL 3 scenario
                    # first we get the predictions, this is over all neurons
                    task_out = [0, config.num_tasks * od]
                    # Here we build the targets, this is zero everywhere
                    # except for the current task - here the correct target
                    # is inserted
                    # build the two zero tensors that surround the targets
                    zeros1 = torch.zeros(Y_dummies[:, 0:t * od].shape). \
                        to(device)
                    zeros2 = torch.zeros(Y_dummies[:, 0:(config.num_tasks \
                                                         - 1 - t) * od].shape).to(device)
                    T_real = torch.cat([zeros1, T_real, zeros2], dim=-1)
                #################
                # TASK PREDICTION
                #################
                # get task predictions
                if config.cl_scenario != 1:
                    if infer_net is not None:
                        # get infer net to predict the apparent task id
                        task_pred = infer_net.forward(X_real)
                        task_pred = task_pred[:, 0:config.num_tasks]
                        task_pred = torch.sigmoid(task_pred)
                        _, inf_task_id = torch.max(task_pred, 1)
                        # measure acc of prediction
                        task_infer_accuracy += (inf_task_id == t).float()
                    elif config.infer_with_entropy and class_nets is not None \
                            and config.training_with_hnet:
                        entropies = []
                        if task_id is not None:
                            entrop_to_test = range(0, task_id + 1)
                        else:
                            entrop_to_test = range(config.num_tasks)
                        # infer task id through entropy of softmax outputs of
                        # different models: the task-conditioned model that is
                        # most confident (lowest entropy) wins
                        for e in entrop_to_test:
                            weights_c = c_net_hnet.forward(e)
                            Y_hat_logits = c_net.forward(X_real, weights_c)
                            if config.cl_scenario == 2:
                                task_out = [0, od]
                            else:
                                task_out = [e * od, e * od + od]
                            Y_hat = F.softmax(Y_hat_logits[:,
                                              task_out[0]:task_out[1]] / config.soft_temp, -1)
                            entropy = -1 * torch.sum(Y_hat * torch.log(Y_hat))
                            entropies.append(entropy)
                        inf_task_id = torch.argmin(torch.stack(entropies))
                        task_infer_accuracy += (inf_task_id == t).float()
                        if config.cl_scenario == 3 and config.infer_output_head:
                            task_out = [inf_task_id * od, inf_task_id * od + od]
                    else:
                        # if task id is known, task inference acc is 100%
                        task_infer_accuracy += 1
                        inf_task_id = t
                # NOTE(review): for cl_scenario == 1 the chain above is skipped
                # entirely; confirm that `inf_task_id` is always defined before
                # it is consumed in the hnet branch below for such configs.
                if class_nets is not None:
                    # from the given inf_task_id we try to produce the
                    # correct model for that tasks
                    if config.training_with_hnet:
                        weights_c = c_net_hnet.forward(inf_task_id)
                        Y_hat_logits = c_net.forward(X_real, weights_c)
                    else:
                        Y_hat_logits = c_net.forward(X_real)
                #################
                # CLASSIFICATION
                #################
                if class_nets is not None:
                    # save predictions of current batch
                    Y_hat_logits = Y_hat_logits[:, task_out[0]:task_out[1]]
                    Y_hat = F.softmax(Y_hat_logits, dim=1)
                    if config.cl_scenario == 3 and config.infer_output_head:
                        # this is the special case where the output head is
                        # inferred. Here we compute the argmax of the single
                        # head and add the number of previous neurons such that
                        # it coincides with the argmax of a hot enc target
                        # that is build for all heads. Example: we detect that
                        # task 3 is present, and every task consist of two
                        # classes. The argmax of Y_hat will either give us 0
                        # or 1, since Y_hat_logits was already cut to two
                        # dimensions. Now we have to add 3*2 to the argmax
                        # of Y_hat to get a prediction between class 0 and
                        # num_tasks*class_per_task.
                        Y_hat = Y_hat.argmax(dim=1, keepdim=False) + \
                                inf_task_id * od
                    Y_hat_all.append(Y_hat)
                    T_all.append(T_real)
            if class_nets is not None:
                # append predictions
                Y_hat_all = torch.cat(Y_hat_all)
                T_all = torch.cat(T_all)
                # check if all test samples are used
                assert (Y_hat_all.shape[0] == dhandler.num_test_samples)
                # compute class acc's
                if config.cl_scenario == 3 and class_nets is not None and \
                        config.infer_output_head:
                    # this is a special case, we compare the
                    # class indices directly (Y_hat_all holds argmaxes here)
                    targets = T_all.argmax(dim=1, keepdim=False)
                    classifier_accuracy = (Y_hat_all == targets).float().mean()
                else:
                    classifier_accuracy = Classifier.accuracy(Y_hat_all, T_all)
                classifier_accuracy *= 100.
                print("Accuracy of task: ", t, " % ", classifier_accuracy)
                overall_acc_list.append(classifier_accuracy)
                overall_acc += classifier_accuracy
            # compute task inference acc"s
            ti_accuracy = task_infer_accuracy / dhandler.num_test_samples * 100.
            if config.training_task_infer or config.infer_with_entropy:
                print("Accuracy of task inference: ", t, " % ", ti_accuracy)
                overall_task_infer_accuracy += ti_accuracy
                overall_task_infer_accuracy_list.append(ti_accuracy)
        # testing all tasks
        if task_id is None:
            if class_nets is not None:
                print("Overall mean acc: ", overall_acc / config.num_tasks)
            if config.training_task_infer or config.infer_with_entropy:
                print("Overall task inf acc: ", overall_task_infer_accuracy / \
                      config.num_tasks)
            # stash summary metrics on config for _save_performance_summary
            config.overall_acc_list = overall_acc_list
            config.acc_mean = overall_acc / config.num_tasks
            config.overall_task_infer_accuracy_list = \
                overall_task_infer_accuracy_list
            config.acc_task_infer_mean = \
                overall_task_infer_accuracy / config.num_tasks
            print(config.overall_task_infer_accuracy_list, config.acc_task_infer_mean)
    return classifier_accuracy
def get_fake_data_loss(dhandlers_rp, net, dec, d_hnet, device, config, writer,
                       t, i, net_copy):
    """ Sample fake data from generator for tasks up to t and compute a loss
    compared to predictions of a checkpointed network.
    We must take caution when considering the different learning scenarios
    and methods and training stages, see detailed comments in the code.
    In general, we build a batch of replayed data from all previous tasks.
    Since we do not know the labels of the replayed data, we consider the
    output of the checkpointed network as ground thruth i.e. we must compute
    a loss between two logits.See :class:`mnets.classifier_interface.Classifier`
    for a detailed describtion of the different loss functions.
    Args:
        (....): See docstring of function :func:`train_tasks`.
        t: Task id.
        i: Current training iteration.
        net_copy: Copy/checkpoint of the classifier network before
            learning task ``t``.
    Returns:
        The loss between predictions and predictions of a
        checkpointed network or replayed data.
    """
    all_Y_hat_ls = []
    all_targets = []
    # we have to choose from which embeddings (multiple?!) to sample from
    if config.class_incremental or config.single_class_replay:
        # if we trained every class with a different generator
        emb_num = t * config.out_dim
    else:
        # here samples from the whole task come from one generator
        emb_num = t
    # we have to choose from which embeddings to sample from
    if config.fake_data_full_range:
        # replay from every previous task/class, splitting the batch evenly
        ran = range(0, emb_num)
        bs_per_task = int(np.ceil(config.batch_size / emb_num))
    else:
        # replay from a single, randomly chosen previous task/class
        random_t = np.random.randint(0, emb_num)
        ran = range(random_t, random_t + 1)
        bs_per_task = config.batch_size
    for re in ran:
        # exchange replay data with real data to compute upper bounds
        if config.upper_bound:
            real_batch = dhandlers_rp[re].next_train_batch(bs_per_task)
            X_fake = dhandlers_rp[re].input_to_torch_tensor(real_batch[0],
                                                            device, mode='train')
        else:
            # get fake data
            if config.replay_method == 'gan':
                X_fake = sample_gan(dec, d_hnet, config, re, device,
                                    bs=bs_per_task)
            else:
                X_fake = sample_vae(dec, d_hnet, config, re, device,
                                    bs=bs_per_task)
        # save some fake data to the writer
        if i % 100 == 0:
            if X_fake.shape[0] >= 15:
                fig_fake = _plotImages(X_fake, config, bs_per_task)
                writer.add_figure('train_class_' + str(re) + '_fake',
                                  fig_fake, global_step=i)
        # compute soft targets with copied network
        target_logits = net_copy.forward(X_fake).detach()
        Y_hat_ls = net.forward(X_fake.detach())
        ###############
        # BUILD TARGETS
        ###############
        od = config.out_dim
        if config.class_incremental or config.training_task_infer:
            # This is a bit complicated: If we train class/task incrementally
            # we skip thraining the classifier on the first task.
            # So when starting to train the classifier on task 2, we have to
            # build a hard target for this first output neuron trained by
            # replay data. A soft target (on an untrained output) would not
            # make sense.
            # output head over all output neurons already available
            task_out = [0, (t + 1) * od]
            # create target with zero everywhere except from the current re
            zeros = torch.zeros(target_logits[:, 0:(t + 1) * od].shape).to(device)
            if config.hard_targets or (t == 1 and re == 0):
                zeros[:, re] = 1
            else:
                zeros[:, 0:t * od] = target_logits[:, 0:t * od]
            targets = zeros
            Y_hat_ls = Y_hat_ls[:, task_out[0]:task_out[1]]
        elif config.cl_scenario == 1 or config.cl_scenario == 2:
            if config.cl_scenario == 1:
                # take the task specific output neuron
                task_out = [re * od, re * od + od]
            else:
                # always all output neurons, only one head is used
                task_out = [0, od]
            Y_hat_ls = Y_hat_ls[:, task_out[0]:task_out[1]]
            target_logits = target_logits[:, task_out[0]:task_out[1]]
            # build hard targets i.e. one hots if this option is chosen
            if config.hard_targets:
                soft_targets = torch.sigmoid(target_logits)
                zeros = torch.zeros(Y_hat_ls.shape).to(device)
                _, argmax = torch.max(soft_targets, 1)
                targets = zeros.scatter_(1, argmax.view(-1, 1), 1)
            else:
                # loss expects logits
                targets = target_logits
        else:
            # take all neurons used up until now
            # output head over all output neurons already available
            task_out = [0, (t + 1) * od]
            # create target with zero everywhere except from the current re
            zeros = torch.zeros(target_logits[:, 0:(t + 1) * od].shape).to(device)
            # sigmoid over the output head(s) from all previous task
            soft_targets = torch.sigmoid(target_logits[:, 0:t * od])
            # compute one hots
            if config.hard_targets:
                _, argmax = torch.max(soft_targets, 1)
                zeros.scatter_(1, argmax.view(-1, 1), 1)
            else:
                # loss expects logits
                zeros[:, 0:t * od] = target_logits[:, 0:t * od]
            targets = zeros
            # choose the correct output size for the actual
            # predictions as well (match the target head range)
            Y_hat_ls = Y_hat_ls[:, task_out[0]:task_out[1]]
        # add to list
        all_targets.append(targets)
        all_Y_hat_ls.append(Y_hat_ls)
    # cat to one tensor
    all_targets = torch.cat(all_targets)
    Y_hat_ls = torch.cat(all_Y_hat_ls)
    if i % 200 == 0:
        classifier_accuracy = Classifier.accuracy(Y_hat_ls, all_targets) * 100.0
        msg = 'Training step {}: Classifier Accuracy: {:.3f} ' + \
              '(on current FAKE DATA training batch).'
        print(msg.format(i, classifier_accuracy))
    # dependent on the target softness, the loss function is chosen
    if config.hard_targets or (config.class_incremental and t == 1):
        return Classifier.logit_cross_entropy_loss(Y_hat_ls, all_targets)
    else:
        return Classifier.knowledge_distillation_loss(Y_hat_ls, all_targets)
def train_class_one_t(dhandler_class, dhandlers_rp, dec, d_hnet, net,
                      device, config, writer, t):
    """Train continual learning experiments on MNIST dataset for one task.
    In this function the main training logic is implemented.
    After setting the optimizers for the network and hypernetwork if
    applicable, the training is structured as follows:
    First, we get a training batch of the current task. Depending on
    the learning scenario, we choose output heads and build targets
    accordingly.
    Second, if ``t`` is greater than 1, we add a loss term concerning
    predictions of replayed data. See :func:`get_fake_data_loss` for
    details. Third, to protect the hypernetwork from forgetting, we add an
    additional L2 loss term namely the difference between its current output
    given an embedding and checkpointed targets.
    Finally, we track some training statistics.
    Args:
        (....): See docstring of function :func:`train_tasks`.
        t: Task id.
    """
    # if cl with task inference we have the classifier empowered with a hnet
    if config.training_with_hnet:
        net_hnet = net[1]
        net = net[0]
        net.train()
        net_hnet.train()
        params_to_regularize = list(net_hnet.theta)
        optimizer = optim.Adam(params_to_regularize,
                               lr=config.class_lr, betas=(0.9, 0.999))
        # task embeddings get their own optimizer with a separate lr
        c_emb_optimizer = optim.Adam([net_hnet.get_task_emb(t)],
                                     lr=config.class_lr_emb, betas=(0.9, 0.999))
    else:
        net.train()
        net_hnet = None
        optimizer = optim.Adam(net.parameters(),
                               lr=config.class_lr, betas=(0.9, 0.999))
    # dont train the replay model if available
    if dec is not None:
        dec.eval()
    if d_hnet is not None:
        d_hnet.eval()
    # compute targets if classifier is trained with hnet
    if t > 0 and config.training_with_hnet:
        if config.online_target_computation:
            # Compute targets for the regularizer whenever they are needed.
            # -> Computationally expensive.
            targets_C = None
            prev_theta = [p.detach().clone() for p in net_hnet.theta]
            prev_task_embs = [p.detach().clone() for p in \
                              net_hnet.get_task_embs()]
        else:
            # Compute targets for the regularizer once and keep them all in
            # memory -> Memory expensive.
            targets_C = hreg.get_current_targets(t, net_hnet)
            prev_theta = None
            prev_task_embs = None
    dhandler_class.reset_batch_generator()
    # make copy of network (checkpoint used as soft-target teacher for replay)
    if t >= 1:
        net_copy = copy.deepcopy(net)
    # set training_iterations if epochs are set
    if config.epochs == -1:
        training_iterations = config.n_iter
    else:
        assert (config.epochs > 0)
        training_iterations = config.epochs * \
                              int(np.ceil(dhandler_class.num_train_samples / config.batch_size))
    if config.class_incremental:
        training_iterations = int(training_iterations / config.out_dim)
    # Whether we will calculate the regularizer.
    calc_reg = t > 0 and config.class_beta > 0 and config.training_with_hnet
    # set if we want the reg only computed for a subset of the previous tasks
    if config.hnet_reg_batch_size != -1:
        hnet_reg_batch_size = config.hnet_reg_batch_size
    else:
        hnet_reg_batch_size = None
    for i in range(training_iterations):
        # set optimizer to zero
        optimizer.zero_grad()
        if net_hnet is not None:
            c_emb_optimizer.zero_grad()
        # Get real data
        real_batch = dhandler_class.next_train_batch(config.batch_size)
        X_real = dhandler_class.input_to_torch_tensor(real_batch[0], device,
                                                      mode='train')
        T_real = dhandler_class.output_to_torch_tensor(real_batch[1], device,
                                                       mode='train')
        if i % 100 == 0 and config.show_plots:
            fig_real = _plotImages(X_real, config)
            writer.add_figure('train_class_' + str(t) + '_real',
                              fig_real, global_step=i)
        #################################################
        # Choosing output heads and constructing targets
        #################################################
        # If we train a task inference net or class incremental learning we
        # we construct a target for every single class/task
        if config.class_incremental or config.training_task_infer:
            # in the beginning of training, we look at two output neuron
            task_out = [0, t + 1]
            T_real = torch.zeros((config.batch_size, task_out[1])).to(device)
            T_real[:, task_out[1] - 1] = 1
        elif config.cl_scenario == 1 or config.cl_scenario == 2:
            if config.cl_scenario == 1:
                # take the task specific output neuron
                task_out = [t * config.out_dim, t * config.out_dim + config.out_dim]
            else:
                # always all output neurons, only one head is used
                task_out = [0, config.out_dim]
        else:
            # The number of output neurons is generic and can grow i.e. we
            # do not have to know the number of tasks before we start
            # learning.
            if not config.infer_output_head:
                task_out = [0, (t + 1) * config.out_dim]
                # pad targets with zeros for all previous tasks' neurons
                T_real = torch.cat((torch.zeros((config.batch_size,
                                                 t * config.out_dim)).to(device),
                                    T_real), dim=1)
            # this is a special case where we will infer the task id by another
            # neural network so we can train on the correct output head direclty
            # and use the infered output head to compute the prediction
            else:
                task_out = [t * config.out_dim, t * config.out_dim + config.out_dim]
        # compute loss of current data
        if config.training_with_hnet:
            weights_c = net_hnet.forward(t)
        else:
            weights_c = None
        Y_hat_logits = net.forward(X_real, weights_c)
        Y_hat_logits = Y_hat_logits[:, task_out[0]:task_out[1]]
        if config.soft_targets:
            # label smoothing: 0.95 on the true class, rest spread uniformly
            soft_label = 0.95
            num_classes = T_real.shape[1]
            soft_targets = torch.where(T_real == 1,
                                       torch.Tensor([soft_label]).to(device),
                                       torch.Tensor([(1 - soft_label) / (num_classes - 1)]).to(device))
            soft_targets = soft_targets.to(device)
            loss_task = Classifier.softmax_and_cross_entropy(Y_hat_logits,
                                                             soft_targets)
        else:
            loss_task = Classifier.softmax_and_cross_entropy(Y_hat_logits, T_real)
        ############################
        # compute loss for fake data
        ############################
        # Get fake data (of all tasks up until now and merge into list)
        if t >= 1 and not config.training_with_hnet:
            fake_loss = get_fake_data_loss(dhandlers_rp, net, dec, d_hnet, device,
                                           config, writer, t, i, net_copy)
            # l_rew interpolates between real-data loss and replay loss
            loss_task = (1 - config.l_rew) * loss_task + config.l_rew * fake_loss
        loss_task.backward(retain_graph=calc_reg, create_graph=calc_reg and \
                           config.backprop_dt)
        # compute hypernet loss and fix embedding -> change current embs
        if calc_reg:
            if config.no_lookahead:
                dTheta = None
            else:
                dTheta = opstep.calc_delta_theta(optimizer,
                                                 config.use_sgd_change, lr=config.class_lr,
                                                 detach_dt=not config.backprop_dt)
            loss_reg = config.class_beta * hreg.calc_fix_target_reg(net_hnet, t,
                                                                    targets=targets_C, mnet=net, dTheta=dTheta,
                                                                    dTembs=None,
                                                                    prev_theta=prev_theta,
                                                                    prev_task_embs=prev_task_embs,
                                                                    batch_size=hnet_reg_batch_size)
            loss_reg.backward()
        # NOTE(review): the backward pass already happened above (with optional
        # graph retention); this looked like a leftover "loss_task.backward()"
        # comment fused with code in an earlier revision.
        if not config.dont_train_main_model:
            optimizer.step()
        if net_hnet is not None and config.train_class_embeddings:
            c_emb_optimizer.step()
        # same stats saving
        if i % 50 == 0:
            # compute accuracies for tracking
            Y_hat_logits = net.forward(X_real, weights_c)
            Y_hat_logits = Y_hat_logits[:, task_out[0]:task_out[1]]
            Y_hat = F.softmax(Y_hat_logits, dim=1)
            classifier_accuracy = Classifier.accuracy(Y_hat, T_real) * 100.0
            writer.add_scalar('train/task_%d/class_accuracy' % t,
                              classifier_accuracy, i)
            writer.add_scalar('train/task_%d/loss_task' % t,
                              loss_task, i)
            if t >= 1 and not config.training_with_hnet:
                writer.add_scalar('train/task_%d/fake_loss' % t,
                                  fake_loss, i)
        # plot some gradient statistics
        if i % 200 == 0:
            if not config.dont_train_main_model:
                total_norm = 0
                if config.training_with_hnet:
                    params = net_hnet.theta
                else:
                    params = net.parameters()
                for p in params:
                    param_norm = p.grad.data.norm(2)
                    total_norm += param_norm.item() ** 2
                total_norm = total_norm ** (1. / 2)
                # TODO write gradient histograms?
                writer.add_scalar('train/task_%d/main_params_grad_norms' % t,
                                  total_norm, i)
            if net_hnet is not None and config.train_class_embeddings:
                total_norm = 0
                for p in [net_hnet.get_task_emb(t)]:
                    param_norm = p.grad.data.norm(2)
                    total_norm += param_norm.item() ** 2
                total_norm = total_norm ** (1. / 2)
                writer.add_scalar('train/task_%d/hnet_emb_grad_norms' % t,
                                  total_norm, i)
        if i % 200 == 0:
            # classifier_accuracy is always fresh here since 200 % 50 == 0
            msg = 'Training step {}: Classifier Accuracy: {:.3f} ' + \
                  '(on current training batch).'
            print(msg.format(i, classifier_accuracy))
def train_tasks(dhandlers_class, dhandlers_rp, enc, dec, d_hnet, class_net,
                device, config, writer, infer_net=None):
    """Train the continual-learning MNIST experiments task by task.

    Iterates over all tasks, alternating between training the classifier
    (or, when ``class_net`` is None, the task-inference network) and — when
    replay is required — the replay model on the current task.  The test
    accuracy measured directly after training each task is recorded so it
    can later be compared with the accuracy after all tasks were learned.

    Args:
        dhandlers_class: The dataset handlers for classification.
        dhandlers_rp: The dataset handlers for the replay model.
        enc: The model of the encoder network.
        dec: The model of the decoder network.
        d_hnet: The model of the decoder hypernetwork.
        class_net: The model of the classifier (None for task inference).
        device: Torch device (cpu or gpu).
        config: The command line arguments.
        writer: The tensorboard summary writer.
        infer_net: (optional) Task inference net.

    Returns:
        A list of test accuracies of all tasks directly after training.
    """
    print('Training MNIST (task inference) classifier ...')
    # Replay is skipped entirely for the upper bound and for CL scenario 1
    # with known task identity.
    needs_replay = not (config.upper_bound or
                        (config.infer_task_id and config.cl_scenario == 1))
    if needs_replay and not config.trained_replay_model:
        embd_list = init_plotting_embedding(dhandlers_rp, d_hnet, writer,
                                            config)

    during_accs = []
    # Begin training loop over the single tasks.
    for task_id in range(config.num_tasks):
        data = dhandlers_class[task_id]
        if class_net is None:
            # Task-inference training only starts from the second task.
            if task_id > 0:
                print("Training task inference system on data handler: ",
                      task_id)
                train_class_one_t(data, dhandlers_rp, dec, d_hnet, infer_net,
                                  device, config, writer, task_id)
        elif not (config.class_incremental and task_id == 0):
            print("Training classifier on data handler: ", task_id)
            train_class_one_t(data, dhandlers_rp, dec, d_hnet, class_net,
                              device, config, writer, task_id)

        # Record accuracy right after training on this task (skipped for the
        # very first task when only the inference net is being trained).
        if class_net is not None or task_id > 0:
            acc = test([data], class_net, infer_net, device, config, writer,
                       task_id=task_id)
            during_accs.append(acc)

        # Continually train the replay model, except after the last task.
        if (needs_replay and not config.trained_replay_model
                and task_id < config.num_tasks - 1):
            if config.replay_method == 'gan':
                replay_trainer = train_gan_one_t
            else:
                replay_trainer = train_vae_one_t
            replay_trainer(dhandlers_rp[task_id], enc, dec, d_hnet, device,
                           config, writer, embd_list, task_id)
    return during_accs
def run(mode='split'):
    """Start the MNIST experiments.

    Depending on the configuration, this controls the creation and training
    of the different (replay) modules for classification or task inference,
    built out of standard neural networks and their corresponding
    hypernetworks.

    Args:
        mode (str): Training mode; defines which experiments and default
            values are loaded. Options are splitMNIST or permutedMNIST:

            - ``split``
            - ``perm``
    """
    ### Get command line arguments.
    config = train_args.parse_cmd_arguments(mode=mode)
    # NOTE: these sanity checks use `assert`, so they vanish under `python -O`.
    assert config.experiment in ("splitMNIST", "permutedMNIST")
    if not config.dont_set_default:
        config = _set_default(config)
    if config.infer_output_head:
        assert config.infer_task_id
    if config.cl_scenario == 1:
        # Scenario 1 provides the task identity, so class-incremental /
        # single-class replay options make no sense here.
        assert not config.class_incremental
        assert not config.single_class_replay
    if config.infer_with_entropy:
        assert config.infer_task_id
    # Single-class replay is only implemented for splitMNIST.
    if config.single_class_replay or config.class_incremental:
        assert config.experiment == "splitMNIST"
    # Check range of number of tasks.
    assert config.num_tasks > 0
    if config.experiment == "splitMNIST":
        if config.class_incremental:
            assert config.num_tasks <= 10
        else:
            assert config.num_tasks <= 5
    # The following combination is not supported.
    if config.infer_task_id:
        assert not config.class_incremental
    # Enforce the correct CL scenario.
    if config.class_incremental:
        config.single_class_replay = 1
        config.cl_scenario = 3
        print("Attention: Cl scenario 3 is enforced!")
        steps = 1
    else:
        steps = 2
    #### Get data handlers.
    dhandlers_class = train_utils._generate_tasks(config, steps)
    # Decide if a replay model has to be trained.  If we only want a
    # classifier and the task id is known, we only train classifier + hnet.
    # The upper bound considers the replay case but replays real data as if
    # the replay model were "perfect".
    train_rp = not (config.upper_bound or
                    (config.infer_task_id and config.cl_scenario == 1))
    ### Get replay model trained continually with hnet.
    dec, d_hnet, enc, dhandlers_rp, device, writer, config = \
        replay_model(config, train_rp)
    # With a replay model trained, we now train a classifier that either
    # solves a task directly (HNET+replay) or a model that infers the task
    # from its input.
    ###############################
    # Train task inference network
    ###############################
    if config.infer_task_id and config.cl_scenario != 1 and \
            not config.infer_with_entropy:
        print("Training task inference model ...")
        config.trained_replay_model = False
        config.training_task_infer = True
        config.training_with_hnet = False
        ### Generate task inference network.
        infer_net = train_utils.generate_classifier(config,
                                                    dhandlers_class, device)
        ### Train the task inference network.
        config.during_accs_inference = train_tasks(dhandlers_class,
            dhandlers_rp, enc, dec, d_hnet, None,
            device, config, writer, infer_net=infer_net)
        ### Test network.
        print("Testing task inference model ...")
        test(dhandlers_class, None, infer_net, device, config, writer)
        config.training_with_hnet = True
        config.trained_replay_model = True
    else:
        # No separate inference network: either the task id is given, or it
        # will be inferred via output entropy at test time.
        infer_net = None
        if config.infer_with_entropy:
            config.trained_replay_model = True
        else:
            config.trained_replay_model = False
        if config.infer_task_id:
            config.training_with_hnet = True
        else:
            config.training_with_hnet = False
    ###################
    # Train classifier
    ###################
    config.training_task_infer = False
    print("Training final classifier ...")
    ### Generate another classifier network.
    class_nets = train_utils.generate_classifier(config,
                                                 dhandlers_class, device)
    ### Train the network.
    config.during_accs_final = train_tasks(dhandlers_class, dhandlers_rp, enc,
        dec, d_hnet, class_nets, device, config, writer, infer_net)
    print("Testing final classifier ...")
    ### Test network.
    test(dhandlers_class, class_nets, infer_net, device, config, writer)
    _save_performance_summary(config)
    writer.close()
    print('Program finished successfully.')


if __name__ == '__main__':
    run()
| [
"mnets.classifier_interface.Classifier.softmax_and_cross_entropy",
"mnist.train_args.parse_cmd_arguments",
"torch.max",
"mnets.classifier_interface.Classifier.knowledge_distillation_loss",
"copy.deepcopy",
"torch.nn.functional.softmax",
"mnist.train_utils.generate_classifier",
"mnist.train_args_defaul... | [((1176, 1197), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1190, 1197), False, 'import matplotlib\n'), ((2663, 2704), 'utils.misc.list_to_str', 'misc.list_to_str', (['config.overall_acc_list'], {}), '(config.overall_acc_list)\n', (2679, 2704), False, 'from utils import misc\n'), ((2733, 2775), 'utils.misc.list_to_str', 'misc.list_to_str', (['config.during_accs_final'], {}), '(config.during_accs_final)\n', (2749, 2775), False, 'from utils import misc\n'), ((3343, 3400), 'utils.misc.list_to_str', 'misc.list_to_str', (['config.overall_task_infer_accuracy_list'], {}), '(config.overall_task_infer_accuracy_list)\n', (3359, 3400), False, 'from utils import misc\n'), ((22570, 22592), 'torch.cat', 'torch.cat', (['all_targets'], {}), '(all_targets)\n', (22579, 22592), False, 'import torch\n'), ((22608, 22631), 'torch.cat', 'torch.cat', (['all_Y_hat_ls'], {}), '(all_Y_hat_ls)\n', (22617, 22631), False, 'import torch\n'), ((37784, 37825), 'mnist.train_args.parse_cmd_arguments', 'train_args.parse_cmd_arguments', ([], {'mode': 'mode'}), '(mode=mode)\n', (37814, 37825), False, 'from mnist import train_args\n'), ((39141, 39183), 'mnist.train_utils._generate_tasks', 'train_utils._generate_tasks', (['config', 'steps'], {}), '(config, steps)\n', (39168, 39183), False, 'from mnist import train_utils\n'), ((39733, 39763), 'mnist.replay.train_replay.run', 'replay_model', (['config', 'train_rp'], {}), '(config, train_rp)\n', (39745, 39763), True, 'from mnist.replay.train_replay import run as replay_model\n'), ((41688, 41752), 'mnist.train_utils.generate_classifier', 'train_utils.generate_classifier', (['config', 'dhandlers_class', 'device'], {}), '(config, dhandlers_class, device)\n', (41719, 41752), False, 'from mnist import train_utils\n'), ((5533, 5548), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5546, 5548), False, 'import torch\n'), ((18219, 18248), 'numpy.random.randint', 'np.random.randint', (['(0)', 'emb_num'], 
{}), '(0, emb_num)\n', (18236, 18248), True, 'import numpy as np\n'), ((23060, 23118), 'mnets.classifier_interface.Classifier.logit_cross_entropy_loss', 'Classifier.logit_cross_entropy_loss', (['Y_hat_ls', 'all_targets'], {}), '(Y_hat_ls, all_targets)\n', (23095, 23118), False, 'from mnets.classifier_interface import Classifier\n'), ((23144, 23205), 'mnets.classifier_interface.Classifier.knowledge_distillation_loss', 'Classifier.knowledge_distillation_loss', (['Y_hat_ls', 'all_targets'], {}), '(Y_hat_ls, all_targets)\n', (23182, 23205), False, 'from mnets.classifier_interface import Classifier\n'), ((24523, 24595), 'torch.optim.Adam', 'optim.Adam', (['params_to_regularize'], {'lr': 'config.class_lr', 'betas': '(0.9, 0.999)'}), '(params_to_regularize, lr=config.class_lr, betas=(0.9, 0.999))\n', (24533, 24595), True, 'import torch.optim as optim\n'), ((25934, 25952), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (25947, 25952), False, 'import copy\n'), ((37981, 38001), 'mnist.train_args_default._set_default', '_set_default', (['config'], {}), '(config)\n', (37993, 38001), False, 'from mnist.train_args_default import _set_default\n'), ((40408, 40472), 'mnist.train_utils.generate_classifier', 'train_utils.generate_classifier', (['config', 'dhandlers_class', 'device'], {}), '(config, dhandlers_class, device)\n', (40439, 40472), False, 'from mnist import train_utils\n'), ((3580, 3636), 'os.path.join', 'os.path.join', (['config.out_dir', 'hpsearch._SUMMARY_FILENAME'], {}), '(config.out_dir, hpsearch._SUMMARY_FILENAME)\n', (3592, 3636), False, 'import os\n'), ((18152, 18188), 'numpy.ceil', 'np.ceil', (['(config.batch_size / emb_num)'], {}), '(config.batch_size / emb_num)\n', (18159, 18188), True, 'import numpy as np\n'), ((22684, 22726), 'mnets.classifier_interface.Classifier.accuracy', 'Classifier.accuracy', (['Y_hat_ls', 'all_targets'], {}), '(Y_hat_ls, all_targets)\n', (22703, 22726), False, 'from mnets.classifier_interface import Classifier\n'), ((25726, 
25763), 'utils.hnet_regularizer.get_current_targets', 'hreg.get_current_targets', (['t', 'net_hnet'], {}), '(t, net_hnet)\n', (25750, 25763), True, 'import utils.hnet_regularizer as hreg\n'), ((27358, 27385), 'mnist.plotting._plotImages', '_plotImages', (['X_real', 'config'], {}), '(X_real, config)\n', (27369, 27385), False, 'from mnist.plotting import _plotImages\n'), ((30015, 30079), 'mnets.classifier_interface.Classifier.softmax_and_cross_entropy', 'Classifier.softmax_and_cross_entropy', (['Y_hat_logits', 'soft_targets'], {}), '(Y_hat_logits, soft_targets)\n', (30051, 30079), False, 'from mnets.classifier_interface import Classifier\n'), ((30179, 30237), 'mnets.classifier_interface.Classifier.softmax_and_cross_entropy', 'Classifier.softmax_and_cross_entropy', (['Y_hat_logits', 'T_real'], {}), '(Y_hat_logits, T_real)\n', (30215, 30237), False, 'from mnets.classifier_interface import Classifier\n'), ((32363, 32393), 'torch.nn.functional.softmax', 'F.softmax', (['Y_hat_logits'], {'dim': '(1)'}), '(Y_hat_logits, dim=1)\n', (32372, 32393), True, 'import torch.nn.functional as F\n'), ((35577, 35638), 'mnist.replay.train_replay.init_plotting_embedding', 'init_plotting_embedding', (['dhandlers_rp', 'd_hnet', 'writer', 'config'], {}), '(dhandlers_rp, d_hnet, writer, config)\n', (35600, 35638), False, 'from mnist.replay.train_replay import train_vae_one_t, init_plotting_embedding\n'), ((14364, 14384), 'torch.cat', 'torch.cat', (['Y_hat_all'], {}), '(Y_hat_all)\n', (14373, 14384), False, 'import torch\n'), ((14409, 14425), 'torch.cat', 'torch.cat', (['T_all'], {}), '(T_all)\n', (14418, 14425), False, 'import torch\n'), ((18798, 18857), 'mnist.replay.train_gan.sample', 'sample_gan', (['dec', 'd_hnet', 'config', 're', 'device'], {'bs': 'bs_per_task'}), '(dec, d_hnet, config, re, device, bs=bs_per_task)\n', (18808, 18857), True, 'from mnist.replay.train_gan import sample as sample_gan, train_gan_one_t\n'), ((18937, 18996), 'mnist.replay.train_replay.sample', 'sample_vae', 
(['dec', 'd_hnet', 'config', 're', 'device'], {'bs': 'bs_per_task'}), '(dec, d_hnet, config, re, device, bs=bs_per_task)\n', (18947, 18996), True, 'from mnist.replay.train_replay import sample as sample_vae\n'), ((19168, 19208), 'mnist.plotting._plotImages', '_plotImages', (['X_fake', 'config', 'bs_per_task'], {}), '(X_fake, config, bs_per_task)\n', (19179, 19208), False, 'from mnist.plotting import _plotImages\n'), ((21939, 21980), 'torch.sigmoid', 'torch.sigmoid', (['target_logits[:, 0:t * od]'], {}), '(target_logits[:, 0:t * od])\n', (21952, 21980), False, 'import torch\n'), ((26201, 26262), 'numpy.ceil', 'np.ceil', (['(dhandler_class.num_train_samples / config.batch_size)'], {}), '(dhandler_class.num_train_samples / config.batch_size)\n', (26208, 26262), True, 'import numpy as np\n'), ((31082, 31198), 'utils.optim_step.calc_delta_theta', 'opstep.calc_delta_theta', (['optimizer', 'config.use_sgd_change'], {'lr': 'config.class_lr', 'detach_dt': '(not config.backprop_dt)'}), '(optimizer, config.use_sgd_change, lr=config.\n class_lr, detach_dt=not config.backprop_dt)\n', (31105, 31198), True, 'import utils.optim_step as opstep\n'), ((31335, 31525), 'utils.hnet_regularizer.calc_fix_target_reg', 'hreg.calc_fix_target_reg', (['net_hnet', 't'], {'targets': 'targets_C', 'mnet': 'net', 'dTheta': 'dTheta', 'dTembs': 'None', 'prev_theta': 'prev_theta', 'prev_task_embs': 'prev_task_embs', 'batch_size': 'hnet_reg_batch_size'}), '(net_hnet, t, targets=targets_C, mnet=net, dTheta=\n dTheta, dTembs=None, prev_theta=prev_theta, prev_task_embs=\n prev_task_embs, batch_size=hnet_reg_batch_size)\n', (31359, 31525), True, 'import utils.hnet_regularizer as hreg\n'), ((32428, 32462), 'mnets.classifier_interface.Classifier.accuracy', 'Classifier.accuracy', (['Y_hat', 'T_real'], {}), '(Y_hat, T_real)\n', (32447, 32462), False, 'from mnets.classifier_interface import Classifier\n'), ((13122, 13152), 'torch.nn.functional.softmax', 'F.softmax', (['Y_hat_logits'], {'dim': '(1)'}), 
'(Y_hat_logits, dim=1)\n', (13131, 13152), True, 'import torch.nn.functional as F\n'), ((14987, 15024), 'mnets.classifier_interface.Classifier.accuracy', 'Classifier.accuracy', (['Y_hat_all', 'T_all'], {}), '(Y_hat_all, T_all)\n', (15006, 15024), False, 'from mnets.classifier_interface import Classifier\n'), ((20270, 20321), 'torch.zeros', 'torch.zeros', (['target_logits[:, 0:(t + 1) * od].shape'], {}), '(target_logits[:, 0:(t + 1) * od].shape)\n', (20281, 20321), False, 'import torch\n'), ((21200, 21228), 'torch.sigmoid', 'torch.sigmoid', (['target_logits'], {}), '(target_logits)\n', (21213, 21228), False, 'import torch\n'), ((21320, 21346), 'torch.max', 'torch.max', (['soft_targets', '(1)'], {}), '(soft_targets, 1)\n', (21329, 21346), False, 'import torch\n'), ((22077, 22103), 'torch.max', 'torch.max', (['soft_targets', '(1)'], {}), '(soft_targets, 1)\n', (22086, 22103), False, 'import torch\n'), ((28014, 28059), 'torch.zeros', 'torch.zeros', (['(config.batch_size, task_out[1])'], {}), '((config.batch_size, task_out[1]))\n', (28025, 28059), False, 'import torch\n'), ((36891, 36983), 'mnist.replay.train_gan.train_gan_one_t', 'train_gan_one_t', (['dhandlers_rp[t]', 'enc', 'dec', 'd_hnet', 'device', 'config', 'writer', 'embd_list', 't'], {}), '(dhandlers_rp[t], enc, dec, d_hnet, device, config, writer,\n embd_list, t)\n', (36906, 36983), False, 'from mnist.replay.train_gan import sample as sample_gan, train_gan_one_t\n'), ((37058, 37150), 'mnist.replay.train_replay.train_vae_one_t', 'train_vae_one_t', (['dhandlers_rp[t]', 'enc', 'dec', 'd_hnet', 'device', 'config', 'writer', 'embd_list', 't'], {}), '(dhandlers_rp[t], enc, dec, d_hnet, device, config, writer,\n embd_list, t)\n', (37073, 37150), False, 'from mnist.replay.train_replay import train_vae_one_t, init_plotting_embedding\n'), ((9964, 10007), 'torch.cat', 'torch.cat', (['[zeros1, T_real, zeros2]'], {'dim': '(-1)'}), '([zeros1, T_real, zeros2], dim=-1)\n', (9973, 10007), False, 'import torch\n'), ((10481, 
10505), 'torch.sigmoid', 'torch.sigmoid', (['task_pred'], {}), '(task_pred)\n', (10494, 10505), False, 'import torch\n'), ((10547, 10570), 'torch.max', 'torch.max', (['task_pred', '(1)'], {}), '(task_pred, 1)\n', (10556, 10570), False, 'import torch\n'), ((21779, 21830), 'torch.zeros', 'torch.zeros', (['target_logits[:, 0:(t + 1) * od].shape'], {}), '(target_logits[:, 0:(t + 1) * od].shape)\n', (21790, 21830), False, 'import torch\n'), ((29797, 29823), 'torch.Tensor', 'torch.Tensor', (['[soft_label]'], {}), '([soft_label])\n', (29809, 29823), False, 'import torch\n'), ((29875, 29927), 'torch.Tensor', 'torch.Tensor', (['[(1 - soft_label) / (num_classes - 1)]'], {}), '([(1 - soft_label) / (num_classes - 1)])\n', (29887, 29927), False, 'import torch\n'), ((8572, 8623), 'torch.zeros', 'torch.zeros', (['(Y_dummies.shape[0], config.num_tasks)'], {}), '((Y_dummies.shape[0], config.num_tasks))\n', (8583, 8623), False, 'import torch\n'), ((21253, 21280), 'torch.zeros', 'torch.zeros', (['Y_hat_ls.shape'], {}), '(Y_hat_ls.shape)\n', (21264, 21280), False, 'import torch\n'), ((9689, 9730), 'torch.zeros', 'torch.zeros', (['Y_dummies[:, 0:t * od].shape'], {}), '(Y_dummies[:, 0:t * od].shape)\n', (9700, 9730), False, 'import torch\n'), ((9798, 9864), 'torch.zeros', 'torch.zeros', (['Y_dummies[:, 0:(config.num_tasks - 1 - t) * od].shape'], {}), '(Y_dummies[:, 0:(config.num_tasks - 1 - t) * od].shape)\n', (9809, 9864), False, 'import torch\n'), ((11642, 11716), 'torch.nn.functional.softmax', 'F.softmax', (['(Y_hat_logits[:, task_out[0]:task_out[1]] / config.soft_temp)', '(-1)'], {}), '(Y_hat_logits[:, task_out[0]:task_out[1]] / config.soft_temp, -1)\n', (11651, 11716), True, 'import torch.nn.functional as F\n'), ((11947, 11969), 'torch.stack', 'torch.stack', (['entropies'], {}), '(entropies)\n', (11958, 11969), False, 'import torch\n'), ((28814, 28866), 'torch.zeros', 'torch.zeros', (['(config.batch_size, t * config.out_dim)'], {}), '((config.batch_size, t * config.out_dim))\n', 
(28825, 28866), False, 'import torch\n'), ((11824, 11840), 'torch.log', 'torch.log', (['Y_hat'], {}), '(Y_hat)\n', (11833, 11840), False, 'import torch\n')] |
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME> and <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2015
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
This module provides a few probability utility functions, most of which are used in the
high level CCD and CMOS staring array model pyradi.rystare.
One of the utility functions provides a packaged version of scikit-learn's
kernel density estimation tool, yielding a better estimate than a
simple histogram does.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__version__= ""
__author__='<NAME> and <NAME>'
__all__=['distribution_exp','distribution_lognormal','distribution_inversegauss','distribution_logistic',
'distribution_wald','distributions_generator','validateParam','checkParamsNum']
import sys
import numpy as np
import re
######################################################################################
def KDEbounded(x_d,x,bandwidth=np.nan,lowbnd=np.nan,uppbnd=np.nan,kernel = 'gaussian'):
    """Kernel density estimate of a sample, with optional mirrored boundaries.

    When ``bandwidth`` is np.nan, the optimal bandwidth is found by a
    leave-one-out cross-validated grid search (be careful, this can take a
    while).  If a lower and/or upper bound is supplied, the sample is
    mirrored about that bound and the densities are summed, compensating
    for the kernel mass that would otherwise spill past a hard domain edge.

    Args:
        | x_d (np.array[N,]): domain over which values must be returned
        | x (np.array[N,]): input sample data set
        | bandwidth (float): bandwidth to be used, np.nan to optimise it here
        | lowbnd (float): lower mirror fold boundary, np.nan means unbounded below
        | uppbnd (float): upper mirror fold boundary, np.nan means unbounded above
        | kernel (str): kernel to be used ['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']

    Returns:
        | x_d (np.array[N,]): input vector used as domain for the calculations
        | x (np.array[N,]): probability density over x_d, the range of the PDF
        | bandwidth (float): bandwidth used in the KDE
        | kernel (str): kernel used

    Raises:
        | No exception is raised.

    See here for more detail and examples:
    https://github.com/NelisW/PythonNotesToSelf/tree/master/KernelDensityEstimation
    """
    from sklearn.neighbors import KernelDensity
    from sklearn.model_selection import GridSearchCV
    from sklearn.model_selection import LeaveOneOut

    # Optimise the bandwidth by cross validation if none was supplied.
    if np.isnan(bandwidth):
        candidates = 10 ** np.linspace(-1, 1, 100)
        search = GridSearchCV(KernelDensity(kernel=kernel),
                              param_grid={'bandwidth': candidates},
                              cv=LeaveOneOut())
        search.fit(x[:, None])  # create an additional axis of length one
        bandwidth = search.best_params_['bandwidth']

    # Collect the base sample plus its reflection about each given boundary.
    samples = [x]
    if not np.isnan(lowbnd):
        samples.append(2 * lowbnd - x)
    if not np.isnan(uppbnd):
        samples.append(2 * uppbnd - x)

    # Sum the KDE of the base sample and of every reflected copy.
    density = np.zeros_like(x_d)
    for sample in samples:
        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel)
        kde.fit(sample[:, None])  # create an additional axis of length one
        # score_samples returns the log of the probability density.
        density += np.exp(kde.score_samples(x_d[:, None]))

    # Zero the density outside the bounded domain.
    if not np.isnan(lowbnd):
        density = np.where(x_d <= lowbnd, 0, density)
    if not np.isnan(uppbnd):
        density = np.where(x_d >= uppbnd, 0, density)

    return x_d, density, bandwidth, kernel
######################################################################################
def distribution_exp(distribParams, out, funcName):
    r"""Draw variates from the Exponential distribution (rate form).

    Meant to be called via the `distributions_generator` function.

    :math:`\textrm{pdf} = \lambda \exp(-\lambda y)`,
    :math:`\textrm{cdf} = 1 - \exp(-\lambda y)`, support :math:`y \ge 0`.

    Mean 1/lambda, variance 1/lambda^2, median log(2)/lambda,
    skewness 2, kurtosis 6.  Generating function:
    :math:`T = -\log_e(U)/\lambda` with U ~ Uniform(0, 1).

    The discrete analogue of the Exponential distribution is the Geometric
    distribution.  See also GEOMETRIC, GAMMA, POISSON, WEIBULL and
    http://en.wikipedia.org/wiki/Exponential_distribution

    Args:
        | distribParams ([float]): [lambda], rate (lambda > 0);
          defaults to [1.] when None or empty.
        | out (np.array): array whose shape defines the sample size;
          returned unchanged if the parameters fail validation.
        | funcName (str): caller name used in validation messages.

    Returns:
        | out (np.array): the generated variates.
    """
    if distribParams is None or len(distribParams) == 0:
        distribParams = [1.]
    if checkParamsNum(funcName, 'Exponential', 'exp', distribParams, [1]):
        rate = distribParams[0]
        if validateParam(funcName, 'Exponential', 'exp', 'lambda', 'lambda',
                         rate, ['> 0']):
            # Inverse-CDF method: T = -ln(U)/lambda, U ~ Uniform(0, 1).
            uniform = np.random.rand(*out.shape)
            out = -np.log(uniform) / rate
    return out
######################################################################################
def distribution_lognormal(distribParams, out, funcName):
    """Draw variates from the Log-normal distribution.

    Meant to be called via the `distributions_generator` function.
    (Sometimes called the Cobb-Douglas or antilognormal distribution.)

    pdf = 1/(y*sigma*sqrt(2*pi)) * exp(-1/2*((log(y)-mu)/sigma)^2),
    support y > 0; always right-skewed.

    Mean exp(mu + sigma^2/2); variance exp(2*mu+sigma^2)*(exp(sigma^2)-1);
    mode exp(mu-sigma^2).  Parameters mu and sigma are the mean and
    standard deviation of y in (natural) log space:
    mu = log(mean(y)) - 1/2*log(1 + var(y)/mean(y)^2),
    sigma = sqrt(log(1 + var(y)/mean(y)^2)).

    Args:
        | distribParams ([float]): [mu, sigma] with sigma > 0;
          defaults to [0., 1.] (standard Log-normal) when None or empty.
        | out (np.array): array whose shape defines the sample size;
          returned unchanged if the parameters fail validation.
        | funcName (str): caller name used in validation messages.

    Returns:
        | out (np.array): the generated variates.
    """
    if distribParams is None or len(distribParams) == 0:
        distribParams = [0., 1.]
    if checkParamsNum(funcName, 'Lognormal', 'lognorm', distribParams, [0, 2]):
        mu = distribParams[0]
        sigma = distribParams[1]
        if validateParam(funcName, 'Lognormal', 'lognorm', '[mu, sigma]',
                         'sigma', sigma, ['> 0']):
            # Exponentiate a scaled/shifted standard normal sample.
            gauss = np.random.randn(*out.shape)
            out = np.exp(mu + sigma * gauss)
    return out
######################################################################################
def distribution_inversegauss(distribParams, out, funcName):
    """Draw variates from the Inverse Gaussian distribution.

    Meant to be called via the `distributions_generator` function.

    The Inverse Gaussian (often called Inverse Normal) is a skewed
    distribution on y > 0 whose location is set by the mean mu and whose
    profile is set by the scale _lambda; skewness grows rapidly as the
    scale decreases.

    pdf(y) = sqrt(_lambda/(2*pi*y^3)) * exp(-_lambda/(2*y)*(y/mu-1)^2)

    Mean mu; variance mu^3/_lambda; skewness sqrt(9*mu/_lambda);
    mode mu/(2*_lambda)*(sqrt(9*mu^2+4*_lambda^2)-3*mu).
    The Wald distribution is the special case mu = 1.

    Method (multiple-roots transformation):
    V = _lambda*(Y-mu)^2/(Y*mu^2) is chi-square with one degree of freedom,
    so V is generated as the square of a standard normal.  Solving for Y
    gives two roots; y1 is chosen with probability mu/(mu+y1) and
    y2 = mu^2/y1 otherwise (Shuster 1968; Michael, Schucany & Haas 1976).
    http://en.wikipedia.org/wiki/Inverse_Gaussian_distribution

    Args:
        | distribParams ([float]): [mu, _lambda], both > 0;
          defaults to [0., 1.] when None or empty.
        | out (np.array): array whose shape defines the sample size;
          returned unchanged if the parameters fail validation.
        | funcName (str): caller name used in validation messages.

    Returns:
        | out (np.array): the generated variates.
    """
    if distribParams is None or len(distribParams) == 0:
        distribParams = [0., 1.]
    if checkParamsNum(funcName, 'Inverse Gaussian', 'ig', distribParams, [2]):
        mu = distribParams[0]
        scale = distribParams[1]
        # Both parameters are validated (bitwise & deliberately avoids
        # short-circuiting, so both error messages can be reported).
        mu_ok = validateParam(funcName, 'Inverse Gaussian', 'ig',
                              '[mu, _lambda]', 'mu', mu, ['> 0'])
        scale_ok = validateParam(funcName, 'Inverse Gaussian', 'ig',
                                 '[mu, _lambda]', '_lambda', scale, ['> 0'])
        if mu_ok & scale_ok:
            # Square of a standard normal ~ chi-square with 1 dof.
            v = np.random.randn(*out.shape) ** 2
            # First root of the quadratic in Y.
            out = mu + 0.5 * mu / scale * (
                mu * v - np.sqrt(4 * mu * scale * v + mu ** 2 * v ** 2))
            # Keep y1 with probability mu/(mu+y1); else take y2 = mu^2/y1.
            take_second = np.random.rand(*out.shape) >= mu / (mu + out)
            out[take_second] = mu ** 2.0 / out[take_second]
    return out
######################################################################################
def distribution_logistic(distribParams, out, funcName):
    """Draw variates from the Logistic distribution.

    Meant to be called via the `distributions_generator` function.

    The logistic distribution is a symmetrical bell-shaped distribution.
    One of its applications is as an alternative to the Normal distribution
    when a higher proportion of the population being modeled is distributed
    in the tails.

    pdf(y) = exp((y-a)/k)./(k*(1+exp((y-a)/k)).^2)
    cdf(y) = 1 ./ (1+exp(-(y-a)/k))

    Mean a; variance k^2*pi^2/3; skewness 0; kurtosis 1.2;
    support -Inf < y < Inf.

    Method: inverse CDF transformation.
    http://en.wikipedia.org/wiki/Logistic_distribution

    Args:
        | distribParams ([float]): [a, k], location a and scale k (k > 0);
          defaults to [0., 1.] (standard Logistic) when None or empty.
        | out (np.array): array whose shape defines the sample size;
          returned unchanged if the parameters fail validation.
        | funcName (str): caller name used in validation messages.

    Returns:
        | out (np.array): the generated variates.
    """
    if distribParams is None or len(distribParams) == 0:
        distribParams = [0., 1.]
    if checkParamsNum(funcName, 'Logistic', 'logistic', distribParams, [0, 2]):
        a = distribParams[0]
        k = distribParams[1]
        # Bug fix: validation previously named the distribution 'Laplace'
        # (copy-paste error); it must report 'Logistic' like the
        # checkParamsNum call above.
        if validateParam(funcName, 'Logistic', 'logistic', '[a, k]', 'k', k,
                         ['> 0']):
            # Inverse CDF: y = a - k*log(1/u - 1), u ~ Uniform(0, 1).
            u1 = np.random.rand(*out.shape)
            out = a - k * np.log(1.0 / u1 - 1)
    return out
######################################################################################
def distribution_wald(distribParams, out, funcName):
    """The Wald Distribution
    This function is meant to be called via the `distributions_generator` function.
    The Wald distribution is the special case of the Inverse Gaussian
    distribution whose mean is fixed at one.
    pdf = sqrt(chi/(2*pi*y^3)) * exp(-chi./(2*y).*(y-1).^2)
    - Mean = 1
    - Variance = 1/chi
    - Skewness = sqrt(9/chi)
    - Kurtosis = 3 + 15/scale
    PARAMETERS: chi - scale parameter; (chi>0)
    SUPPORT: y, y>0
    CLASS: Continuous skewed distributions
    USAGE:
      - randraw('wald', chi, sampleSize) - generate sampleSize number of variates from the
           Wald distribution with scale parameter 'chi';
    EXAMPLES:
      1. y = randraw('wald', 0.5, [1 1e5]);
      2. y = randraw('wald', 1, 1, 1e5);
      3. y = randraw('wald', 1.5, 1e5 );
      4. y = randraw('wald', 2, [1e5 1] );
    """
    if distribParams is None or len(distribParams) == 0:
        distribParams = [0.]
    # bail out early when the parameter count is wrong
    if not checkParamsNum(funcName, 'Wald', 'wald', distribParams, [1]):
        return out
    chi = distribParams[0]
    if validateParam(funcName, 'Wald', 'wald', 'chi', 'chi', chi, ['> 0']):
        # Wald == Inverse Gaussian with mu fixed at 1
        out = distributions_generator('ig', [1, chi], sampleSize=out.shape)
    return out
######################################################################################
def distributions_generator(distribName=None, distribParams=None, sampleSize=None):
    """The routine contains various models for simulation of FPN (DSNU or PRNU).
    This function allows the user to select the distribution by name and pass requisite
    parameters in a list (which differs for different distrubutions). The size of the
    distribution is defined by a scalar or list.
    sampleSize follows Matlab conventions:
    - if None then return a single scalar value
    - if scalar int N then return NxN array
    - if tuple then return tuple-sized array
    Possible values for distribName:
    | 'exp','exponential'
    | 'lognorm','lognormal','cobbdouglas','antilognormal'
    | 'ig', 'inversegauss', 'invgauss'
    | 'logistic'
    | 'wald'
    Args:
        | distribName (string): required distribution name
        | distribParams ([float]): list of distribution parameters (see below)
        | sampleSize (None,int,[int,int]): Size of the returned random set
    Returns:
        | out (float, np.array[N,M]): set of random variables for selected distribution.
    Raises:
        | No exception is raised.
    The routine set generates various types of random distributions, and is based on the
    code randraw by <NAME> & <NAME>
    These programs are distributed in the hope that they will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    Author: <NAME>, comments to <EMAIL>
    """
    funcName = distributions_generator.__name__
    # sampleSize follows Matlab conventions:
    #   None -> scalar, int N -> (N, N) array, list/tuple -> that exact shape
    if type(sampleSize) is int:
        out = np.zeros((sampleSize, sampleSize))
    elif type(sampleSize) is list or type(sampleSize) is tuple:
        out = np.zeros(sampleSize)
    else:
        out = np.zeros(1)
    if distribName is not None:
        # normalise the requested name: drop whitespace, lowercase.
        # (done inside the guard so a None distribName no longer crashes re.sub)
        pattern = re.compile(r'\s+')
        distribNameInner = re.sub(pattern, '', distribName).lower()
        # BUG FIX: the user-supplied distribParams were previously discarded
        # (every branch hard-coded distribParams=None); forward them instead,
        # as the docstring promises.
        if distribNameInner in ['exp', 'exponential']:
            out = distribution_exp(distribParams=distribParams, out=out, funcName=funcName)
        elif distribNameInner in ['lognorm', 'lognormal', 'cobbdouglas', 'antilognormal']:
            out = distribution_lognormal(distribParams=distribParams, out=out, funcName=funcName)
        elif distribNameInner in ['ig', 'inversegauss', 'invgauss']:
            out = distribution_inversegauss(distribParams=distribParams, out=out, funcName=funcName)
        elif distribNameInner in ['logistic']:
            out = distribution_logistic(distribParams=distribParams, out=out, funcName=funcName)
        elif distribNameInner in ['wald']:
            out = distribution_wald(distribParams=distribParams, out=out, funcName=funcName)
        else:
            print('\n distributions_generator: Unknown distribution name: {}/{} \n'.format(distribName, distribNameInner))
    # Matlab-style scalar return: unwrap a (1,) array to a plain value
    if out.shape == (1,):
        out = out[0]
    return out
##########################################################################################3
def validateParam(funcName=None, distribName=None, runDistribName=None, distribParamsName=None, paramName=None, param=None, conditionStr=None):
    """Validate a distribution parameter against a list of range conditions.
    Each entry of `conditionStr` is a string such as '> 0', '<= 1', '!= 0'
    or '== integer'; the first two characters carry the operator and the
    remainder the comparison value. All conditions must hold.
    Args:
        | funcName (string): name of the calling function (used in the message)
        | distribName (string): display name of the distribution
        | runDistribName (string): run name of the distribution
        | distribParamsName (string): display name of the parameter list
        | paramName (string): display name of the checked parameter
        | param (float): the value being validated
        | conditionStr ([string]): conditions the value must satisfy
    Returns:
        | True if all conditions are matched, else False (after printing help)
    Raises:
        | No exception is raised.
    """
    import math
    condLogical = True
    eqCondStr = ''
    for i, cond in enumerate(conditionStr):
        # accumulate a human-readable description for the error message
        if i == 0:
            eqCondStr = eqCondStr + cond
        else:
            eqCondStr = eqCondStr + ' and ' + cond
        # the operator is the first two characters, with spaces stripped
        eqCond = re.sub(re.compile(r'\s+'), '', cond[0:2])
        if eqCond in ['<']:
            condLogical &= param < float(cond[2:])
        elif eqCond in ['<=']:
            condLogical &= param <= float(cond[2:])
        elif eqCond in ['>']:
            condLogical &= param > float(cond[2:])
        elif eqCond in ['>=']:
            condLogical &= param >= float(cond[2:])
        elif eqCond in ['!=']:
            condLogical &= param != float(cond[2:])
        elif eqCond in ['==']:
            if 'integer' in cond[2:]:
                # BUG FIX: math.floor_ does not exist (numpy spelling crept in);
                # math.floor is the correct integrality test.
                condLogical &= param == math.floor(param)
            else:
                # BUG FIX: math.float does not exist; the builtin float() is
                # the intended conversion of the comparison value.
                condLogical &= param == float(cond[2:])
    if not condLogical:
        print('{} Variates Generation: {}({}, {});\n Parameter {} should be {}\n (run {} ({}) for help)'.format(distribName,
            funcName,runDistribName,distribParamsName,paramName,eqCondStr,funcName,runDistribName))
    return condLogical
##########################################################################################3
def checkParamsNum(funcName, distribName, runDistribName, distribParams, correctNum):
    """Check that the number of supplied distribution parameters is allowed.
    Args:
        | funcName (string): name of the calling function (used in the message)
        | distribName (string): display name of the distribution
        | runDistribName (string): run name of the distribution
        | distribParams ([float]): the supplied parameter list
        | correctNum ([int]): acceptable parameter counts (several may apply)
    Returns:
        | True when the count is acceptable, otherwise False.
    Raises:
        | No exception is raised.
    """
    if len(distribParams) in correctNum:
        return True
    # complain, then tell the caller not to proceed
    print('{} Variates Generation:\n {} {} {} {} {}'.format(distribName,
        'Wrong number of parameters (run ', funcName, "('", runDistribName, "') for help) "))
    return False
################################################################
################################################################
##
## confirm the correctness of the functions
# Manual-check entry point: sets up shorthand helpers for ad-hoc inspection
# of the samplers above; no automated tests are run at this time.
if __name__ == '__main__':
    import datetime as dt
    import ryutils
    # shorthand for ryutils.intify_tuple -- presumably for interactive use; TODO confirm
    rit = ryutils.intify_tuple
    doAll = False
    #no tests at this time
| [
"sklearn.model_selection.LeaveOneOut",
"numpy.sqrt",
"numpy.random.rand",
"re.compile",
"numpy.where",
"numpy.log",
"math.float",
"sklearn.neighbors.KernelDensity",
"math.floor_",
"numpy.zeros",
"numpy.linspace",
"numpy.isnan",
"numpy.random.randn",
"re.sub",
"numpy.zeros_like"
] | [((4176, 4195), 'numpy.isnan', 'np.isnan', (['bandwidth'], {}), '(bandwidth)\n', (4184, 4195), True, 'import numpy as np\n'), ((5252, 5270), 'numpy.zeros_like', 'np.zeros_like', (['x_d'], {}), '(x_d)\n', (5265, 5270), True, 'import numpy as np\n'), ((18241, 18259), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (18251, 18259), False, 'import re\n'), ((4261, 4289), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': 'kernel'}), '(kernel=kernel)\n', (4274, 4289), False, 'from sklearn.neighbors import KernelDensity\n'), ((4644, 4660), 'numpy.isnan', 'np.isnan', (['lowbnd'], {}), '(lowbnd)\n', (4652, 4660), True, 'import numpy as np\n'), ((4707, 4723), 'numpy.isnan', 'np.isnan', (['uppbnd'], {}), '(uppbnd)\n', (4715, 4723), True, 'import numpy as np\n'), ((4905, 4954), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': 'bandwidth', 'kernel': 'kernel'}), '(bandwidth=bandwidth, kernel=kernel)\n', (4918, 4954), False, 'from sklearn.neighbors import KernelDensity\n'), ((5352, 5368), 'numpy.isnan', 'np.isnan', (['lowbnd'], {}), '(lowbnd)\n', (5360, 5368), True, 'import numpy as np\n'), ((5382, 5411), 'numpy.where', 'np.where', (['(x_d <= lowbnd)', '(0)', 'x'], {}), '(x_d <= lowbnd, 0, x)\n', (5390, 5411), True, 'import numpy as np\n'), ((5419, 5435), 'numpy.isnan', 'np.isnan', (['uppbnd'], {}), '(uppbnd)\n', (5427, 5435), True, 'import numpy as np\n'), ((5449, 5478), 'numpy.where', 'np.where', (['(x_d >= uppbnd)', '(0)', 'x'], {}), '(x_d >= uppbnd, 0, x)\n', (5457, 5478), True, 'import numpy as np\n'), ((18375, 18409), 'numpy.zeros', 'np.zeros', (['(sampleSize, sampleSize)'], {}), '((sampleSize, sampleSize))\n', (18383, 18409), True, 'import numpy as np\n'), ((20588, 20606), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (20598, 20606), False, 'import re\n'), ((20624, 20651), 're.sub', 're.sub', (['pattern', '""""""', 'eqCond'], {}), "(pattern, '', eqCond)\n", (20630, 20651), False, 'import 
re\n'), ((4224, 4247), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (4235, 4247), True, 'import numpy as np\n'), ((14939, 14965), 'numpy.random.rand', 'np.random.rand', (['*out.shape'], {}), '(*out.shape)\n', (14953, 14965), True, 'import numpy as np\n'), ((18283, 18315), 're.sub', 're.sub', (['pattern', '""""""', 'distribName'], {}), "(pattern, '', distribName)\n", (18289, 18315), False, 'import re\n'), ((18488, 18508), 'numpy.zeros', 'np.zeros', (['sampleSize'], {}), '(sampleSize)\n', (18496, 18508), True, 'import numpy as np\n'), ((18533, 18544), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (18541, 18544), True, 'import numpy as np\n'), ((4390, 4403), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (4401, 4403), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((12883, 12910), 'numpy.random.randn', 'np.random.randn', (['*out.shape'], {}), '(*out.shape)\n', (12898, 12910), True, 'import numpy as np\n'), ((13051, 13077), 'numpy.random.rand', 'np.random.rand', (['*out.shape'], {}), '(*out.shape)\n', (13065, 13077), True, 'import numpy as np\n'), ((14992, 15012), 'numpy.log', 'np.log', (['(1.0 / u1 - 1)'], {}), '(1.0 / u1 - 1)\n', (14998, 15012), True, 'import numpy as np\n'), ((7112, 7138), 'numpy.random.rand', 'np.random.rand', (['*out.shape'], {}), '(*out.shape)\n', (7126, 7138), True, 'import numpy as np\n'), ((9284, 9311), 'numpy.random.randn', 'np.random.randn', (['*out.shape'], {}), '(*out.shape)\n', (9299, 9311), True, 'import numpy as np\n'), ((12975, 13033), 'numpy.sqrt', 'np.sqrt', (['(4 * mu * _lambda * chisq1 + mu ** 2 * chisq1 ** 2)'], {}), '(4 * mu * _lambda * chisq1 + mu ** 2 * chisq1 ** 2)\n', (12982, 13033), True, 'import numpy as np\n'), ((21460, 21478), 'math.floor_', 'math.floor_', (['param'], {}), '(param)\n', (21471, 21478), False, 'import math\n'), ((21537, 21568), 'math.float', 'math.float', (['conditionStr[i][2:]'], {}), '(conditionStr[i][2:])\n', (21547, 
21568), False, 'import math\n')] |
import pypsa, os
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Solve a linear OPF on the meshed AC-DC example network, plot the result,
# verify the nodal power balance by hand, and export the solved network.
network = pypsa.Network()
folder_name = "ac-dc-data"
network.import_from_csv_folder(folder_name)
network.lopf(network.snapshots)

# --- plot the network, colouring lines by the carrier of their bus0 ---
fig, ax = plt.subplots(subplot_kw={'projection': ccrs.EqualEarth()}, figsize=(5, 5))
line_colors = network.lines.bus0.map(network.buses.carrier).replace(
    {'AC': 'indianred', 'DC': 'limegreen'})
network.plot(bus_colors='grey', ax=ax,
             margin=.5, line_widths={'Line': 2., 'Link': 0},
             line_colors=line_colors,
             geomap='10m', title='Mixed AC-DC (red - green) network',
             # flow='mean',
             color_geomap=True)
fig.canvas.draw()
fig.tight_layout()
fig.savefig('ac_dc_meshed.png')

# --- summarise the sub-networks found after import ---
for sn in network.sub_networks.obj:
    print(sn, network.sub_networks.at[sn.name, "carrier"], len(sn.buses()), len(sn.branches()))
print("\nControllable branches:")
print(network.links)

# --- check the power balance at one snapshot, bus by bus ---
now = network.snapshots[5]
print("\nCheck power balance at each bus:")
for bus in network.buses.index:
    print("\n" * 3 + bus)
    generators = sum(network.generators_t.p.loc[now, network.generators.bus == bus])
    loads = sum(network.loads_t.p.loc[now, network.loads.bus == bus])
    print("Generators:", generators)
    print("Loads:", loads)
    print("Total:", generators - loads)
    p0 = 0.
    p1 = 0.
    # accumulate branch flows entering/leaving this bus at either end
    for c in network.iterate_components(network.branch_components):
        bs0 = (c.df.bus0 == bus)
        if bs0.any():
            print(c, "\n", c.pnl.p0.loc[now, bs0])
            p0 += c.pnl.p0.loc[now, bs0].sum()
        bs1 = (c.df.bus1 == bus)
        if bs1.any():
            print(c, "\n", c.pnl.p1.loc[now, bs1])
            p1 += c.pnl.p1.loc[now, bs1].sum()
    print("Branches", p0 + p1)
    # generation minus load must equal the branch flows (offset avoids 0/0)
    np.testing.assert_allclose(generators - loads + 1., p0 + p1 + 1.)
    print("")
print(sum(network.generators_t.p.loc[now]))
print(sum(network.loads_t.p.loc[now]))

results_folder_name = os.path.join(folder_name, "results-lopf")
if True:
    network.export_to_csv_folder(results_folder_name)
| [
"os.path.join",
"cartopy.crs.EqualEarth",
"numpy.testing.assert_allclose",
"pypsa.Network"
] | [((106, 121), 'pypsa.Network', 'pypsa.Network', ([], {}), '()\n', (119, 121), False, 'import pypsa, os\n'), ((1949, 1990), 'os.path.join', 'os.path.join', (['folder_name', '"""results-lopf"""'], {}), "(folder_name, 'results-lopf')\n", (1961, 1990), False, 'import pypsa, os\n'), ((1769, 1836), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(generators - loads + 1.0)', '(p0 + p1 + 1.0)'], {}), '(generators - loads + 1.0, p0 + p1 + 1.0)\n', (1795, 1836), True, 'import numpy as np\n'), ((278, 295), 'cartopy.crs.EqualEarth', 'ccrs.EqualEarth', ([], {}), '()\n', (293, 295), True, 'import cartopy.crs as ccrs\n')] |
import numpy as np
import torch
"""
this file contains various functions for point cloud transformation,
some of which are not used in the clean version of code,
but feel free to use them if you have different forms of point clouds.
"""
def swap_axis(input_np, swap_mode='n210'):
    """Reorder (and possibly negate) the coordinate axes of a point cloud.

    Used to move between the canonical frames of different methods
    (e.g. pcl2pcl and MPC).

    input_np: (N, 3) array; swap_mode: one of '021', 'n210', '210'.
    Raises NotImplementedError for any other mode.
    """
    x, y, z = input_np[:, 0], input_np[:, 1], input_np[:, 2]
    if swap_mode == '021':
        columns = [x, z, y]
    elif swap_mode == 'n210':
        columns = [-z, y, x]
    elif swap_mode == '210':
        columns = [z, y, x]
    else:
        raise NotImplementedError
    return np.stack(columns, axis=1)
def scale_numpy(input_array, range=0.25, ax_wise=True):
    """Rescale a point cloud so its coordinates fit within +/- `range`.

    With ax_wise=True every axis is scaled independently by its own
    max-abs value; otherwise one global scale (the overall max-abs) is
    applied to all three axes.
    """
    abs_vals = np.abs(input_array)
    if ax_wise:
        # independent scale per coordinate axis
        per_axis_max = np.max(abs_vals, axis=0)
        scaled = np.stack(
            [input_array[:, k] * (range / per_axis_max[k]) for k in (0, 1, 2)],
            axis=1)
    else:
        # one global scale: the largest absolute coordinate anywhere
        scaled = input_array * (range / np.max(abs_vals))
    return scaled
def scale_numpy_ls(input_ls, range=0.25):
    """Apply `scale_numpy` to every point cloud in a list.

    Returns a new list with each cloud rescaled to +/- `range`.
    """
    return [scale_numpy(itm, range=range) for itm in input_ls]
def shift_numpy(input_array, mode='center', additional_limit=None):
    """Translate a point cloud so its bounding-box centre sits at the origin.

    mode='center': centre on the axis-aligned bounding box.
    mode='given_some_limit': same, but the lower y bound is overridden by
    additional_limit = ['yl', value] (any other tag raises).
    Any other mode raises NotImplementedError (weighted centre / point-mean
    variants are not implemented).
    """
    if mode not in ('center', 'given_some_limit'):
        raise NotImplementedError  # weighted center, pc_mean
    ax_max = np.max(input_array, axis=0)
    ax_min = np.min(input_array, axis=0)
    if mode == 'given_some_limit':
        print(additional_limit)
        if additional_limit[0] != 'yl':
            raise NotImplementedError
        ax_min[1] = additional_limit[1]  # override the lower y bound
    return input_array - (ax_max + ax_min) / 2
def shift_np_one_dim(input_array, dim=2):
    """Centre a single coordinate axis of a point cloud, in place.

    The chosen column is shifted so that the midpoint of its min/max
    becomes zero. Mutates and returns the same array.
    """
    col = input_array[:, dim]
    midpoint = (col.max() + col.min()) / 2
    input_array[:, dim] = col - midpoint
    return input_array
def downsample_numpy(input_np, points=1024, seed=0):
    """Randomly keep at most `points` rows of a point cloud.

    Clouds that are already small enough are returned unchanged (the same
    object, no copy). NOTE: reseeds numpy's global RNG for determinism.
    """
    n_points = input_np.shape[0]
    if n_points <= points:
        return input_np
    np.random.seed(seed)
    order = np.arange(n_points)
    np.random.shuffle(order)
    return input_np[order[:points]]
def voxelize(image, n_bins=32, pcd_limit=0.5, threshold=0):
    """Voxelize a point cloud into an occupancy-count dictionary.

    Coordinates are mapped to integer bins via x -> int(x*n_bins + n_bins/2),
    and the number of points per voxel is counted. Voxels with a count
    <= `threshold` are dropped.

    Args:
        image: (N, 3) numpy array or torch tensor of points
               (assumed roughly in [-0.5, 0.5] -- TODO confirm).
        n_bins: voxels per axis.
        pcd_limit: unused; kept for backward compatibility with callers.
        threshold: minimum count (exclusive) for a voxel to be kept.
    Returns:
        dict mapping (i, j, k) voxel tuples to point counts.
    """
    if isinstance(image, np.ndarray):
        image = torch.from_numpy(image).unsqueeze(0)
    pcd_new = image * n_bins + n_bins * 0.5
    pcd_new = pcd_new.type(torch.int64)  # truncates toward zero
    ls_voxels = pcd_new.squeeze(0).tolist()
    # each element of ls_voxels is a list of ints; make them hashable
    # (the previous pdb.set_trace() debugger trap on failure was removed --
    # any error now propagates to the caller)
    tuple_voxels = [tuple(itm) for itm in ls_voxels]
    mask_dict = {}
    for tuple_voxel in tuple_voxels:
        mask_dict[tuple_voxel] = mask_dict.get(tuple_voxel, 0) + 1
    # BUG FIX: deleting from a dict while iterating .items() raises
    # RuntimeError; iterate over a snapshot so pruning is safe.
    for voxel, cnt in list(mask_dict.items()):
        if cnt <= threshold:
            del mask_dict[voxel]
    return mask_dict
def return_plot_range(pcd, plot_range):
    """Return the points that fall strictly inside an axis-aligned box.

    Used to plot Fig.3 in the main paper. Note the axis mapping used for
    the test: xs = pcd[i, 0], ys = pcd[i, 2], zs = pcd[i, 1].

    Args:
        pcd: (N, 3) array of points.
        plot_range: three (low, high) pairs, one per tested axis.
    Returns:
        (M, 3) array of the points inside the box (np.stack raises if
        no point qualifies, as before).
    """
    pcd_new = []
    x1, x2 = plot_range[0]
    y1, y2 = plot_range[1]
    z1, z2 = plot_range[2]
    # BUG FIX: iterate over the actual number of points instead of a
    # hard-coded 2048, which crashed on smaller clouds and silently
    # ignored points beyond index 2047 in larger ones.
    for i in range(pcd.shape[0]):
        xs = pcd[i, 0]
        ys = pcd[i, 2]
        zs = pcd[i, 1]
        if x1 < xs < x2 and y1 < ys < y2 and z1 < zs < z2:
            pcd_new.append(pcd[i])
    pcd_new = np.stack(pcd_new)
    return pcd_new
def reverse_normalize(pc, pc_CRN):
    """Undo unit-sphere normalisation of a point cloud.

    Rescales `pc` by the maximum point radius of the reference cloud
    `pc_CRN` and translates it back onto that cloud's centroid.
    """
    radii = np.sqrt(np.sum(pc_CRN ** 2, axis=1))
    scale = np.max(radii)
    offset = np.mean(pc_CRN, axis=0)
    return pc * scale + offset
def remove_zeros(partial):
    """Drop all-zero (masked) points from a partial point cloud.

    Accepts a numpy array or a torch tensor; always returns a numpy
    array containing only the rows with a nonzero Euclidean norm.
    """
    if isinstance(partial, np.ndarray):
        partial = torch.from_numpy(partial)
    keep = torch.norm(partial, dim=1) > 0
    return partial[keep].numpy()
def retrieve_region(pcd, retrieve_range):
    """Select the points of an (N, 3) cloud lying strictly inside a box.

    retrieve_range holds three (low, high) pairs. Note the axis mapping:
    column 0 is tested against the first pair, column 2 against the
    second, and column 1 against the third. Prints the resulting shape.
    """
    (x1, x2), (y1, y2), (z1, z2) = retrieve_range[0], retrieve_range[1], retrieve_range[2]
    kept = [
        row for row in pcd
        if x1 < row[0] < x2 and y1 < row[2] < y2 and z1 < row[1] < z2
    ]
    new_pcd = np.stack(kept)
    print('new_pcd shape', new_pcd.shape)
    return new_pcd
| [
"numpy.mean",
"numpy.abs",
"numpy.random.shuffle",
"torch.from_numpy",
"numpy.max",
"numpy.stack",
"torch.norm",
"numpy.sum",
"numpy.random.seed",
"pdb.set_trace",
"numpy.min",
"torch.where"
] | [((4191, 4208), 'numpy.stack', 'np.stack', (['pcd_new'], {}), '(pcd_new)\n', (4199, 4208), True, 'import numpy as np\n'), ((4394, 4417), 'numpy.mean', 'np.mean', (['pc_CRN'], {'axis': '(0)'}), '(pc_CRN, axis=0)\n', (4401, 4417), True, 'import numpy as np\n'), ((4639, 4665), 'torch.norm', 'torch.norm', (['partial'], {'dim': '(1)'}), '(partial, dim=1)\n', (4649, 4665), False, 'import torch\n'), ((4676, 4697), 'torch.where', 'torch.where', (['(norm > 0)'], {}), '(norm > 0)\n', (4687, 4697), False, 'import torch\n'), ((5177, 5193), 'numpy.stack', 'np.stack', (['points'], {}), '(points)\n', (5185, 5193), True, 'import numpy as np\n'), ((434, 500), 'numpy.stack', 'np.stack', (['[input_np[:, 0], input_np[:, 2], input_np[:, 1]]'], {'axis': '(1)'}), '([input_np[:, 0], input_np[:, 2], input_np[:, 1]], axis=1)\n', (442, 500), True, 'import numpy as np\n'), ((1169, 1199), 'numpy.stack', 'np.stack', (['[d0, d1, d2]'], {'axis': '(1)'}), '([d0, d1, d2], axis=1)\n', (1177, 1199), True, 'import numpy as np\n'), ((1812, 1839), 'numpy.max', 'np.max', (['input_array'], {'axis': '(0)'}), '(input_array, axis=0)\n', (1818, 1839), True, 'import numpy as np\n'), ((1856, 1883), 'numpy.min', 'np.min', (['input_array'], {'axis': '(0)'}), '(input_array, axis=0)\n', (1862, 1883), True, 'import numpy as np\n'), ((2793, 2813), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2807, 2813), True, 'import numpy as np\n'), ((2875, 2901), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2892, 2901), True, 'import numpy as np\n'), ((4602, 4627), 'torch.from_numpy', 'torch.from_numpy', (['partial'], {}), '(partial)\n', (4618, 4627), False, 'import torch\n'), ((548, 619), 'numpy.stack', 'np.stack', (['[input_np[:, 2] * -1, input_np[:, 1], input_np[:, 0]]'], {'axis': '(1)'}), '([input_np[:, 2] * -1, input_np[:, 1], input_np[:, 0]], axis=1)\n', (556, 619), True, 'import numpy as np\n'), ((965, 984), 'numpy.abs', 'np.abs', (['input_array'], {}), 
'(input_array)\n', (971, 984), True, 'import numpy as np\n'), ((1320, 1339), 'numpy.abs', 'np.abs', (['input_array'], {}), '(input_array)\n', (1326, 1339), True, 'import numpy as np\n'), ((2130, 2157), 'numpy.max', 'np.max', (['input_array'], {'axis': '(0)'}), '(input_array, axis=0)\n', (2136, 2157), True, 'import numpy as np\n'), ((2174, 2201), 'numpy.min', 'np.min', (['input_array'], {'axis': '(0)'}), '(input_array, axis=0)\n', (2180, 2201), True, 'import numpy as np\n'), ((3430, 3445), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3443, 3445), False, 'import pdb\n'), ((4335, 4362), 'numpy.sum', 'np.sum', (['(pc_CRN ** 2)'], {'axis': '(1)'}), '(pc_CRN ** 2, axis=1)\n', (4341, 4362), True, 'import numpy as np\n'), ((665, 731), 'numpy.stack', 'np.stack', (['[input_np[:, 2], input_np[:, 1], input_np[:, 0]]'], {'axis': '(1)'}), '([input_np[:, 2], input_np[:, 1], input_np[:, 0]], axis=1)\n', (673, 731), True, 'import numpy as np\n'), ((3148, 3171), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (3164, 3171), False, 'import torch\n')] |
import numpy as np
from web.evaluate import calculate_purity, evaluate_categorization
from web.embedding import Embedding
from web.datasets.utils import _fetch_file
from web.datasets.categorization import fetch_ESSLI_2c
def test_purity():
    """Purity of a hand-built clustering must come out to 3/5."""
    labels_true = np.array([1, 1, 2, 2, 3])
    labels_pred = np.array([2, 2, 2, 2, 1])
    purity = calculate_purity(labels_true, labels_pred)
    assert abs(0.6 - purity) < 1e-10
def test_categorization():
    """End-to-end categorization benchmark on ESSLI_2c with downloaded vectors."""
    dataset = fetch_ESSLI_2c()
    url = "https://www.dropbox.com/s/5occ4p7k28gvxfj/ganalogy-sg-wiki-en-400.bin?dl=1"
    path = _fetch_file(url, "test")
    embedding = Embedding.from_word2vec(path, binary=True)
    score = evaluate_categorization(embedding, dataset.X, dataset.y, seed=777, method="all")
    assert score >= 0.2
"web.datasets.categorization.fetch_ESSLI_2c",
"web.datasets.utils._fetch_file",
"web.evaluate.calculate_purity",
"numpy.array",
"web.evaluate.evaluate_categorization",
"web.embedding.Embedding.from_word2vec"
] | [((253, 278), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3]'], {}), '([1, 1, 2, 2, 3])\n', (261, 278), True, 'import numpy as np\n'), ((288, 313), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 1]'], {}), '([2, 2, 2, 2, 1])\n', (296, 313), True, 'import numpy as np\n'), ((412, 428), 'web.datasets.categorization.fetch_ESSLI_2c', 'fetch_ESSLI_2c', ([], {}), '()\n', (426, 428), False, 'from web.datasets.categorization import fetch_ESSLI_2c\n'), ((532, 556), 'web.datasets.utils._fetch_file', '_fetch_file', (['url', '"""test"""'], {}), "(url, 'test')\n", (543, 556), False, 'from web.datasets.utils import _fetch_file\n'), ((565, 612), 'web.embedding.Embedding.from_word2vec', 'Embedding.from_word2vec', (['file_name'], {'binary': '(True)'}), '(file_name, binary=True)\n', (588, 612), False, 'from web.embedding import Embedding\n'), ((624, 690), 'web.evaluate.evaluate_categorization', 'evaluate_categorization', (['w', 'data.X', 'data.y'], {'seed': '(777)', 'method': '"""all"""'}), "(w, data.X, data.y, seed=777, method='all')\n", (647, 690), False, 'from web.evaluate import calculate_purity, evaluate_categorization\n'), ((331, 363), 'web.evaluate.calculate_purity', 'calculate_purity', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (347, 363), False, 'from web.evaluate import calculate_purity, evaluate_categorization\n')] |
import argparse
import numpy as np
from squeezenet import SqueezeNet
import os
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
SIZE = 227
def main():
    """CLI entry point: classify images with a trained SqueezeNet checkpoint."""
    # --- argument parsing ---
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint-path', required=True)
    parser.add_argument('--image', nargs='+', required=True)
    parser.add_argument('--num-classes', type=int, required=True)
    args = parser.parse_args()
    # --- model setup ---
    model = SqueezeNet(weights=None, classes=args.num_classes)
    model.load_weights(args.checkpoint_path)
    # --- build the input batch (one resized image per path) ---
    batch = np.array([
        image.img_to_array(image.load_img(p, target_size=(SIZE, SIZE)))
        for p in args.image
    ])
    batch = preprocess_input(batch)
    probs = model.predict(batch)
    # --- report the argmax class per image ---
    print('')
    for idx, img_path in enumerate(args.image):
        print('%s' % img_path)
        print('  Prediction: %s' % np.argmax(probs[idx]))
print(' Prediction: %s' % np.argmax(probs[i]))
if __name__ == '__main__':
main()
| [
"keras.preprocessing.image.img_to_array",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.array",
"keras.applications.imagenet_utils.preprocess_input",
"squeezenet.SqueezeNet",
"keras.preprocessing.image.load_img"
] | [((218, 243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (241, 243), False, 'import argparse\n'), ((475, 525), 'squeezenet.SqueezeNet', 'SqueezeNet', ([], {'weights': 'None', 'classes': 'args.num_classes'}), '(weights=None, classes=args.num_classes)\n', (485, 525), False, 'from squeezenet import SqueezeNet\n'), ((740, 752), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (748, 752), True, 'import numpy as np\n'), ((762, 782), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['xs'], {}), '(xs)\n', (778, 782), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), ((626, 672), 'keras.preprocessing.image.load_img', 'image.load_img', (['path'], {'target_size': '(SIZE, SIZE)'}), '(path, target_size=(SIZE, SIZE))\n', (640, 672), False, 'from keras.preprocessing import image\n'), ((685, 708), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (703, 708), False, 'from keras.preprocessing import image\n'), ((935, 954), 'numpy.argmax', 'np.argmax', (['probs[i]'], {}), '(probs[i])\n', (944, 954), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from rbergomi.rbergomi_utils import *
class rBergomi(object):
    """
    Class for generating paths of the rBergomi model.
    Integral equations for reference:
    Y(t) := sqrt(2a + 1) int 0,t (t - u)^a dW(u)
    V(t) := xi exp(eta Y - 0.5 eta^2 t^(2a + 1))
    S(t) := S0 int 0,t sqrt(V) dB(u) - 0.5 V du

    Helper functions cov, g, b and bsinv come from rbergomi_utils.
    """
    def __init__(self, n=256, N=1024, T=1.0):
        """
        Constructor for class.

        n: time steps per year, N: number of Monte-Carlo paths,
        T: maturity in years.
        """
        # Basic assignments.
        self.n = n # Steps per year
        self.N = N # Paths
        self.T = T # Maturity
        self.dt = 1.0/n # Step size
        self.s = int(n*T) # Steps
        self.t = np.linspace(0,T,1+self.s)[np.newaxis,:] # Time grid
    def dW(self, α=0.4, β=-0.4, seed=0):
        """
        Generate correlated 2d Brownian increment blocks for the two
        Volterra kernels (roughness parameters α and β). Returns an array
        of shape (N, s, 2, 2); the covariance structure is given by
        cov(α, n) and cov(β, n) from rbergomi_utils.
        """
        self.α = α
        self.β = β
        s = self.s
        # Store required covariance matrices
        cov1 = cov(α, self.n)
        cov2 = cov(β, self.n)
        # Cholesky factors, reshaped for broadcasting over paths and steps
        chol1 = np.linalg.cholesky(cov1)[np.newaxis,np.newaxis,:,:]
        chol2 = np.linalg.cholesky(cov2)[np.newaxis,np.newaxis,:,:]
        # fn = 'sobol/'+str(seed)+'-'+str(self.N)+'-'+str(4*s)+'.csv'
        # random_numbers = np.array(pd.read_csv(fn))
        ## SHOULD BE OUTSIDE CALIBRATION ROUTINE
        np.random.seed(seed)
        random_numbers = np.random.normal(size=(self.N,4*s))
        # Obviously generalise
        dB11 = random_numbers[:,0*s:1*s]
        dB12 = random_numbers[:,1*s:2*s]
        dB21 = random_numbers[:,2*s:3*s]
        dB22 = random_numbers[:,3*s:4*s]
        # Prepare for operations
        dB1 = np.zeros((self.N,s,2,1))
        dB2 = np.zeros((self.N,s,2,1))
        dB1[:,:,0,0] = dB11
        dB1[:,:,1,0] = dB12
        dB2[:,:,0,0] = dB21
        dB2[:,:,1,0] = dB22
        # Finally, correlate in C-layer
        dW1 = np.squeeze(np.matmul(chol1,dB1))
        dW2 = np.squeeze(np.matmul(chol2,dB2))
        dW = np.zeros((self.N,s,2,2))
        dW[:,:,:,0] = dW1
        dW[:,:,:,1] = dW2
        return dW
    # Should promote this for two dimensions given α, β use
    def Y(self, dW, α):
        """
        Constructs Volterra process from appropriately
        correlated 2d Brownian increments.

        Y1 holds the exact-integral part, Y2 the Riemann-sum part built
        by discrete convolution with the kernel weights Γ.
        """
        Y1 = np.zeros((self.N, 1 + self.s)) # Exact integral
        Y2 = np.zeros((self.N, 1 + self.s)) # Riemann sum
        # Construct Y1 through exact integral
        # for i in range(1 + self.s):
        # Use np.cumsum here? - must time this
        # for i in np.arange(1, 1 + self.s, 1): # See (3.6)
        #     Y1[:,i] += dW[:,i-1,1] # Assumes kappa = 1
        # Construct Y1 through exact integral
        Y1[:,1:1+self.s] = dW[:,:self.s,1] # Assumes kappa = 1
        # Construct arrays for convolution
        Γ = np.zeros(1 + self.s) # Gamma
        for k in np.arange(2, 1 + self.s, 1): # Assumes kappa = 1
            Γ[k] = g(b(k, α)/self.n, α)
        Ξ = dW[:,:,0] # Xi
        # Initialise convolution result, GX
        ΓΞ = np.zeros((self.N, len(Ξ[0,:]) + len(Γ) - 1))
        # Compute convolution, FFT not used for small n
        # Not able to compute all paths in C-layer
        for i in range(self.N):
            ΓΞ[i,:] = np.convolve(Γ, Ξ[i,:])
        # Extract appropriate part of convolution
        Y2 = ΓΞ[:,:1 + self.s]
        # Finally contruct and return full process
        Y = np.sqrt(2 * α + 1) * (Y1 + Y2)
        return Y
    # Yes should raise dimens
    def V(self, Yα, Yβ, ξ=1.0, ζ=-0.5, η=1.5):
        """
        rBergomi variance process built from the two Volterra processes.
        ξ: forward variance level; ζ, η: vol-of-vol parameters for the
        α- and β-kernels respectively (α, β are taken from the last dW call).
        SHOULD ALSO WRITE INTEGRATED PROCESS METHOD FOR EFFICIENT LATER USE.
        """
        self.ξ = ξ
        self.ζ = ζ
        self.η = η
        α = self.α
        β = self.β
        t = self.t
        # each factor is a Wick exponential (martingale correction in the drift)
        Vα = np.exp(ζ*Yα - 0.5*ζ**2 * t**(2*α+1))
        Vβ = np.exp(η*Yβ - 0.5*η**2 * t**(2*β+1))
        V = ξ * Vα * Vβ
        return V
    def S(self, V, dB):
        """
        rBergomi price process: Euler scheme on log-S driven by the
        variance paths V and price Brownian increments dB, started at 1.
        """
        dt = self.dt
        # Construct non-anticipative Riemann increments
        increments = np.sqrt(V[:,:-1]) * dB - 0.5 * V[:,:-1] * dt
        # Cumsum is actually a little slower than Python loop. Not terribly
        integral = np.cumsum(increments, axis = 1)
        S = np.zeros_like(V)
        S[:,0] = 1.
        S[:,1:] = np.exp(integral)
        return S
    def surface(self, S, surf):
        """
        Provides the implied Black volatility surface for every option
        implicitely in a Surface object.

        Monte-Carlo call and put prices are inverted with bsinv; the two
        implied vols are averaged (put-call symmetric estimator).
        """
        vec_bsinv = np.vectorize(bsinv)
        # map maturities (in years) to indices on the simulation grid
        indices = (surf.maturities * self.n).astype(int)
        ST = S[:,indices][:,:,np.newaxis]
        K = np.array(surf.strikes())[np.newaxis,:,:]
        Δ = np.array(surf.forward_deltas())
        T = surf.maturities[:,np.newaxis]
        call_payoffs = np.maximum(ST - K,0) #- (1-Δ)*(ST - 1)
        call_prices = np.mean(call_payoffs, axis=0)
        call_vols = vec_bsinv(call_prices, 1., np.squeeze(K), T, ϕ=1)
        put_payoffs = np.maximum(K - ST,0) #+ Δ*(ST - 1)
        put_prices = np.mean(put_payoffs, axis=0)
        put_vols = vec_bsinv(put_prices, 1., np.squeeze(K), T, ϕ=-1)
        # don't think helpful when have control
        vols = (call_vols + put_vols) / 2
        return pd.DataFrame(vols, index=surf.tenors, columns=surf.deltas)
    def cross_surface(self, X1, X2, surf):
        """
        Provides surface for X3 := X1 / X2.

        Same construction as `surface`, but option payoffs on the ratio
        X3 are weighted by X2T (change of numeraire to the X2 asset --
        TODO confirm against the pricing measure used upstream).
        """
        vec_bsinv = np.vectorize(bsinv)
        indices = (surf.maturities * self.n).astype(int)
        X1T = X1[:,indices][:,:,np.newaxis]
        X2T = X2[:,indices][:,:,np.newaxis]
        X3T = X1T / X2T
        K = np.array(surf.strikes())[np.newaxis,:,:]
        # Try controlling with both X1 and X2
        # Δ = np.array(surf.forward_deltas())
        T = surf.maturities[:,np.newaxis]
        # Make sure makes sense to have X2T
        call_payoffs = X2T * np.maximum(X3T - K,0) #- (1-Δ)*(ST - 1)
        call_prices = np.mean(call_payoffs, axis=0)
        call_vols = vec_bsinv(call_prices, 1., np.squeeze(K), T, ϕ=1)
        put_payoffs = X2T * np.maximum(K - X3T,0) #+ Δ*(ST - 1)
        put_prices = np.mean(put_payoffs, axis=0)
        put_vols = vec_bsinv(put_prices, 1., np.squeeze(K), T, ϕ=-1)
        # don't think helpful when have control
        vols = (call_vols + put_vols) / 2
        return pd.DataFrame(vols, index=surf.tenors, columns=surf.deltas)
| [
"numpy.random.normal",
"numpy.mean",
"numpy.convolve",
"numpy.sqrt",
"numpy.squeeze",
"numpy.exp",
"numpy.linalg.cholesky",
"numpy.zeros",
"numpy.linspace",
"numpy.matmul",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.cumsum",
"numpy.maximum",
"numpy.zeros_like",
"numpy.arange",
"... | [((1276, 1296), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1290, 1296), True, 'import numpy as np\n'), ((1322, 1360), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.N, 4 * s)'}), '(size=(self.N, 4 * s))\n', (1338, 1360), True, 'import numpy as np\n'), ((1602, 1629), 'numpy.zeros', 'np.zeros', (['(self.N, s, 2, 1)'], {}), '((self.N, s, 2, 1))\n', (1610, 1629), True, 'import numpy as np\n'), ((1641, 1668), 'numpy.zeros', 'np.zeros', (['(self.N, s, 2, 1)'], {}), '((self.N, s, 2, 1))\n', (1649, 1668), True, 'import numpy as np\n'), ((1928, 1955), 'numpy.zeros', 'np.zeros', (['(self.N, s, 2, 2)'], {}), '((self.N, s, 2, 2))\n', (1936, 1955), True, 'import numpy as np\n'), ((2244, 2274), 'numpy.zeros', 'np.zeros', (['(self.N, 1 + self.s)'], {}), '((self.N, 1 + self.s))\n', (2252, 2274), True, 'import numpy as np\n'), ((2305, 2335), 'numpy.zeros', 'np.zeros', (['(self.N, 1 + self.s)'], {}), '((self.N, 1 + self.s))\n', (2313, 2335), True, 'import numpy as np\n'), ((2766, 2786), 'numpy.zeros', 'np.zeros', (['(1 + self.s)'], {}), '(1 + self.s)\n', (2774, 2786), True, 'import numpy as np\n'), ((2811, 2838), 'numpy.arange', 'np.arange', (['(2)', '(1 + self.s)', '(1)'], {}), '(2, 1 + self.s, 1)\n', (2820, 2838), True, 'import numpy as np\n'), ((3755, 3803), 'numpy.exp', 'np.exp', (['(ζ * Yα - 0.5 * ζ ** 2 * t ** (2 * α + 1))'], {}), '(ζ * Yα - 0.5 * ζ ** 2 * t ** (2 * α + 1))\n', (3761, 3803), True, 'import numpy as np\n'), ((3805, 3853), 'numpy.exp', 'np.exp', (['(η * Yβ - 0.5 * η ** 2 * t ** (2 * β + 1))'], {}), '(η * Yβ - 0.5 * η ** 2 * t ** (2 * β + 1))\n', (3811, 3853), True, 'import numpy as np\n'), ((4204, 4233), 'numpy.cumsum', 'np.cumsum', (['increments'], {'axis': '(1)'}), '(increments, axis=1)\n', (4213, 4233), True, 'import numpy as np\n'), ((4249, 4265), 'numpy.zeros_like', 'np.zeros_like', (['V'], {}), '(V)\n', (4262, 4265), True, 'import numpy as np\n'), ((4304, 4320), 'numpy.exp', 'np.exp', (['integral'], {}), 
'(integral)\n', (4310, 4320), True, 'import numpy as np\n'), ((4527, 4546), 'numpy.vectorize', 'np.vectorize', (['bsinv'], {}), '(bsinv)\n', (4539, 4546), True, 'import numpy as np\n'), ((4810, 4831), 'numpy.maximum', 'np.maximum', (['(ST - K)', '(0)'], {}), '(ST - K, 0)\n', (4820, 4831), True, 'import numpy as np\n'), ((4871, 4900), 'numpy.mean', 'np.mean', (['call_payoffs'], {'axis': '(0)'}), '(call_payoffs, axis=0)\n', (4878, 4900), True, 'import numpy as np\n'), ((4994, 5015), 'numpy.maximum', 'np.maximum', (['(K - ST)', '(0)'], {}), '(K - ST, 0)\n', (5004, 5015), True, 'import numpy as np\n'), ((5050, 5078), 'numpy.mean', 'np.mean', (['put_payoffs'], {'axis': '(0)'}), '(put_payoffs, axis=0)\n', (5057, 5078), True, 'import numpy as np\n'), ((5255, 5313), 'pandas.DataFrame', 'pd.DataFrame', (['vols'], {'index': 'surf.tenors', 'columns': 'surf.deltas'}), '(vols, index=surf.tenors, columns=surf.deltas)\n', (5267, 5313), True, 'import pandas as pd\n'), ((5446, 5465), 'numpy.vectorize', 'np.vectorize', (['bsinv'], {}), '(bsinv)\n', (5458, 5465), True, 'import numpy as np\n'), ((5960, 5989), 'numpy.mean', 'np.mean', (['call_payoffs'], {'axis': '(0)'}), '(call_payoffs, axis=0)\n', (5967, 5989), True, 'import numpy as np\n'), ((6146, 6174), 'numpy.mean', 'np.mean', (['put_payoffs'], {'axis': '(0)'}), '(put_payoffs, axis=0)\n', (6153, 6174), True, 'import numpy as np\n'), ((6351, 6409), 'pandas.DataFrame', 'pd.DataFrame', (['vols'], {'index': 'surf.tenors', 'columns': 'surf.deltas'}), '(vols, index=surf.tenors, columns=surf.deltas)\n', (6363, 6409), True, 'import pandas as pd\n'), ((667, 696), 'numpy.linspace', 'np.linspace', (['(0)', 'T', '(1 + self.s)'], {}), '(0, T, 1 + self.s)\n', (678, 696), True, 'import numpy as np\n'), ((974, 998), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cov1'], {}), '(cov1)\n', (992, 998), True, 'import numpy as np\n'), ((1042, 1066), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cov2'], {}), '(cov2)\n', (1060, 1066), True, 
'import numpy as np\n'), ((1845, 1866), 'numpy.matmul', 'np.matmul', (['chol1', 'dB1'], {}), '(chol1, dB1)\n', (1854, 1866), True, 'import numpy as np\n'), ((1892, 1913), 'numpy.matmul', 'np.matmul', (['chol2', 'dB2'], {}), '(chol2, dB2)\n', (1901, 1913), True, 'import numpy as np\n'), ((3195, 3218), 'numpy.convolve', 'np.convolve', (['Γ', 'Ξ[i, :]'], {}), '(Γ, Ξ[i, :])\n', (3206, 3218), True, 'import numpy as np\n'), ((3362, 3380), 'numpy.sqrt', 'np.sqrt', (['(2 * α + 1)'], {}), '(2 * α + 1)\n', (3369, 3380), True, 'import numpy as np\n'), ((4948, 4961), 'numpy.squeeze', 'np.squeeze', (['K'], {}), '(K)\n', (4958, 4961), True, 'import numpy as np\n'), ((5124, 5137), 'numpy.squeeze', 'np.squeeze', (['K'], {}), '(K)\n', (5134, 5137), True, 'import numpy as np\n'), ((5898, 5920), 'numpy.maximum', 'np.maximum', (['(X3T - K)', '(0)'], {}), '(X3T - K, 0)\n', (5908, 5920), True, 'import numpy as np\n'), ((6037, 6050), 'numpy.squeeze', 'np.squeeze', (['K'], {}), '(K)\n', (6047, 6050), True, 'import numpy as np\n'), ((6089, 6111), 'numpy.maximum', 'np.maximum', (['(K - X3T)', '(0)'], {}), '(K - X3T, 0)\n', (6099, 6111), True, 'import numpy as np\n'), ((6220, 6233), 'numpy.squeeze', 'np.squeeze', (['K'], {}), '(K)\n', (6230, 6233), True, 'import numpy as np\n'), ((4063, 4081), 'numpy.sqrt', 'np.sqrt', (['V[:, :-1]'], {}), '(V[:, :-1])\n', (4070, 4081), True, 'import numpy as np\n')] |
'''
Created on 2014-8-28
@author: xiajie
'''
import numpy as np
def centering(X):
    """Subtract the per-column mean from every sample, returning a new array."""
    n_samples = len(X)
    n_features = len(X[0])
    col_mean = np.mean(X, axis=0)
    result = np.zeros((n_samples, n_features))
    for row in range(n_samples):
        result[row] = X[row] - col_mean
    return result
def eigen_decomposition(X):
    """Eigen-decompose the Gram matrix X X^T of the data matrix X."""
    gram = X.dot(np.transpose(X))
    eigenvalues, eigenvectors = np.linalg.eig(gram)
    return eigenvalues, eigenvectors
def true_eigens(X, w, v):
    """Unimplemented stub.

    NOTE(review): presumably intended to map eigenpairs (w, v) of the Gram
    matrix X X^T back to eigenpairs of the covariance X^T X — confirm the
    intended contract before implementing.
    """
    pass
if __name__ == '__main__':
    # Smoke-test the pipeline on placeholder data.
    # BUG FIX: np.zeros() requires a shape argument; the original bare call
    # raised "TypeError: zeros() missing required argument 'shape'" at runtime.
    X = np.zeros((10, 3))
    X = centering(X)
    w, v = eigen_decomposition(X)
| [
"numpy.mean",
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.eig"
] | [((132, 148), 'numpy.zeros', 'np.zeros', (['(N, D)'], {}), '((N, D))\n', (140, 148), True, 'import numpy as np\n'), ((160, 178), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (167, 178), True, 'import numpy as np\n'), ((329, 347), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (342, 347), True, 'import numpy as np\n'), ((436, 446), 'numpy.zeros', 'np.zeros', ([], {}), '()\n', (444, 446), True, 'import numpy as np\n'), ((301, 316), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (313, 316), True, 'import numpy as np\n')] |
import os
import glob
import pickle
from functools import wraps
from concurrent import futures
import cv2
import numpy as np
from PIL import Image
import yaml
from matplotlib import pyplot as plt
import layoutparser as lp
from tqdm import tqdm
def detect_wrapper(fn):
    """Decorator for ``parser.detect(im, ...)`` that normalizes the image arg.

    Accepts a file path (loaded with cv2 and converted BGR -> RGB) or a PIL
    image (converted to a numpy array); numpy arrays pass through untouched.
    Any exception raised while loading or detecting is caught and printed,
    in which case the wrapped call returns None (best-effort batch behavior).
    """
    @wraps(fn)
    def wrap(parser, im, *args, **kwargs):
        try:
            impath = None
            if isinstance(im, str):
                impath = im
                im = cv2.imread(im)
                im = im[:, :, ::-1]  # cv2 loads BGR; downstream models expect RGB
            elif isinstance(im, Image.Image):
                # BUG FIX: np.ndarray(im) is the low-level constructor and
                # interprets `im` as a *shape*; np.asarray converts pixel data.
                im = np.asarray(im)
            return fn(parser, im, *args, **kwargs)
        except Exception as e:
            print(f"Error: {e} {impath}")
    return wrap
def percent_to_px(im_h, im_w, bbx):
    """Convert a bbox to integer pixel coords when it looks fraction-valued.

    A box whose right or bottom edge is < 1 is assumed to be normalized to
    [0, 1] and is scaled by the image size; otherwise it is returned as-is.
    Extra trailing fields in ``bbx`` (type, score, ...) are ignored.
    """
    left, top, right, bottom = bbx[:4]
    looks_normalized = right < 1 or bottom < 1
    if looks_normalized:
        left = int(im_w * left)
        right = int(im_w * right)
        top = int(im_h * top)
        bottom = int(im_h * bottom)
    return left, top, right, bottom
def show_bbxes_on(im, bbxes, show=False, color=128):
    """Draw bounding boxes onto ``im`` in place (optionally displaying it).

    :param im: image array or a path to load with cv2
    :param bbxes: iterable of (left, top, right, bottom[, type, score])
        tuples; only the first four values are used. Fractional coords are
        scaled to pixels via percent_to_px().
    :param show: if True, display the annotated image with matplotlib
    :param color: rectangle color passed to cv2.rectangle
    """
    if isinstance(im, str):
        im = cv2.imread(im)
    h, w = im.shape[:2]
    for item in bbxes:
        # BUG FIX: the original used a bare try/except to unpack optional
        # (type, score) fields it never read; slicing the first four values
        # handles both tuple shapes without swallowing unrelated errors.
        left, top, right, bottom = item[:4]
        left, top, right, bottom = percent_to_px(h, w, (left, top, right, bottom))
        cv2.rectangle(im, (left, top), (right, bottom), color=color, thickness=5)
    if show:
        plt.imshow(im)
        plt.show()
class LayoutBaseParser(object):
    """Abstract interface for page-layout detectors.

    Subclasses implement detect()/draw()/dump(); batch_detect() is a
    concrete helper that runs detect() over every .jpg under a folder
    using a thread pool.
    """

    def detect(self, im, *args, **kwargs):
        # Returns the layout of the query image in a parser-internal format;
        # call .dump() to convert it to a plain, serializable structure.
        raise NotImplementedError

    def draw(self, im, layout, *args, **kwargs):
        raise NotImplementedError

    def dump(self, im, layout):
        # Dumps the detected layout to an array of
        # (left(%), top(%), right(%), bottom(%), bbx_type(str)) entries.
        raise NotImplementedError

    def batch_detect(self, img_folder, start=-1, end=-1):
        """Detect layouts for all .jpg files under ``img_folder``.

        ``start``/``end`` optionally slice the sorted path list.
        Returns a list of dicts: {"path", "h", "w", "layout"}.
        """
        def analyze(path):
            image = cv2.imread(path)
            return {
                "path": path,
                "h": image.shape[0],
                "w": image.shape[1],
                "layout": self.detect(path)
            }

        jpg_paths = []
        for dirpath, _dirnames, filenames in os.walk(img_folder):
            jpg_paths.extend(
                os.path.join(dirpath, name)
                for name in filenames
                if name.endswith(".jpg")
            )
        jpg_paths.sort()
        if end > start >= 0:
            jpg_paths = jpg_paths[start: end]
        print(f"Process {len(jpg_paths)} images.")
        with futures.ThreadPoolExecutor() as executor:
            return list(tqdm(executor.map(analyze, jpg_paths), total=len(jpg_paths)))
class HarvardLayoutParser(LayoutBaseParser):
    """Layout detector backed by layout-parser's Detectron2 models.

    Wraps ``lp.Detectron2LayoutModel`` and post-filters detections to the
    text-like region types.
    """
    # Source code: https://github.com/Layout-Parser/layout-parser
    # Model zoo: https://layout-parser.readthedocs.io/en/latest/notes/modelzoo.html

    # Default label maps (class id -> region name) for the published models.
    PresetLabels = {
        "HJDataset": {1: "Page Frame", 2: "Row", 3: "Title Region", 4: "Text Region", 5: "Title", 6: "Subtitle",
                      7: "Other"},
        "PubLayNet": {0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},
        "PrimaLayout": {1: "TextRegion", 2: "ImageRegion", 3: "TableRegion", 4: "MathsRegion", 5: "SeparatorRegion",
                        6: "OtherRegion"},
        "NewspaperNavigator": {0: "Photograph", 1: "Illustration", 2: "Map", 3: "Comics/Cartoon",
                               4: "Editorial Cartoon",
                               5: "Headline", 6: "Advertisement"}
    }

    def __init__(self, model_name, model_path=None, config_path=None, label_map=None, score_thresh=0.5):
        """Build the Detectron2 model.

        :param model_name: key into PresetLabels (used only when label_map is None)
        :param model_path: path to the .pth weights file
        :param config_path: path to the Detectron2 .yaml config
        :param label_map: explicit class-id -> name mapping; overrides presets
        :param score_thresh: detection score cutoff passed to Detectron2
        """
        if label_map is None:
            label_map = HarvardLayoutParser.PresetLabels.get(model_name, None)
        self.model = lp.Detectron2LayoutModel(config_path,
                                              model_path=model_path,
                                              extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", score_thresh],
                                              label_map=label_map)

    @staticmethod
    def is_text(type_cls):
        """Heuristic: region type names containing text/title/headline count as text.

        Types without .lower() (e.g. integer class ids) are treated as text.
        """
        try:
            s = type_cls.lower()
            return "text" in s or "title" in s or "headline" in s
        except:
            return True

    @detect_wrapper
    def detect(self, im, keep_text_only=True, dump_to_tuples=True, **kwargs):
        """Run the model; optionally keep only text regions and dump to tuples."""
        layout = self.model.detect(im)
        if keep_text_only:
            layout = lp.Layout([b for b in layout if self.is_text(b.type)])
        if dump_to_tuples:
            return self.dump(im, layout)
        return layout

    def draw(self, im, layout, **kwargs):
        """Render the layout boxes onto the image via layout-parser's helper."""
        return lp.draw_box(im, layout, box_width=5, show_element_id=True)

    def dump(self, im, layout):
        """Convert a layout into (x1%, y1%, x2%, y2%, type, score) tuples,
        sorted by descending score."""
        bbxes = []
        h, w = im.shape[:2]
        for t in layout:
            x1, y1, x2, y2 = t.coordinates
            # normalize pixel coordinates to fractions of the image size
            x1 /= w
            x2 /= w
            y1 /= h
            y2 /= h
            bbxes.append((x1, y1, x2, y2, t.type, t.score))
        # Sort by score
        bbxes.sort(key=lambda x: x[-1], reverse=True)
        return bbxes
if __name__ == '__main__':
    # Batch-run every model under models_dir over the jpgs, cache the layouts
    # as pickles, and optionally render annotated images for inspection.
    models_dir = "../../models"
    jpgs_dir = "../../data/jpgs"
    layout_output_dir = "../../data/layout"
    vis_output_dir = "../../data/vis"
    score_thresh = 0.2
    override = False  # True forces re-detection even when a cached pickle exists
    vis = True        # render annotated images alongside the pickles
    for dataset in os.listdir(models_dir):
        subdir = os.path.join(models_dir, dataset)
        configs = glob.glob(os.path.join(subdir, "*.yaml"))
        for conf in configs:
            # NOTE(review): str.replace swaps *every* "yaml" occurrence in the
            # path, not just the extension — confirm no directory is named "yaml".
            pth = conf.replace("yaml", "pth")
            name = f"{dataset}-{os.path.basename(conf).split('.')[0]}"
            print(f"===={name}====")
            parser = HarvardLayoutParser(dataset,
                                        model_path=pth,
                                        config_path=conf,
                                        score_thresh=score_thresh)
            layout_outpath = os.path.join(layout_output_dir, f"{name}")
            if override or not os.path.exists(layout_outpath):
                results = parser.batch_detect(jpgs_dir)
                print(results[0])
                pickle.dump(results, open(layout_outpath, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
            else:
                results = None  # load lazily from cache below if needed
            if vis:
                if results is None:
                    results = pickle.load(open(layout_outpath, "rb"))
                this_dir = os.path.join(vis_output_dir, name)
                if os.path.exists(this_dir):
                    # visualization for this model already exists; skip it
                    continue
                os.makedirs(this_dir, exist_ok=False)
                # Draw each cached layout onto its source image and save it.
                # (Safe to close over this_dir: the executor below runs before
                # the loop advances to the next model.)
                def draw_and_save(item):
                    impath = item["path"]
                    layout = item["layout"]
                    x = cv2.imread(impath)
                    show_bbxes_on(x, layout)
                    cv2.imwrite(os.path.join(this_dir, os.path.basename(impath)), x)
                with futures.ThreadPoolExecutor() as executor:
                    results = list(tqdm(executor.map(draw_and_save, results), total=len(results)))
| [
"cv2.rectangle",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"os.makedirs",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
"functools.wraps",
"numpy.ndarray",
"os.path.basename",
"layoutparser.draw_box",
"layoutparser.Detectron2LayoutModel",
"cv2.imread",
"os.walk... | [((276, 285), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (281, 285), False, 'from functools import wraps\n'), ((5933, 5955), 'os.listdir', 'os.listdir', (['models_dir'], {}), '(models_dir)\n', (5943, 5955), False, 'import os\n'), ((1103, 1117), 'cv2.imread', 'cv2.imread', (['im'], {}), '(im)\n', (1113, 1117), False, 'import cv2\n'), ((1433, 1506), 'cv2.rectangle', 'cv2.rectangle', (['im', '(left, top)', '(right, bottom)'], {'color': 'color', 'thickness': '(5)'}), '(im, (left, top), (right, bottom), color=color, thickness=5)\n', (1446, 1506), False, 'import cv2\n'), ((1529, 1543), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1539, 1543), True, 'from matplotlib import pyplot as plt\n'), ((1552, 1562), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1560, 1562), True, 'from matplotlib import pyplot as plt\n'), ((2471, 2490), 'os.walk', 'os.walk', (['img_folder'], {}), '(img_folder)\n', (2478, 2490), False, 'import os\n'), ((4354, 4506), 'layoutparser.Detectron2LayoutModel', 'lp.Detectron2LayoutModel', (['config_path'], {'model_path': 'model_path', 'extra_config': "['MODEL.ROI_HEADS.SCORE_THRESH_TEST', score_thresh]", 'label_map': 'label_map'}), "(config_path, model_path=model_path, extra_config=[\n 'MODEL.ROI_HEADS.SCORE_THRESH_TEST', score_thresh], label_map=label_map)\n", (4378, 4506), True, 'import layoutparser as lp\n'), ((5229, 5287), 'layoutparser.draw_box', 'lp.draw_box', (['im', 'layout'], {'box_width': '(5)', 'show_element_id': '(True)'}), '(im, layout, box_width=5, show_element_id=True)\n', (5240, 5287), True, 'import layoutparser as lp\n'), ((5974, 6007), 'os.path.join', 'os.path.join', (['models_dir', 'dataset'], {}), '(models_dir, dataset)\n', (5986, 6007), False, 'import os\n'), ((2187, 2205), 'cv2.imread', 'cv2.imread', (['impath'], {}), '(impath)\n', (2197, 2205), False, 'import cv2\n'), ((2838, 2866), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {}), '()\n', (2864, 
2866), False, 'from concurrent import futures\n'), ((6036, 6066), 'os.path.join', 'os.path.join', (['subdir', '"""*.yaml"""'], {}), "(subdir, '*.yaml')\n", (6048, 6066), False, 'import os\n'), ((6517, 6559), 'os.path.join', 'os.path.join', (['layout_output_dir', 'f"""{name}"""'], {}), "(layout_output_dir, f'{name}')\n", (6529, 6559), False, 'import os\n'), ((453, 467), 'cv2.imread', 'cv2.imread', (['im'], {}), '(im)\n', (463, 467), False, 'import cv2\n'), ((7016, 7050), 'os.path.join', 'os.path.join', (['vis_output_dir', 'name'], {}), '(vis_output_dir, name)\n', (7028, 7050), False, 'import os\n'), ((7070, 7094), 'os.path.exists', 'os.path.exists', (['this_dir'], {}), '(this_dir)\n', (7084, 7094), False, 'import os\n'), ((7142, 7179), 'os.makedirs', 'os.makedirs', (['this_dir'], {'exist_ok': '(False)'}), '(this_dir, exist_ok=False)\n', (7153, 7179), False, 'import os\n'), ((571, 585), 'numpy.ndarray', 'np.ndarray', (['im'], {}), '(im)\n', (581, 585), True, 'import numpy as np\n'), ((2595, 2619), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (2607, 2619), False, 'import os\n'), ((6591, 6621), 'os.path.exists', 'os.path.exists', (['layout_outpath'], {}), '(layout_outpath)\n', (6605, 6621), False, 'import os\n'), ((7333, 7351), 'cv2.imread', 'cv2.imread', (['impath'], {}), '(impath)\n', (7343, 7351), False, 'import cv2\n'), ((7505, 7533), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {}), '()\n', (7531, 7533), False, 'from concurrent import futures\n'), ((7452, 7476), 'os.path.basename', 'os.path.basename', (['impath'], {}), '(impath)\n', (7468, 7476), False, 'import os\n'), ((6176, 6198), 'os.path.basename', 'os.path.basename', (['conf'], {}), '(conf)\n', (6192, 6198), False, 'import os\n')] |
import argparse
import re
import os
import json
import numpy as np
import pickle as pkl
"""
for extracting word embedding yourself, please download pretrained model from one of the following links.
"""
url = {'glove': 'http://nlp.stanford.edu/data/glove.6B.zip',
'google': 'https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing',
'fasttext': 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.en.zip'}
data_dir = '../data/'
feat_len = 300
def embed_text_file(text_file, word_vectors, get_vector, save_file):
    """Embed every class name listed in ``text_file`` and pickle the matrix.

    :param text_file: JSON file holding a list of class-name strings; each
        entry may contain comma-separated synonyms, which are averaged.
    :param word_vectors: embedding store handed to ``get_vector``
    :param get_vector: callable ``(word_vectors, word) -> np.ndarray``
    :param save_file: destination pickle; parent directories are created.
    """
    with open(text_file) as fp:
        text_list = json.load(fp)
    all_feats = []
    has = 0
    cnt_missed = 0
    missed_list = []
    # Idiom fix: enumerate instead of range(len(...)) indexing.
    for i, raw_name in enumerate(text_list):
        class_name = raw_name.lower()
        if i % 500 == 0:
            print('%d / %d : %s' % (i, len(text_list), class_name))
        feat = np.zeros(feat_len)
        cnt_word = 0
        # Average the embeddings of all comma-separated synonyms that resolve.
        for option in class_name.split(','):
            now_feat = get_embedding(option.strip(), word_vectors, get_vector)
            if np.abs(now_feat.sum()) > 0:
                cnt_word += 1
                feat += now_feat
        if cnt_word > 0:
            feat = feat / cnt_word
        if np.abs(feat.sum()) == 0:
            print('cannot find word ' + class_name)
            cnt_missed += 1
            missed_list.append(class_name)
        else:
            has += 1
        # L2-normalize; the epsilon keeps all-zero "missed" vectors at zero.
        feat = feat / (np.linalg.norm(feat) + 1e-6)
        all_feats.append(feat)
    all_feats = np.array(all_feats)
    for each in missed_list:
        print(each)
    print('does not have semantic embedding: ', cnt_missed, 'has: ', has)
    if not os.path.exists(os.path.dirname(save_file)):
        os.makedirs(os.path.dirname(save_file))
        print('## Make Directory: %s' % save_file)
    with open(save_file, 'wb') as fp:
        pkl.dump(all_feats, fp)
    print('save to : %s' % save_file)
def get_embedding(entity_str, word_vectors, get_vector):
    """Return the embedding for ``entity_str``.

    If the full string has no entry, fall back to averaging the embeddings
    of its space/dash/underscore-separated tokens; returns a zero vector of
    length ``feat_len`` when no token resolves either.
    """
    try:
        return get_vector(word_vectors, entity_str)
    except Exception:
        feat = np.zeros(feat_len)
        # BUG FIX: filter() is a lazy iterator in Python 3, so the original
        # len(str_set) raised TypeError; materialize the tokens first.
        tokens = [t for t in re.split(r"[ \-_]+", entity_str) if t]
        cnt_word = 0
        for token in tokens:
            try:
                feat = feat + get_vector(word_vectors, token)
                cnt_word += 1
            except Exception:
                continue
        if cnt_word > 0:
            feat = feat / cnt_word
        return feat
def get_glove_dict(txt_dir):
    """Load ``glove.6B.300d.txt`` from ``txt_dir`` into a {word: vector} dict.

    BUG FIX: the original reused a single buffer across lines — it stored a
    copy of the buffer, then *mutated that same copy* while parsing the next
    line, so every stored vector except the last ended up holding the
    following word's values. A fresh array is now built per line.
    """
    print('load glove word embedding')
    txt_file = os.path.join(txt_dir, 'glove.6B.300d.txt')
    word_dict = {}
    with open(txt_file) as fp:
        for line in fp:
            words = line.split()
            assert len(words) - 1 == feat_len
            word_dict[words[0]] = np.array([float(tok) for tok in words[1:]])
    print('loaded to dict!')
    return word_dict
def glove_google(word_vectors, word):
    """Accessor for dict-like embedding stores (GloVe dict / gensim KeyedVectors)."""
    vector = word_vectors[word]
    return vector
def fasttext(word_vectors, word):
    """Accessor for a loaded fastText model object."""
    vector = word_vectors.get_word_vector(word)
    return vector
def parse_arg():
    """Parse command-line options: ``--wv`` (embedding type) and ``--path``
    (location of the pretrained model)."""
    parser = argparse.ArgumentParser(description='word embeddign type')
    parser.add_argument('--wv', type=str, default='glove',
                        help='word embedding type: [glove, google, fasttext]')
    parser.add_argument('--path', type=str, default='',
                        help='path to pretrained word embedding model')
    return parser.parse_args()
if __name__ == '__main__':
    # Build (once) the pickled embedding matrix for the WordNet class list,
    # choosing the backend from --wv; heavy model loads are skipped when the
    # output pickle already exists.
    args = parse_arg()
    text_file = os.path.join(data_dir, 'list', 'invdict_wordntext.json')
    model_path = args.path
    if args.wv == 'glove':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'glove_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            word_vectors = get_glove_dict(model_path)
        get_vector = glove_google
    elif args.wv == 'google':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'google_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            # deferred import: gensim is only needed for this backend
            from gensim.models.keyedvectors import KeyedVectors
            word_vectors = KeyedVectors.load_word2vec_format(model_path, binary=True)
        get_vector = glove_google
    elif args.wv == 'fasttext':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'fasttext_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            # deferred import: fastText is only needed for this backend
            from fastText import load_model
            word_vectors = load_model(os.path.join(model_path, 'wiki.en.bin'))
        get_vector = fasttext
    else:
        raise NotImplementedError
    if not os.path.exists(save_file):
        print('obtain semantic word embeddig', save_file)
        embed_text_file(text_file, word_vectors, get_vector, save_file)
    else:
        print('Embedding existed :', save_file, 'Skip!!!') | [
"re.split",
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format",
"os.path.join",
"numpy.array",
"numpy.zeros",
"os.path.dirname",
"numpy.linalg.norm",
"json.load"
] | [((1563, 1582), 'numpy.array', 'np.array', (['all_feats'], {}), '(all_feats)\n', (1571, 1582), True, 'import numpy as np\n'), ((2632, 2674), 'os.path.join', 'os.path.join', (['txt_dir', '"""glove.6B.300d.txt"""'], {}), "(txt_dir, 'glove.6B.300d.txt')\n", (2644, 2674), False, 'import os\n'), ((2705, 2723), 'numpy.zeros', 'np.zeros', (['feat_len'], {}), '(feat_len)\n', (2713, 2723), True, 'import numpy as np\n'), ((3247, 3305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""word embeddign type"""'}), "(description='word embeddign type')\n", (3270, 3305), False, 'import argparse\n'), ((3687, 3743), 'os.path.join', 'os.path.join', (['data_dir', '"""list"""', '"""invdict_wordntext.json"""'], {}), "(data_dir, 'list', 'invdict_wordntext.json')\n", (3699, 3743), False, 'import os\n'), ((610, 623), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (619, 623), False, 'import json\n'), ((883, 901), 'numpy.zeros', 'np.zeros', (['feat_len'], {}), '(feat_len)\n', (891, 901), True, 'import numpy as np\n'), ((1908, 1931), 'pickle.dump', 'pkl.dump', (['all_feats', 'fp'], {}), '(all_feats, fp)\n', (1916, 1931), True, 'import pickle as pkl\n'), ((2184, 2216), 're.split', 're.split', (['"""[ \\\\-_]+"""', 'entity_str'], {}), "('[ \\\\-_]+', entity_str)\n", (2192, 2216), False, 'import re\n'), ((3819, 3895), 'os.path.join', 'os.path.join', (['data_dir', '"""word_embedding_model"""', '"""glove_word2vec_wordnet.pkl"""'], {}), "(data_dir, 'word_embedding_model', 'glove_word2vec_wordnet.pkl')\n", (3831, 3895), False, 'import os\n'), ((4775, 4800), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (4789, 4800), False, 'import os\n'), ((1734, 1760), 'os.path.dirname', 'os.path.dirname', (['save_file'], {}), '(save_file)\n', (1749, 1760), False, 'import os\n'), ((1783, 1809), 'os.path.dirname', 'os.path.dirname', (['save_file'], {}), '(save_file)\n', (1798, 1809), False, 'import os\n'), ((2137, 2155), 'numpy.zeros', 'np.zeros', 
(['feat_len'], {}), '(feat_len)\n', (2145, 2155), True, 'import numpy as np\n'), ((2959, 2973), 'numpy.array', 'np.array', (['feat'], {}), '(feat)\n', (2967, 2973), True, 'import numpy as np\n'), ((3911, 3936), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (3925, 3936), False, 'import os\n'), ((4080, 4157), 'os.path.join', 'os.path.join', (['data_dir', '"""word_embedding_model"""', '"""google_word2vec_wordnet.pkl"""'], {}), "(data_dir, 'word_embedding_model', 'google_word2vec_wordnet.pkl')\n", (4092, 4157), False, 'import os\n'), ((4173, 4198), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (4187, 4198), False, 'import os\n'), ((4291, 4349), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['model_path'], {'binary': '(True)'}), '(model_path, binary=True)\n', (4324, 4349), False, 'from gensim.models.keyedvectors import KeyedVectors\n'), ((4440, 4519), 'os.path.join', 'os.path.join', (['data_dir', '"""word_embedding_model"""', '"""fasttext_word2vec_wordnet.pkl"""'], {}), "(data_dir, 'word_embedding_model', 'fasttext_word2vec_wordnet.pkl')\n", (4452, 4519), False, 'import os\n'), ((1485, 1505), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {}), '(feat)\n', (1499, 1505), True, 'import numpy as np\n'), ((4535, 4560), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (4549, 4560), False, 'import os\n'), ((4644, 4683), 'os.path.join', 'os.path.join', (['model_path', '"""wiki.en.bin"""'], {}), "(model_path, 'wiki.en.bin')\n", (4656, 4683), False, 'import os\n')] |
import dateutil
from typing import List
import numpy as np
import pandas as pd
from macpie._config import get_option
from macpie import lltools, strtools
def add_diff_days(
    df: pd.DataFrame, col_start: str, col_end: str, diff_days_col: str = None, inplace=False
):
    """Adds a column to DataFrame called ``_diff_days`` which contains
    the number of days between ``col_start`` and ``col_end``

    :param df: DataFrame
    :param col_start: column containing the start date
    :param col_end: column containing the end date
    :param diff_days_col: name of the new column; defaults to the
        ``column.system.diff_days`` option
    :param inplace: if False (default), operate on a copy and return it;
        if True, modify ``df`` and return None
    :raises KeyError: if ``col_start`` and ``col_end`` are the same column
    """
    if diff_days_col is None:
        diff_days_col = get_option("column.system.diff_days")

    if col_start == col_end:
        # BUG FIX: was a plain string, so {col_start}/{col_end} were never
        # interpolated into the error message; now an f-string.
        raise KeyError(f"date columns have the same name: {col_start}=={col_end}")

    if not inplace:
        df = df.copy()

    # timedelta -> float days
    df[diff_days_col] = (df[col_end] - df[col_start]) / np.timedelta64(1, "D")

    if not inplace:
        return df
def any_duplicates(df: pd.DataFrame, col: str, ignore_nan: bool = False):
    """Return ``True`` if column ``col`` contains any duplicate values.

    :param df: DataFrame
    :param col: column to check (resolved case-insensitively)
    :param ignore_nan: drop NaN entries before checking when True
    """
    col = get_col_name(df, col)
    series = df[col]
    # Note: deliberately an identity check, matching the original contract —
    # only the literal True triggers NaN filtering.
    if ignore_nan is True:
        series = series.dropna()
    return series.duplicated().any()
def assimilate(left: pd.DataFrame, right: pd.DataFrame):
    """Assimilate ``right`` to look like ``left`` by casting the columns they
    share to ``left``'s dtypes.

    Columns present only in one frame are left untouched; casts that would
    put NaN into an integer column are silently skipped.

    :param left: left DataFrame (dtype source)
    :param right: right DataFrame (gets cast); a new frame is returned
    """
    shared = set(left.columns) & set(right.columns)
    target_dtypes = {name: dtype for name, dtype in left.dtypes.items() if name in shared}

    for name, dtype in target_dtypes.items():
        try:
            right = right.astype({name: dtype})
        except pd.errors.IntCastingNaNError:
            # e.g. a NaN-bearing column cannot become an integer dtype
            pass

    return right
def diff_cols(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """Return a 2-tuple: (columns only in ``left``, columns only in ``right``),
    after discarding ignored columns from both sides.

    :param left: left DataFrame
    :param right: right DataFrame
    :param cols_ignore: explicit column names to ignore
    :param cols_ignore_pat: regex/character sequence; matching column names
        are ignored. None means ignore nothing.
    """
    trimmed_left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    trimmed_right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)

    left_cols = set(trimmed_left.columns) - set(cols_ignore)
    right_cols = set(trimmed_right.columns) - set(cols_ignore)

    return (left_cols - right_cols, right_cols - left_cols)
def diff_rows(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """If ``left`` and ``right`` share the same columns, returns a DataFrame
    containing rows that differ.

    :param left: left DataFrame
    :param right: right DataFrame
    :param cols_ignore: a list of any columns to ignore
    :param cols_ignore_pat: regex/character sequence; matching column names
        are ignored. None means ignore nothing.
    :raises KeyError: if the two frames do not share the same column set
        (after dropping ignored columns)
    """
    left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    left_only_cols, right_only_cols = diff_cols(left, right)

    if left_only_cols == right_only_cols == set():
        indicator_col_name = get_option("column.system.prefix") + "_diff_rows_merge"
        if isinstance(left.columns, pd.MultiIndex) or isinstance(right.columns, pd.MultiIndex):
            # TODO: Doing a pd.merge() on MultiIndex dataframes with indicator
            # set to True/string resulted in the following error:
            # pandas.errors.PerformanceWarning: dropping on a non-lexsorted multi-index
            # without a level parameter may impact performance
            # Flatten the column MultiIndexes to get around this
            left.columns = left.columns.to_flat_index()
            right.columns = right.columns.to_flat_index()
        # outer merge + indicator: rows marked "both" match exactly; anything
        # else exists in only one frame (i.e. differs)
        merged_df = pd.merge(left, right, indicator=indicator_col_name, how="outer")
        changed_rows_df = merged_df[merged_df[indicator_col_name] != "both"]
        return changed_rows_df

    raise KeyError("Dataframes do not share the same columns")
def drop_cols(df: pd.DataFrame, cols_list=set(), cols_pat=None):
    """Drop columns by explicit name and/or regex pattern, returning a new frame.

    :param cols_list: explicit column names to drop (missing names ignored)
    :param cols_pat: regex/character sequence; matching column names are
        dropped. None means drop nothing by pattern. For MultiIndex columns
        the pattern is matched against the last level.
    """
    # "$^" matches nothing, so a None pattern drops nothing.
    pattern = cols_pat if cols_pat is not None else "$^"

    if isinstance(df.columns, pd.MultiIndex):
        names = df.columns.get_level_values(df.columns.nlevels - 1)
    else:
        names = df.columns

    keep_mask = ~names.str.contains(pattern, regex=True)
    trimmed = df.loc[:, keep_mask]
    return trimmed.drop(columns=cols_list, errors="ignore")
def drop_suffix(df: pd.DataFrame, suffix):
    """Removes ``suffix`` from any column name that carries it.

    :param df: DataFrame (a renamed copy is returned)
    :param suffix: suffix to drop
    """
    def _strip(col):
        return strtools.strip_suffix(col, suffix)

    return df.rename(columns=_strip)
def equals(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """For testing equality of :class:`pandas.DataFrame` objects

    :param left: left DataFrame to compare
    :param right: right DataFrame to compare
    :param cols_ignore: DataFrame columns to ignore in comparison
    :param cols_ignore_pat: Character sequence or regular expression.
                            Column names that match will be ignored in comparison.
                            Defaults to None, which uses the pattern
                            ``'$^'`` to match nothing to ignore nothing
    :raises TypeError: if the two frames' column-index types differ
    :raises ValueError: if both have MultiIndex columns with different level counts
    """
    # columns should be same type (e.g. Index or MultiIndex)
    # (exact type() comparison is deliberate: MultiIndex subclasses Index,
    # so isinstance() could not distinguish them)
    if type(left.columns) != type(right.columns):
        raise TypeError(
            f"Left columns type ('{type(left.columns)}') is "
            f"different than right columns type ('{type(right.columns)}')"
        )
    if isinstance(left.columns, pd.MultiIndex):
        if left.columns.nlevels != right.columns.nlevels:
            raise ValueError("MultiIndexes have different levels.")

    left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)

    # align right's dtypes to left's via the registered ``.mac`` accessor so
    # dtype-only differences don't cause a false mismatch; skipped when the
    # accessor does not support this pair
    try:
        right = left.mac.assimilate(right)
    except NotImplementedError:
        pass

    return left.equals(right)
def flatten_multiindex(df: pd.DataFrame, axis: int = 0, delimiter: str = "_"):
    """Flatten (i.e. collapse) a MultiIndex on one axis, in place.

    Each level tuple becomes a single string joined by ``delimiter``.
    No-op when the chosen axis is not a MultiIndex.

    :param df: DataFrame (modified in place)
    :param axis: ``0`` flattens the index, ``1`` flattens the columns
    :param delimiter: string used to join the level values
    """
    if axis == 0 and isinstance(df.index, pd.MultiIndex):
        df.index = [delimiter.join(map(str, tup)) for tup in df.index]
    elif axis == 1 and isinstance(df.columns, pd.MultiIndex):
        df.columns = [delimiter.join(map(str, tup)) for tup in df.columns]
def get_col_name(df: pd.DataFrame, col_name):
    """Get the properly-cased column name from ``df``, ignoring case.

    :param df: DataFrame
    :param col_name: case-insensitive name of the column; may be a str or a
        list-like (e.g. a tuple naming a MultiIndex column)
    :raises KeyError: if ``col_name`` is None or no matching column exists
    """
    if col_name is None:
        raise KeyError("column to get is 'None'")

    if lltools.is_list_like(col_name):
        # list-like names are compared element-wise, case-insensitively
        for col in df.columns:
            if lltools.list_like_str_equal(col, col_name, case_sensitive=False):
                return col
        raise KeyError(f"column not found: {col_name}")

    if isinstance(col_name, str):
        for col in df.columns:
            if strtools.str_equals(col, col_name, case_sensitive=False):
                return col
        raise KeyError(f"column not found: {col_name}")
    # NOTE(review): a col_name that is neither list-like nor a str falls
    # through here and implicitly returns None — confirm whether this
    # should raise KeyError instead.
def get_col_names(df: pd.DataFrame, col_names: List[str], strict=True):
    """Resolve each name in ``col_names`` to its properly-cased column name,
    ignoring case.

    :param df: DataFrame
    :param col_names: list of case-insensitive column names
    :param strict: if True, a missing column raises KeyError; otherwise its
        slot in the result is None
    """
    resolved = []
    for name in col_names:
        try:
            resolved.append(get_col_name(df, name))
        except KeyError:
            if strict:
                raise
            resolved.append(None)
    return resolved
def insert(df: pd.DataFrame, col_name, col_value, allow_duplicates=False):
    """Append a column at the end of ``df`` (modifies ``df`` in place).

    :param df: DataFrame
    :param col_name: name of column to insert
    :param col_value: value of column to insert
    :param allow_duplicates: passed through to :meth:`pandas.DataFrame.insert`
    """
    position = len(df.columns)
    return df.insert(position, col_name, col_value, allow_duplicates=allow_duplicates)
def is_date_col(arr_or_dtype):
    """Check whether the provided array or dtype is of the datetime64 dtype.

    :param arr_or_dtype: The array or dtype to check
    """
    result = pd.api.types.is_datetime64_any_dtype(arr_or_dtype)
    return result
def mark_duplicates_by_cols(df: pd.DataFrame, cols: List[str]):
    """Add a boolean system column flagging rows duplicated on ``cols``.

    All members of each duplicate group are marked True (``keep=False``).
    ``df`` is modified in place and also returned.

    :param df: DataFrame
    :param cols: only these columns are considered when identifying duplicates
    """
    dup_col = get_option("column.system.duplicates")
    df[dup_col] = df.duplicated(subset=cols, keep=False)
    return df
def replace_suffix(df: pd.DataFrame, old_suffix, new_suffix):
    """For any column names ending in ``old_suffix``, replace that suffix
    with ``new_suffix``; other columns are untouched.

    :param df: DataFrame (a renamed copy is returned)
    :param old_suffix: suffix to replace; an empty string is a no-op
        (BUG FIX: the original slice ``x[:-0]`` evaluated to ``''``, so an
        empty old_suffix wiped out every column name)
    :param new_suffix: suffix to put in place of ``old_suffix``
    """
    def _swap(name):
        if old_suffix and name.endswith(old_suffix):
            return name[: len(name) - len(old_suffix)] + new_suffix
        return name

    return df.rename(columns=_swap)
def to_datetime(df: pd.DataFrame, date_col_name):
    """Convert the ``date_col_name`` column in ``df`` to datetime (in place).

    :param df: DataFrame
    :param date_col_name: case-insensitive name of the column to convert
    :return: the properly-cased name of the converted column
    :raises KeyError: if the column does not exist
    :raises ValueError: if the column cannot be parsed as datetimes or is out of bounds
    :raises TypeError: if the column holds values not convertible to datetime
    """
    try:
        _date_col = get_col_name(df, date_col_name)
        if not is_date_col(df[_date_col]):
            df[_date_col] = pd.to_datetime(df[_date_col])
        return _date_col
    except KeyError:
        raise KeyError(f"Date column '{date_col_name}' in dataframe is not a valid column")
    # NOTE: pd.errors.OutOfBoundsDatetime and dateutil.parser.ParserError are
    # both SUBCLASSES of ValueError, so they must be handled before the generic
    # ValueError branch -- previously these two handlers were unreachable.
    except pd.errors.OutOfBoundsDatetime:
        # Since pandas represents timestamps in nanosecond resolution,
        # the time span that can be represented using a 64-bit integer
        # is limited to approximately 584 years.
        raise ValueError(
            (
                f"Date column '{date_col_name}' in dataframe contains a date "
                f"that is out of bounds (i.e. outside of today +- 584 years)"
            )
        )
    except dateutil.parser.ParserError:
        raise ValueError(
            (
                f"Date column '{date_col_name}' in dataframe could not be parsed "
                f"as a datetime string"
            )
        )
    except ValueError:
        raise TypeError(
            f"Date column '{date_col_name}' in dataframe contains string(s) that "
            "are not likely datetime(s)"
        )
    except TypeError as e:
        raise TypeError(
            (
                f"Date column '{date_col_name}' in dataframe contains values "
                f"that are not convertible to datetime"
            )
        ) from e
| [
"macpie._config.get_option",
"macpie.strtools.strip_suffix",
"pandas.merge",
"macpie.lltools.list_like_str_equal",
"macpie.lltools.is_list_like",
"macpie.strtools.str_equals",
"numpy.invert",
"numpy.timedelta64",
"pandas.to_datetime",
"pandas.api.types.is_datetime64_any_dtype"
] | [((5815, 5840), 'numpy.invert', 'np.invert', (['cols_match_pat'], {}), '(cols_match_pat)\n', (5824, 5840), True, 'import numpy as np\n'), ((8547, 8577), 'macpie.lltools.is_list_like', 'lltools.is_list_like', (['col_name'], {}), '(col_name)\n', (8567, 8577), False, 'from macpie import lltools, strtools\n'), ((10178, 10228), 'pandas.api.types.is_datetime64_any_dtype', 'pd.api.types.is_datetime64_any_dtype', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (10214, 10228), True, 'import pandas as pd\n'), ((599, 636), 'macpie._config.get_option', 'get_option', (['"""column.system.diff_days"""'], {}), "('column.system.diff_days')\n", (609, 636), False, 'from macpie._config import get_option\n'), ((890, 912), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (904, 912), True, 'import numpy as np\n'), ((4778, 4842), 'pandas.merge', 'pd.merge', (['left', 'right'], {'indicator': 'indicator_col_name', 'how': '"""outer"""'}), "(left, right, indicator=indicator_col_name, how='outer')\n", (4786, 4842), True, 'import pandas as pd\n'), ((10546, 10584), 'macpie._config.get_option', 'get_option', (['"""column.system.duplicates"""'], {}), "('column.system.duplicates')\n", (10556, 10584), False, 'from macpie._config import get_option\n'), ((4131, 4165), 'macpie._config.get_option', 'get_option', (['"""column.system.prefix"""'], {}), "('column.system.prefix')\n", (4141, 4165), False, 'from macpie._config import get_option\n'), ((8625, 8689), 'macpie.lltools.list_like_str_equal', 'lltools.list_like_str_equal', (['col', 'col_name'], {'case_sensitive': '(False)'}), '(col, col_name, case_sensitive=False)\n', (8652, 8689), False, 'from macpie import lltools, strtools\n'), ((8855, 8911), 'macpie.strtools.str_equals', 'strtools.str_equals', (['col', 'col_name'], {'case_sensitive': '(False)'}), '(col, col_name, case_sensitive=False)\n', (8874, 8911), False, 'from macpie import lltools, strtools\n'), ((11390, 11419), 'pandas.to_datetime', 'pd.to_datetime', 
(['df[_date_col]'], {}), '(df[_date_col])\n', (11404, 11419), True, 'import pandas as pd\n'), ((6171, 6203), 'macpie.strtools.strip_suffix', 'strtools.strip_suffix', (['x', 'suffix'], {}), '(x, suffix)\n', (6192, 6203), False, 'from macpie import lltools, strtools\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 21:06:23 2018
@author: <NAME>
"""
import numpy as np
from metodos_numericos.LU import LU
from metodos_numericos.Gauss import Gauss
#from Utils import Utils
#from TabelaGauss import TabelaGauss
from TabelaGaussLegendre import TabelaGaussLegendre
class MinimosQuadrados():
    """Continuous least-squares polynomial approximation of a function on
    [a, b], with the inner-product integrals evaluated by Gauss-Legendre
    quadrature and the normal equations solved via LU decomposition."""
    # builds the polynomial coefficients used for the approximation
    def executar(self, a, b, n, f):
        """Fit a degree-``n`` polynomial to ``f`` on [a, b]; returns the
        coefficient vector (constant term first)."""
        tam = n+1
        A = np.zeros((tam,tam), dtype=np.float64)
        B = np.zeros((tam,), dtype=np.float64)
        # build the normal-equations matrix A[i][j] = <phi_i, phi_j>
        for i in range (0,tam):
            for j in range (0,tam):
                A[i][j] = self.integralMatriz(a, b, tam, i, j)
        # build the source vector B[i] = <f, phi_i>
        for i in range (0,tam):
            B[i] = self.integralVetor(a, b, tam, f, i)
        #print("A", A)
        #print("B", B)
        #Utils().obtemInfoMatriz(A)
        # solve the linear system for the coefficients
        X = LU().executar(A, B)[0]
        #X = Gauss().executarComPivoteamento(A, B)[0]
        #X = Gauss().executar(A, B)[0]
        return X
    def funcaoBase(self, x, n):
        """Monomial basis function phi_n(x) = x**n."""
        return x**n
    def produtoFi(self, x, i, j):
        """Integrand of the matrix entry: phi_i(x) * phi_j(x)."""
        return self.funcaoBase(x, i) * self.funcaoBase(x, j)
    def produtoVetor(self, x, f, i):
        """Integrand of the source-vector entry: f(x) * phi_i(x)."""
        return f(x) * self.funcaoBase(x, i)
    ######### Integration method (Gauss-Legendre quadrature) ##########
    def x(self, a, b, t):
        """Map a quadrature node t in [-1, 1] onto [a, b]."""
        return (((b-a)*t) / 2.0) + ((b+a)/2.0)
    def dx(self, a, b):
        """Jacobian of the [-1, 1] -> [a, b] change of variable."""
        return (b-a)/2.0
    def integralMatriz(self, a, b, n, i, j):
        """Integral of phi_i * phi_j over [a, b] using n quadrature points."""
        # fetch nodes and weights from the table
        tw = TabelaGaussLegendre().getValores(n)
        # accumulate the weighted sum
        soma = 0
        for k in range (0, n):
            wi = np.float64(tw[1][k])
            ti = np.float64(tw[0][k])
            soma += wi * self.produtoFi(self.x(a, b, ti), i, j) * self.dx(a, b)
        return soma
    def integralVetor(self, a, b, n, f, i):
        """Integral of f * phi_i over [a, b] using n quadrature points."""
        # fetch nodes and weights from the table
        tw = TabelaGaussLegendre().getValores(n)
        # accumulate the weighted sum
        soma = 0
        for k in range (0, n):
            wi = np.float64(tw[1][k])
            ti = np.float64(tw[0][k])
            soma += wi * self.produtoVetor(self.x(a, b, ti), f, i) * self.dx(a, b)
        return soma
    ######### Evaluation of the fitted polynomial ##########
    def interpolaCoeficientes(self, c, n, xk):
        """Evaluate the degree-``n`` polynomial with coefficients ``c`` at ``xk``."""
        tam = n+1
        soma = 0
        for i in range (0,tam):
            soma += c[i] * (xk ** i)
        #if(np.isnan(soma)):
            #print("resultado da interpolacao do valor "+repr(xk)+" foi igual a NaN")
            #soma = 0
        return soma
| [
"numpy.float64",
"numpy.zeros",
"metodos_numericos.LU.LU",
"TabelaGaussLegendre.TabelaGaussLegendre"
] | [((467, 505), 'numpy.zeros', 'np.zeros', (['(tam, tam)'], {'dtype': 'np.float64'}), '((tam, tam), dtype=np.float64)\n', (475, 505), True, 'import numpy as np\n'), ((517, 551), 'numpy.zeros', 'np.zeros', (['(tam,)'], {'dtype': 'np.float64'}), '((tam,), dtype=np.float64)\n', (525, 551), True, 'import numpy as np\n'), ((1850, 1870), 'numpy.float64', 'np.float64', (['tw[1][k]'], {}), '(tw[1][k])\n', (1860, 1870), True, 'import numpy as np\n'), ((1888, 1908), 'numpy.float64', 'np.float64', (['tw[0][k]'], {}), '(tw[0][k])\n', (1898, 1908), True, 'import numpy as np\n'), ((2238, 2258), 'numpy.float64', 'np.float64', (['tw[1][k]'], {}), '(tw[1][k])\n', (2248, 2258), True, 'import numpy as np\n'), ((2276, 2296), 'numpy.float64', 'np.float64', (['tw[0][k]'], {}), '(tw[0][k])\n', (2286, 2296), True, 'import numpy as np\n'), ((1720, 1741), 'TabelaGaussLegendre.TabelaGaussLegendre', 'TabelaGaussLegendre', ([], {}), '()\n', (1739, 1741), False, 'from TabelaGaussLegendre import TabelaGaussLegendre\n'), ((2108, 2129), 'TabelaGaussLegendre.TabelaGaussLegendre', 'TabelaGaussLegendre', ([], {}), '()\n', (2127, 2129), False, 'from TabelaGaussLegendre import TabelaGaussLegendre\n'), ((1017, 1021), 'metodos_numericos.LU.LU', 'LU', ([], {}), '()\n', (1019, 1021), False, 'from metodos_numericos.LU import LU\n')] |
import math
import operator
from functools import reduce
import bezier
import cv2
import numpy as np
import pyclipper
from pyclipper import PyclipperOffset
from scipy.interpolate import splprep, splev
from shapely.geometry import Polygon
def compute_two_points_angle(_base_point, _another_point):
    """Clockwise angle in [0, 360) swept from the +x axis at the base point
    to the ray through the other point (image/pixel coordinates).

    :param _base_point: pivot point (x, y)
    :param _another_point: the other point (x, y)
    :return: angle in degrees
    """
    dx = _another_point[0] - _base_point[0]
    dy = _another_point[1] - _base_point[1]
    raw_degrees = math.degrees(math.atan2(-dy, -dx)) + 180
    return raw_degrees % 360
def get_clockwise_angle_of_two_lines(_center_point, _point_1, _point_2):
    """Clockwise sweep (degrees) around the centre point from the ray through
    point 1 to the ray through point 2.

    :param _center_point: centre point
    :param _point_1: first point
    :param _point_2: second point
    :return: sweep angle in [0, 360)
    """
    start_angle = compute_two_points_angle(_center_point, _point_1)
    end_angle = compute_two_points_angle(_center_point, _point_2)
    # wrap the (possibly negative) difference back into [0, 360)
    return (end_angle - start_angle) % 360
def curved_polygon(_points):
    """Smooth a closed polygon with periodic B-spline interpolation.

    :param _points: polygon vertices, array of shape (N, 2)
    :return: 1000 resampled integer points of shape (1000, 1, 2)
    """
    # periodic (per=1), lightly smoothed (s=1.0) spline through the vertices
    tck, u = splprep(_points.T, u=None, s=1.0, per=1, quiet=2)
    u_new = np.linspace(u.min(), u.max(), 1000)
    x_new, y_new = splev(u_new, tck, der=0)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype
    smoothed = np.stack([x_new.astype(int), y_new.astype(int)], axis=1)
    # OpenCV-style contour shape (N, 1, 2)
    return smoothed.reshape((-1, 1, 2))
def approximate_curved_polygon(_contour, point_num=200):
    """Fit Bezier curves piecewise to obtain a smooth closed polygon contour.

    :param _contour: points making up the polygon contour. Array:(N, 2)
    :param point_num: number of control points fitted per segment; larger is smoother. Int
    :return: the smoothed contour points, shape (-1, 2)
    """
    to_return_contour = []
    _contour = np.reshape(_contour, (-1, 2))
    # append the first point at the end so the generated curve is closed
    _contour = np.vstack((_contour, _contour[0, :].reshape((-1, 2))))
    for start_index in range(0, _contour.shape[0], point_num):
        # take one extra point so neighbouring segments share an endpoint,
        # preventing gaps in the middle of the curve
        end_index = start_index + point_num + 1
        end_index = end_index if end_index < _contour.shape[0] else _contour.shape[0]
        nodes = np.transpose(_contour[start_index:end_index, :])
        # fit a Bezier curve through this run of control points
        curve = bezier.Curve(nodes, degree=nodes.shape[1] - 1)
        # sample the curve densely (5x the control-point count)
        curve = curve.evaluate_multi(np.linspace(0.0, 1.0, point_num * 5))
        to_return_contour.append(np.transpose(curve))
    to_return_contour = np.array(to_return_contour).reshape((-1, 2))
    return to_return_contour
def get_region_proportion(_regions, _proportion):
    """Compute each region's share of the total by a chosen measure.

    :param _regions: iterable of binary region masks
    :param _proportion: one of 'area', 'height' or 'width'
    :return: list of per-region fractions (they sum to 1)
    """
    assert _proportion in {'area', 'height', 'width'}, '不支持的占比计算方式'
    measures = []
    for m_region in _regions:
        if _proportion == 'area':
            measures.append(np.sum(m_region))
        elif _proportion == 'height':
            region_ys, _ = np.where(m_region)
            measures.append(max(region_ys) - min(region_ys))
        else:
            _, region_xs = np.where(m_region)
            measures.append(max(region_xs) - min(region_xs))
    total = sum(measures)
    return [m_value / total for m_value in measures]
def get_bounding_rectangle(_x, _y):
    """Axis-aligned bounding rectangle of a point set.

    :param _x: x values of the points
    :param _y: y values of the points
    :return: (top-left x, top-left y, bottom-right x, bottom-right y, height, width)
    """
    left, right = min(_x), max(_x)
    top, bottom = min(_y), max(_y)
    return left, top, right, bottom, bottom - top, right - left
def interpolate_points(_points):
    """Densify a polyline by linearly interpolating extra points along long
    segments (roughly one point every 10 pixels), which makes later polygon
    interpolation behave better.

    Note: each segment contributes its start point but not its end point
    (``endpoint=False``), so the final input point is dropped.

    :param _points: sequence of (x, y) points
    :return: interpolated points, shape (-1, 2), integer dtype
    """
    to_return_points = []
    _points = np.array(_points)
    for m_point_previous, m_point_next in zip(_points, _points[1:]):
        # number of ~10px sub-segments between the two points
        m_segments = np.max(np.abs(m_point_previous - m_point_next) // 10)
        if m_segments > 1:
            # np.int was removed in NumPy 1.24; the builtin int is equivalent
            new_x = np.linspace(m_point_previous[0], m_point_next[0], num=int(m_segments), endpoint=False,
                                dtype=int)
            new_y = np.linspace(m_point_previous[1], m_point_next[1], num=int(m_segments), endpoint=False,
                                dtype=int)
            to_return_points.append(np.vstack([new_x, new_y]))
        else:
            # short segment: keep only its start point
            to_return_points.append(np.array([[m_point_previous[0]], [m_point_previous[1]]]))
    return np.hstack(to_return_points).T
def get_polygon_region_contour(_region_mask, _mode='max'):
    """Extract the contour(s) of the polygon region(s) in a mask.

    :param _region_mask: image containing the polygon region(s)
    :param _mode: 'all' returns every contour point set;
                  'max' returns only the largest-area contour
    :return: list of contours (empty for an unknown mode)
    """
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but
    # (contours, hierarchy) in OpenCV 4.x; indexing [-2] works on both, whereas
    # the previous 3-way unpack raised ValueError on OpenCV >= 4.
    contours = cv2.findContours(_region_mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    to_return_contours = []
    if _mode == 'max':
        to_return_contours = [max(contours, key=cv2.contourArea), ]
    elif _mode == 'all':
        to_return_contours = contours
    return to_return_contours
def concentric_circle_delete_duplicated(_all_centers, _down_scale_ratio=4):
    """Naive 2-D coordinate de-duplication: centres falling into the same
    down-scaled grid cell are merged into their integer-averaged position.

    :param _all_centers: iterable of (x, y) centres
    :param _down_scale_ratio: grid cell size used for bucketing
    :return: list of merged (x, y) centres
    """
    cells = {}
    for x, y in _all_centers:
        cell_key = '%d_%d' % (x // _down_scale_ratio, y // _down_scale_ratio)
        sum_x, sum_y, count = cells.get(cell_key, (0, 0, 0))
        cells[cell_key] = (sum_x + x, sum_y + y, count + 1)
    # integer average per occupied cell (insertion order preserved)
    return [(sum_x // count, sum_y // count) for sum_x, sum_y, count in cells.values()]
def nms(_rectangles, _scores, _nms_threshold):
    """Non-maximum suppression over axis-aligned boxes.

    Overlap is measured against the SMALLER of the two boxes' areas, and a box
    whose intersection equals that smaller area (i.e. fully contained) is
    always suppressed regardless of the threshold.

    Args:
        _rectangles: all boxes as (x1, y1, x2, y2), non-normalised
        _scores: score of each box
        _nms_threshold: overlap threshold
    Returns: indices of the kept boxes
    """
    left, top = _rectangles[:, 0], _rectangles[:, 1]
    right, bottom = _rectangles[:, 2], _rectangles[:, 3]
    areas = (right - left + 1) * (bottom - top + 1)
    # indices sorted by descending score
    order = np.argsort(_scores)[::-1]
    keep = []
    while len(order) > 0:
        best = order[0]
        # the highest-scoring remaining box is always kept
        keep.append(best)
        rest = order[1:]
        ix1 = np.maximum(left[best], left[rest])
        iy1 = np.maximum(top[best], top[rest])
        ix2 = np.minimum(right[best], right[rest])
        iy2 = np.minimum(bottom[best], bottom[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        # pairwise minimum of the two areas
        smaller_area = np.minimum(areas[rest], areas[best])
        overlap = inter / smaller_area
        survivors = np.where(np.logical_and(overlap < _nms_threshold, smaller_area != inter))[0]
        # +1 because the reference box itself was excluded from the comparison
        order = rest[survivors]
    return keep
def rotate_points(_points, _degree=0, _center=(0, 0)):
    """Rotate points counter-clockwise about a centre point.

    Notes:
        points are absolute (non-normalised) coordinates
    Args:
        _points: points to rotate
        _degree: rotation angle in degrees
        _center: centre of rotation
    Returns: rotated points, shape (-1, 2)
    """
    radians = np.deg2rad(_degree)
    cos_a, sin_a = np.cos(radians), np.sin(radians)
    rotation = np.array([[cos_a, -sin_a],
                         [sin_a, cos_a]])
    center = np.atleast_2d(_center)
    points = np.atleast_2d(_points)
    # translate to the centre, rotate, translate back
    rotated = (rotation @ (points.T - center.T) + center.T).T
    return np.reshape(rotated, (-1, 2))
def get_expand_rotated_points(_image, _center, _rotate_degree):
    """When an image is rotated in 'expand' mode its origin shifts; compute the
    rotated corner points and the rotated centre in the expanded frame.

    Args:
        _image: image (only its shape is used)
        _center: rotation centre
        _rotate_degree: rotation angle in degrees
    Returns: the four rotated corners (TL, TR, BR, BL order) and the rotated centre
    """
    h, w = _image.shape[:2]
    corners_and_center = np.array([
        [0, 0],
        [w - 1, 0],
        [w - 1, h - 1],
        [0, h - 1],
        _center
    ])
    rotated = rotate_points(corners_and_center, _rotate_degree, _center)
    # shift everything so the minimum coordinate becomes 0 on each axis
    shift = -rotated.min(axis=0)
    shifted = rotated + shift
    return shifted[:4], shifted[4]
def rotate_degree_img(_img, _degree, _center=None, _with_expand=True, _mask=None):
    """
    Rotate an image counter-clockwise.

    Args:
        _img: image to rotate
        _degree: rotation angle in degrees
        _center: rotation centre; defaults to the geometric centre of the image
        _with_expand: if True, resize the output canvas so no content is lost
        _mask: mask to rotate alongside the image; may be None
    Returns: the rotated image and the rotated mask (None when no mask given)
    """
    if _mask is not None:
        # NOTE(review): this compares the FULL image shape against the first two
        # mask dimensions, so it can only pass for 2-D images -- confirm intent
        assert _img.shape == _mask.shape[:2], 'mask and shape is not same'
    h, w = _img.shape[:2]
    if _center is None:
        center = (w / 2, h / 2)
    else:
        center = _center
    if _with_expand:
        # locate where the corners land after rotation to size the new canvas
        four_corner_points, _ = get_expand_rotated_points(_img, center, _degree)
        new_width = int(np.max(four_corner_points[:, 0]))
        new_height = int(np.max(four_corner_points[:, 1]))
        current_location = np.array([
            [0, 0],
            [w, 0],
            [w, h],
        ], dtype=np.float32)
        # affine map from three original corners to their rotated positions
        rotate_matrix = cv2.getAffineTransform(current_location, four_corner_points[:3].astype(np.float32))
    else:
        rotate_matrix = cv2.getRotationMatrix2D(center, _degree, 1)
        new_width = w
        new_height = h
    rotated_img = cv2.warpAffine(_img, rotate_matrix, (new_width, new_height), flags=cv2.INTER_LINEAR)
    if _mask is not None:
        # nearest-neighbour keeps mask labels intact (no interpolated values)
        rotated_mask = cv2.warpAffine(_mask, rotate_matrix, (new_width, new_height), flags=cv2.INTER_NEAREST)
    else:
        rotated_mask = None
    return rotated_img, rotated_mask
def resize_convex_hull_polygon(_convex_hull_points, _resize_ratio):
    """Scale a convex-hull polygon about its centroid.

    Points are converted to polar form around the centroid, the radius is
    scaled, and the result is converted back to integers (floored).

    Args:
        _convex_hull_points: polygon vertices, shape (N, 2)
        _resize_ratio: scale factor
    Returns: scaled integer vertices
    """
    center_point = np.mean(_convex_hull_points, axis=0)
    diff_points = _convex_hull_points - center_point
    r = np.linalg.norm(diff_points, axis=1)
    theta = np.arctan2(diff_points[:, 1], diff_points[:, 0])
    target_r = r * _resize_ratio
    # np.float was removed in NumPy 1.24; the builtin float is equivalent
    to_return_points = np.zeros_like(diff_points, dtype=float)
    to_return_points[:, 0] = target_r * np.cos(theta)
    to_return_points[:, 1] = target_r * np.sin(theta)
    # truncate back to integer pixel coordinates (np.int alias also removed)
    return (to_return_points + center_point).astype(int)
def get_distance(_p1, _p2):
    """Euclidean distance between two points.

    Args:
        _p1: coordinates of the first point
        _p2: coordinates of the second point
    Returns: the Euclidean distance
    """
    squared_diff = np.square(_p1 - _p2)
    return np.sqrt(squared_diff.sum())
def resize_with_height(_image, _target_height):
    """Proportionally resize an image so its height equals the target.

    Args:
        _image: image to resize
        _target_height: desired height in pixels
    Returns: the resized image
    """
    source_h, source_w = _image.shape[:2]
    scale = source_h / _target_height
    # ceil so the width never collapses below the proportional size
    new_width = int(np.ceil(source_w / scale))
    return cv2.resize(_image, (new_width, _target_height))
def resize_with_width(_image, _target_width):
    """Proportionally resize an image so its width equals the target.

    Args:
        _image: image to resize
        _target_width: desired width in pixels
    Returns: the resized image
    """
    source_h, source_w = _image.shape[:2]
    scale = source_w / _target_width
    # ceil so the height never collapses below the proportional size
    new_height = int(np.ceil(source_h / scale))
    return cv2.resize(_image, (_target_width, new_height))
def resize_with_short_side(_image, _target_short_side_size):
    """Proportionally resize so the SHORTER side equals the target length.

    Args:
        _image: image to resize
        _target_short_side_size: target length of the shorter side
    Returns: the resized image
    """
    h, w = _image.shape[:2]
    if w < h:
        return resize_with_width(_image, _target_short_side_size)
    return resize_with_height(_image, _target_short_side_size)
def resize_with_long_side(_image, _target_long_side_size):
    """Proportionally resize so the LONGER side equals the target length.

    Args:
        _image: image to resize
        _target_long_side_size: target length of the longer side
    Returns: the resized image
    """
    h, w = _image.shape[:2]
    if w < h:
        return resize_with_height(_image, _target_long_side_size)
    return resize_with_width(_image, _target_long_side_size)
def _compute_image_specific_base(_image, _height_base=None, _width_base=None):
"""
计算图像的宽高在一定基数基础上的最邻近向上取整的宽高
Args:
_image: 图像
_height_base: 高度的基数
_width_base: 宽度的基数
Returns: 最临近高度,最邻近宽度
"""
h, w = _image.shape[:2]
target_h = h
target_w = w
if _height_base is not None:
if h <= _height_base:
target_h = _height_base
else:
target_h = int(np.ceil(h / _height_base) * _height_base)
if _width_base is not None:
if w <= _width_base:
target_w = _width_base
else:
target_w = int(np.ceil(w / _width_base) * _width_base)
return target_h, target_w
def resize_with_specific_base(_image, _height_base=None, _width_base=None):
    """Resize an image (non-proportionally) so each side is a base multiple.

    Args:
        _image: image to resize
        _height_base: base for the height
        _width_base: base for the width
    Returns: the resized image
    """
    new_h, new_w = _compute_image_specific_base(_image, _height_base, _width_base)
    return cv2.resize(_image, (new_w, new_h))
def center_pad_image_with_specific_base(_image, _height_base=None, _width_base=None, _pad_value=0,
                                        _output_pad_ratio=False):
    """Centre-pad an image onto a canvas whose sides are base multiples.

    Args:
        _image: image to pad
        _height_base: base for the canvas height
        _width_base: base for the canvas width
        _pad_value: fill value used for the padding
        _output_pad_ratio: also return the (width_pad, height_pad) ratios so
            later computation can subtract the offsets
    Returns: the padded image, optionally with the pad ratios
    """
    h, w = _image.shape[:2]
    target_h, target_w = _compute_image_specific_base(_image, _height_base, _width_base)
    if _image.ndim == 3:
        canvas = np.ones((target_h, target_w, _image.shape[2]), dtype=_image.dtype) * _pad_value
    else:
        canvas = np.ones((target_h, target_w), dtype=_image.dtype) * _pad_value
    # centre the original image inside the canvas
    left = (target_w - w) // 2
    top = (target_h - h) // 2
    canvas[top:top + h, left:left + w, ...] = _image
    if _output_pad_ratio:
        return canvas, (left / target_w, top / target_h)
    return canvas
def remove_image_pad(_padded_image, _original_image, _left_margin, _top_margin):
    """Crop a previously centre-padded image back to the original size.

    Args:
        _padded_image: the padded image
        _original_image: the original (unpadded) image, used only for its size
        _left_margin: left pad ratio (fraction of the padded width)
        _top_margin: top pad ratio (fraction of the padded height)
    Returns: the unpadded region of the padded image
    """
    padded_h, padded_w = _padded_image.shape[:2]
    original_h, original_w = _original_image.shape[:2]
    left = int(_left_margin * padded_w)
    top = int(_top_margin * padded_h)
    return _padded_image[top:top + original_h, left:left + original_w, ...]
def get_cropped_image(_image, _location):
    """Crop a region, given as normalised corner coordinates, out of an image.

    Args:
        _image: image to crop from
        _location: dict with normalised 'top_left_x/y' and 'bottom_right_x/y'
    Returns: a copy of the cropped region (corners inclusive)
    """
    h, w = _image.shape[:2]

    def _to_pixels(key, extent):
        # clamp the normalised coordinate into [0, 1] before scaling
        return int(np.clip(_location[key], a_min=0, a_max=1) * extent)

    x1 = _to_pixels('top_left_x', w)
    y1 = _to_pixels('top_left_y', h)
    x2 = _to_pixels('bottom_right_x', w)
    y2 = _to_pixels('bottom_right_y', h)
    return _image.copy()[y1:y2 + 1, x1:x2 + 1, ...]
def get_min_area_bbox(_image, _contour, _scale_ratio=1.0):
    """
    Get the minimum-area rotated rectangle for a contour.

    The geometry is normalised by the image size so it can be applied to the
    image before any resizing; also handles awkward rotation angles.

    Args:
        _image: image the bbox lives in (only its size is used)
        _contour: the contour
        _scale_ratio: dilation ratio for the contour (1.0 keeps it as-is)
    Returns: dict describing the rotated box, or None if it cannot be computed
    """
    h, w = _image.shape[:2]
    if _scale_ratio != 1:
        reshaped_contour = _contour.reshape(-1, 2)
        current_polygon = Polygon(reshaped_contour)
        # offset distance derived from the polygon's area/perimeter ratio
        distance = current_polygon.area * _scale_ratio / current_polygon.length
        offset = PyclipperOffset()
        offset.AddPath(reshaped_contour, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        all_paths = offset.Execute(distance)
        if len(all_paths) > 0:
            # the offset can split the shape; keep the largest resulting path
            max_path = max(all_paths, key=lambda x: cv2.contourArea(np.array(x)))
            scaled_contour = np.array(max_path).reshape(-1, 1, 2)
        else:
            return None
    else:
        scaled_contour = _contour
    try:
        # minAreaRect can raise on a degenerate/invalid contour
        rotated_box = cv2.minAreaRect(scaled_contour)
        # fold the angle so 'degree' stays small; width/height are swapped for
        # angles in [-90, -45] -- presumably matching the pre-4.5 OpenCV angle
        # convention where angle is in (-90, 0]; confirm for newer OpenCV
        if -90 <= rotated_box[2] <= -45:
            to_rotate_degree = rotated_box[2] + 90
            bbox_height, bbox_width = rotated_box[1]
        else:
            to_rotate_degree = rotated_box[2]
            bbox_width, bbox_height = rotated_box[1]
        # normalised geometry allows operating on the image before resizing
        to_return_rotated_box = {
            'degree': int(to_rotate_degree),
            'center_x': rotated_box[0][0] / w,
            'center_y': rotated_box[0][1] / h,
            'box_height': bbox_height / h,
            'box_width': bbox_width / w,
        }
        return to_return_rotated_box
    except Exception as e:
        # swallow any failure and signal it with None (best-effort contract)
        return None
def get_rotated_box_roi_from_image(_image, _rotated_box, _scale_ratio=(1.0, 1.0)):
    """
    Extract the ROI of a rotated box from an image (deskewed to upright).

    Args:
        _image: image to extract from
        _rotated_box: the rotated box (normalised geometry dict)
        _scale_ratio: scale; a tuple is (width_ratio, height_ratio), a single
            number applies to both
    Returns: the extracted, axis-aligned ROI
    """
    h, w = _image.shape[:2]
    rotated_points = get_coordinates_of_rotated_box(_image, _rotated_box, _scale_ratio)
    # output size from the box's normalised width/height
    target_h = int(_rotated_box['box_height'] * h)
    target_w = int(_rotated_box['box_width'] * w)
    target_points = np.array([
        [0, 0],
        [target_w, 0],
        [target_w, target_h],
    ], dtype=np.float32)
    # affine map taking three rotated corners onto the upright rectangle
    warp_matrix = cv2.getAffineTransform(rotated_points.astype(np.float32)[:3], target_points)
    cropped_image = cv2.warpAffine(_image, warp_matrix, (target_w, target_h))
    return cropped_image
def replace_rotated_box_roi_to_image(_source_image, _rotated_box, _replace_image, _box_scale_ratio=(1.0, 1.0)):
    """
    Replace the ROI of a rotated box in the source image with another image.

    Args:
        _source_image: the original image
        _rotated_box: the bbox on the original image (normalised geometry dict)
        _replace_image: the image to paste into the box
        _box_scale_ratio: box scale; a tuple is (width_ratio, height_ratio),
            a single number applies to both
    Returns: the image with the box region replaced
    """
    h, w = _source_image.shape[:2]
    replace_image_h, replace_image_w = _replace_image.shape[:2]
    rotated_points = get_coordinates_of_rotated_box(_source_image, _rotated_box, _box_scale_ratio)
    source_points = np.array([
        [0, 0],
        [replace_image_w, 0],
        [replace_image_w, replace_image_h],
    ], dtype=np.float32)
    # affine map taking the replacement image onto the rotated box location
    warp_matrix = cv2.getAffineTransform(source_points, rotated_points.astype(np.float32)[:3])
    # warp an all-ones mask alongside to know which pixels to overwrite
    replaced_mask = cv2.warpAffine(np.ones_like(_replace_image, dtype=np.uint8), warp_matrix, (w, h))
    masked_replaced_image = cv2.warpAffine(_replace_image, warp_matrix, (w, h))
    replaced_image = _source_image.copy()
    np.putmask(replaced_image, replaced_mask == 1, masked_replaced_image)
    return replaced_image
def get_coordinates_of_rotated_box(_image, _rotated_box, _scale_ratio=(1.0, 1.0)):
    """Return the four corner points of a rotated box in image coordinates.

    Args:
        _image: corresponding image (gives the pixel size)
        _rotated_box: normalised rotated-box geometry dict
        _scale_ratio: scale of the box; a tuple is (width_ratio, height_ratio),
            a single number applies to both
    Returns: four int32 corner points clipped to the image bounds
    """
    h, w = _image.shape[:2]
    cx = _rotated_box['center_x']
    cy = _rotated_box['center_y']
    if isinstance(_scale_ratio, tuple):
        width_ratio, height_ratio = _scale_ratio
    else:
        width_ratio = height_ratio = _scale_ratio
    half_w = _rotated_box['box_width'] * width_ratio / 2
    half_h = _rotated_box['box_height'] * height_ratio / 2
    # normalised axis-aligned corners, scaled up to pixels
    pixel_corners = np.array([
        [cx - half_w, cy - half_h],
        [cx + half_w, cy - half_h],
        [cx + half_w, cy + half_h],
        [cx - half_w, cy + half_h]
    ]) * (w, h)
    rotated = rotate_points(pixel_corners, _rotated_box['degree'], (cx * w, cy * h))
    # keep the corners inside the image
    rotated[:, 0] = np.clip(rotated[:, 0], a_min=0, a_max=w)
    rotated[:, 1] = np.clip(rotated[:, 1], a_min=0, a_max=h)
    return rotated.astype(np.int32)
def clockwise_sort_points(_point_coordinates):
    """Sort points clockwise, starting from the top-left.

    Cartesian coordinates are converted to polar angles around the centroid
    and the points are sorted by that angle.

    Args:
        _point_coordinates: points to sort [(x, y), ...]
    Returns: the sorted points
    """
    count = len(_point_coordinates)
    center_x = sum(p[0] for p in _point_coordinates) / count
    center_y = sum(p[1] for p in _point_coordinates) / count

    def _polar_angle(coord):
        dx = coord[0] - center_x
        dy = coord[1] - center_y
        return (180 + math.degrees(math.atan2(dy, dx))) % 360

    return sorted(_point_coordinates, key=_polar_angle)
def force_convert_image_to_bgr(_image):
    """Convert a grayscale or BGRA image to 3-channel BGR.

    Args:
        _image: image to convert
    Returns: the BGR image (the input is returned unchanged if already BGR)
    """
    if len(_image.shape) == 2:
        return cv2.cvtColor(_image, cv2.COLOR_GRAY2BGR)
    if _image.shape[-1] == 4:
        return cv2.cvtColor(_image, cv2.COLOR_BGRA2BGR)
    return _image
def face_align(_image, _landmark, _target_shape):
    """Align a face image to canonical 5-point landmark positions.

    Args:
        _image: face image
        _landmark: normalised 5-point landmarks on the image
        _target_shape: output (width, height)
    Returns: the aligned face crop
    """
    # canonical landmark positions, normalised to a 1x1 square
    reference_facial_points = np.array([
        [0.31556875, 0.4615741],
        [0.6826229, 0.45983392],
        [0.5002625, 0.6405054],
        [0.3494719, 0.82469195],
        [0.6534365, 0.8232509]
    ], dtype=np.float32)
    target_facial_points = reference_facial_points.copy() * _target_shape
    h, w = _image.shape[:2]
    remapped_landmark = _landmark.copy() * [w, h]
    # cv2.estimateRigidTransform was removed in OpenCV 4.x;
    # estimateAffine2D is the supported replacement for a full affine fit
    if hasattr(cv2, 'estimateAffine2D'):
        transform_matrix, _ = cv2.estimateAffine2D(
            remapped_landmark.astype(np.float32), target_facial_points)
    else:
        transform_matrix = cv2.estimateRigidTransform(remapped_landmark, target_facial_points, True)
    face_img = cv2.warpAffine(_image, transform_matrix, _target_shape)
    return face_img
def correct_face_orientation(_image, _landmark_info):
    """
    Correct the orientation of a face image.

    Args:
        _image: the face image
        _landmark_info: landmark dict with 'points_count', 'x_locations'
            and 'y_locations'
    Returns: the rotated face image and a callback that maps an image of the
        rotated size back into the original frame
    """
    h, w = _image.shape[:2]
    # canonical positions of three landmarks in a 96x112 face crop
    reference_facial_points = np.array([
        [30.29459953, 51.69630003],
        [65.53179932, 51.50139904],
        [48.02519989, 71.73660183],
    ], dtype=np.float32)
    # pick three landmarks depending on the layout -- presumably the two eyes
    # and the nose tip; confirm against the landmark-model point ordering
    if _landmark_info['points_count'] == 5:
        points_index = [0, 1, 2]
    elif _landmark_info['points_count'] == 106:
        points_index = [38, 88, 86]
    else:
        raise NotImplementedError(f"Cannot correct face with {_landmark_info['points_count']} landmark points now")
    landmark_x = _landmark_info['x_locations'][points_index]
    landmark_y = _landmark_info['y_locations'][points_index]
    landmark = np.stack([landmark_x, landmark_y], axis=1)
    # affine from the (scaled) detected landmarks onto the canonical positions
    transform_matrix = cv2.getAffineTransform((landmark * [96.0, 112.0]).astype(np.float32), reference_facial_points)
    # rotate about the third selected landmark, mapped to pixel coordinates
    center_point = (landmark[-1] * (w, h)).astype(np.float32)
    # in-plane rotation angle recovered from the affine matrix entries
    degree = math.degrees(math.atan(transform_matrix[0, 0] / transform_matrix[0, 1]))
    assert -90 <= degree <= 90, 'the face correct angle must be between -90 degree and 90 degree'
    if degree > 0:
        rotation_degree = degree - 90
    else:
        rotation_degree = degree + 90
    rotated_image, _ = rotate_degree_img(_image, rotation_degree, (center_point[0], center_point[1]), True)
    rotated_points, rotated_center_point = get_expand_rotated_points(_image, (center_point[0], center_point[1]),
                                                                    rotation_degree)
    original_h, original_w = _image.shape[:2]
    # affine taking three rotated corners back onto the original image corners
    transform_back_matrix = cv2.getAffineTransform(
        rotated_points[:3].astype(np.float32),
        np.array([[0, 0], [original_w - 1, 0], [original_w - 1, original_h - 1]], dtype=np.float32)
    )
    def _rotate_back_callback(_to_rotate_back_image):
        """
        Callback used to map an image back to the original orientation.

        Args:
            _to_rotate_back_image: image to map back
        Returns: an image aligned with the original frame
        """
        return cv2.warpAffine(_to_rotate_back_image,
                              transform_back_matrix,
                              (original_w, original_h),
                              flags=cv2.INTER_NEAREST)
    return rotated_image, _rotate_back_callback
| [
"numpy.clip",
"numpy.hstack",
"numpy.argsort",
"numpy.array",
"shapely.geometry.Polygon",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin",
"math.atan",
"numpy.atleast_2d",
"numpy.mean",
"numpy.reshape",
"numpy.where",
"numpy.putmask",
"numpy.max",
"cv2.minAreaRect",
"numpy.stack",
... | [((1237, 1286), 'scipy.interpolate.splprep', 'splprep', (['_points.T'], {'u': 'None', 's': '(1.0)', 'per': '(1)', 'quiet': '(2)'}), '(_points.T, u=None, s=1.0, per=1, quiet=2)\n', (1244, 1286), False, 'from scipy.interpolate import splprep, splev\n'), ((1354, 1378), 'scipy.interpolate.splev', 'splev', (['u_new', 'tck'], {'der': '(0)'}), '(u_new, tck, der=0)\n', (1359, 1378), False, 'from scipy.interpolate import splprep, splev\n'), ((1734, 1763), 'numpy.reshape', 'np.reshape', (['_contour', '(-1, 2)'], {}), '(_contour, (-1, 2))\n', (1744, 1763), True, 'import numpy as np\n'), ((3966, 3983), 'numpy.array', 'np.array', (['_points'], {}), '(_points)\n', (3974, 3983), True, 'import numpy as np\n'), ((7839, 7858), 'numpy.deg2rad', 'np.deg2rad', (['_degree'], {}), '(_degree)\n', (7849, 7858), True, 'import numpy as np\n'), ((7998, 8020), 'numpy.atleast_2d', 'np.atleast_2d', (['_center'], {}), '(_center)\n', (8011, 8020), True, 'import numpy as np\n'), ((8034, 8056), 'numpy.atleast_2d', 'np.atleast_2d', (['_points'], {}), '(_points)\n', (8047, 8056), True, 'import numpy as np\n'), ((8068, 8141), 'numpy.reshape', 'np.reshape', (['(rotate_matrix @ (points.T - center.T) + center.T).T', '(-1, 2)'], {}), '((rotate_matrix @ (points.T - center.T) + center.T).T, (-1, 2))\n', (8078, 8141), True, 'import numpy as np\n'), ((8425, 8492), 'numpy.array', 'np.array', (['[[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1], _center]'], {}), '([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1], _center])\n', (8433, 8492), True, 'import numpy as np\n'), ((9926, 10015), 'cv2.warpAffine', 'cv2.warpAffine', (['_img', 'rotate_matrix', '(new_width, new_height)'], {'flags': 'cv2.INTER_LINEAR'}), '(_img, rotate_matrix, (new_width, new_height), flags=cv2.\n INTER_LINEAR)\n', (9940, 10015), False, 'import cv2\n'), ((10448, 10484), 'numpy.mean', 'np.mean', (['_convex_hull_points'], {'axis': '(0)'}), '(_convex_hull_points, axis=0)\n', (10455, 10484), True, 'import numpy as np\n'), ((10546, 10581), 
'numpy.linalg.norm', 'np.linalg.norm', (['diff_points'], {'axis': '(1)'}), '(diff_points, axis=1)\n', (10560, 10581), True, 'import numpy as np\n'), ((10594, 10642), 'numpy.arctan2', 'np.arctan2', (['diff_points[:, 1]', 'diff_points[:, 0]'], {}), '(diff_points[:, 1], diff_points[:, 0])\n', (10604, 10642), True, 'import numpy as np\n'), ((10699, 10741), 'numpy.zeros_like', 'np.zeros_like', (['diff_points'], {'dtype': 'np.float'}), '(diff_points, dtype=np.float)\n', (10712, 10741), True, 'import numpy as np\n'), ((11402, 11448), 'cv2.resize', 'cv2.resize', (['_image', '(target_w, _target_height)'], {}), '(_image, (target_w, _target_height))\n', (11412, 11448), False, 'import cv2\n'), ((11742, 11787), 'cv2.resize', 'cv2.resize', (['_image', '(_target_width, target_h)'], {}), '(_image, (_target_width, target_h))\n', (11752, 11787), False, 'import cv2\n'), ((13610, 13650), 'cv2.resize', 'cv2.resize', (['_image', '(target_w, target_h)'], {}), '(_image, (target_w, target_h))\n', (13620, 13650), False, 'import cv2\n'), ((18346, 18419), 'numpy.array', 'np.array', (['[[0, 0], [target_w, 0], [target_w, target_h]]'], {'dtype': 'np.float32'}), '([[0, 0], [target_w, 0], [target_w, target_h]], dtype=np.float32)\n', (18354, 18419), True, 'import numpy as np\n'), ((18566, 18623), 'cv2.warpAffine', 'cv2.warpAffine', (['_image', 'warp_matrix', '(target_w, target_h)'], {}), '(_image, warp_matrix, (target_w, target_h))\n', (18580, 18623), False, 'import cv2\n'), ((19275, 19373), 'numpy.array', 'np.array', (['[[0, 0], [replace_image_w, 0], [replace_image_w, replace_image_h]]'], {'dtype': 'np.float32'}), '([[0, 0], [replace_image_w, 0], [replace_image_w, replace_image_h]],\n dtype=np.float32)\n', (19283, 19373), True, 'import numpy as np\n'), ((19626, 19677), 'cv2.warpAffine', 'cv2.warpAffine', (['_replace_image', 'warp_matrix', '(w, h)'], {}), '(_replace_image, warp_matrix, (w, h))\n', (19640, 19677), False, 'import cv2\n'), ((19724, 19793), 'numpy.putmask', 'np.putmask', 
(['replaced_image', '(replaced_mask == 1)', 'masked_replaced_image'], {}), '(replaced_image, replaced_mask == 1, masked_replaced_image)\n', (19734, 19793), True, 'import numpy as np\n'), ((20963, 21010), 'numpy.clip', 'np.clip', (['rotated_points[:, 0]'], {'a_min': '(0)', 'a_max': 'w'}), '(rotated_points[:, 0], a_min=0, a_max=w)\n', (20970, 21010), True, 'import numpy as np\n'), ((21038, 21085), 'numpy.clip', 'np.clip', (['rotated_points[:, 1]'], {'a_min': '(0)', 'a_max': 'h'}), '(rotated_points[:, 1], a_min=0, a_max=h)\n', (21045, 21085), True, 'import numpy as np\n'), ((22311, 22472), 'numpy.array', 'np.array', (['[[0.31556875, 0.4615741], [0.6826229, 0.45983392], [0.5002625, 0.6405054],\n [0.3494719, 0.82469195], [0.6534365, 0.8232509]]'], {'dtype': 'np.float32'}), '([[0.31556875, 0.4615741], [0.6826229, 0.45983392], [0.5002625, \n 0.6405054], [0.3494719, 0.82469195], [0.6534365, 0.8232509]], dtype=np.\n float32)\n', (22319, 22472), True, 'import numpy as np\n'), ((22684, 22757), 'cv2.estimateRigidTransform', 'cv2.estimateRigidTransform', (['remapped_landmark', 'target_facial_points', '(True)'], {}), '(remapped_landmark, target_facial_points, True)\n', (22710, 22757), False, 'import cv2\n'), ((22773, 22828), 'cv2.warpAffine', 'cv2.warpAffine', (['_image', 'transform_matrix', '_target_shape'], {}), '(_image, transform_matrix, _target_shape)\n', (22787, 22828), False, 'import cv2\n'), ((23100, 23217), 'numpy.array', 'np.array', (['[[30.29459953, 51.69630003], [65.53179932, 51.50139904], [48.02519989, \n 71.73660183]]'], {'dtype': 'np.float32'}), '([[30.29459953, 51.69630003], [65.53179932, 51.50139904], [\n 48.02519989, 71.73660183]], dtype=np.float32)\n', (23108, 23217), True, 'import numpy as np\n'), ((23669, 23711), 'numpy.stack', 'np.stack', (['[landmark_x, landmark_y]'], {'axis': '(1)'}), '([landmark_x, landmark_y], axis=1)\n', (23677, 23711), True, 'import numpy as np\n'), ((2099, 2147), 'numpy.transpose', 'np.transpose', (['_contour[start_index:end_index, 
:]'], {}), '(_contour[start_index:end_index, :])\n', (2111, 2147), True, 'import numpy as np\n'), ((2182, 2228), 'bezier.Curve', 'bezier.Curve', (['nodes'], {'degree': '(nodes.shape[1] - 1)'}), '(nodes, degree=nodes.shape[1] - 1)\n', (2194, 2228), False, 'import bezier\n'), ((4643, 4670), 'numpy.hstack', 'np.hstack', (['to_return_points'], {}), '(to_return_points)\n', (4652, 4670), True, 'import numpy as np\n'), ((6526, 6544), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (6536, 6544), True, 'import numpy as np\n'), ((6716, 6762), 'numpy.maximum', 'np.maximum', (['x1[max_index]', 'x1[score_index[1:]]'], {}), '(x1[max_index], x1[score_index[1:]])\n', (6726, 6762), True, 'import numpy as np\n'), ((6792, 6838), 'numpy.maximum', 'np.maximum', (['y1[max_index]', 'y1[score_index[1:]]'], {}), '(y1[max_index], y1[score_index[1:]])\n', (6802, 6838), True, 'import numpy as np\n'), ((6870, 6916), 'numpy.minimum', 'np.minimum', (['x2[max_index]', 'x2[score_index[1:]]'], {}), '(x2[max_index], x2[score_index[1:]])\n', (6880, 6916), True, 'import numpy as np\n'), ((6949, 6995), 'numpy.minimum', 'np.minimum', (['y2[max_index]', 'y2[score_index[1:]]'], {}), '(y2[max_index], y2[score_index[1:]])\n', (6959, 6995), True, 'import numpy as np\n'), ((7013, 7076), 'numpy.maximum', 'np.maximum', (['(0.0)', '(intersection_right_x - intersection_left_x + 1)'], {}), '(0.0, intersection_right_x - intersection_left_x + 1)\n', (7023, 7076), True, 'import numpy as np\n'), ((7094, 7157), 'numpy.maximum', 'np.maximum', (['(0.0)', '(intersection_bottom_y - intersection_top_y + 1)'], {}), '(0.0, intersection_bottom_y - intersection_top_y + 1)\n', (7104, 7157), True, 'import numpy as np\n'), ((8623, 8651), 'numpy.min', 'np.min', (['rotated_points[:, 0]'], {}), '(rotated_points[:, 0])\n', (8629, 8651), True, 'import numpy as np\n'), ((8668, 8696), 'numpy.min', 'np.min', (['rotated_points[:, 1]'], {}), '(rotated_points[:, 1])\n', (8674, 8696), True, 'import numpy as np\n'), ((9577, 
9629), 'numpy.array', 'np.array', (['[[0, 0], [w, 0], [w, h]]'], {'dtype': 'np.float32'}), '([[0, 0], [w, 0], [w, h]], dtype=np.float32)\n', (9585, 9629), True, 'import numpy as np\n'), ((9819, 9862), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '_degree', '(1)'], {}), '(center, _degree, 1)\n', (9842, 9862), False, 'import cv2\n'), ((10060, 10151), 'cv2.warpAffine', 'cv2.warpAffine', (['_mask', 'rotate_matrix', '(new_width, new_height)'], {'flags': 'cv2.INTER_NEAREST'}), '(_mask, rotate_matrix, (new_width, new_height), flags=cv2.\n INTER_NEAREST)\n', (10074, 10151), False, 'import cv2\n'), ((10782, 10795), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10788, 10795), True, 'import numpy as np\n'), ((10836, 10849), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10842, 10849), True, 'import numpy as np\n'), ((11371, 11389), 'numpy.ceil', 'np.ceil', (['(w / ratio)'], {}), '(w / ratio)\n', (11378, 11389), True, 'import numpy as np\n'), ((11711, 11729), 'numpy.ceil', 'np.ceil', (['(h / ratio)'], {}), '(h / ratio)\n', (11718, 11729), True, 'import numpy as np\n'), ((16503, 16528), 'shapely.geometry.Polygon', 'Polygon', (['reshaped_contour'], {}), '(reshaped_contour)\n', (16510, 16528), False, 'from shapely.geometry import Polygon\n'), ((16626, 16643), 'pyclipper.PyclipperOffset', 'PyclipperOffset', ([], {}), '()\n', (16641, 16643), False, 'from pyclipper import PyclipperOffset\n'), ((17111, 17142), 'cv2.minAreaRect', 'cv2.minAreaRect', (['scaled_contour'], {}), '(scaled_contour)\n', (17126, 17142), False, 'import cv2\n'), ((19531, 19575), 'numpy.ones_like', 'np.ones_like', (['_replace_image'], {'dtype': 'np.uint8'}), '(_replace_image, dtype=np.uint8)\n', (19543, 19575), True, 'import numpy as np\n'), ((20549, 20800), 'numpy.array', 'np.array', (['[[center_x - half_box_width, center_y - half_box_height], [center_x +\n half_box_width, center_y - half_box_height], [center_x + half_box_width,\n center_y + half_box_height], [center_x - 
half_box_width, center_y +\n half_box_height]]'], {}), '([[center_x - half_box_width, center_y - half_box_height], [\n center_x + half_box_width, center_y - half_box_height], [center_x +\n half_box_width, center_y + half_box_height], [center_x - half_box_width,\n center_y + half_box_height]])\n', (20557, 20800), True, 'import numpy as np\n'), ((21849, 21889), 'cv2.cvtColor', 'cv2.cvtColor', (['_image', 'cv2.COLOR_GRAY2BGR'], {}), '(_image, cv2.COLOR_GRAY2BGR)\n', (21861, 21889), False, 'import cv2\n'), ((23918, 23976), 'math.atan', 'math.atan', (['(transform_matrix[0, 0] / transform_matrix[0, 1])'], {}), '(transform_matrix[0, 0] / transform_matrix[0, 1])\n', (23927, 23976), False, 'import math\n'), ((24641, 24736), 'numpy.array', 'np.array', (['[[0, 0], [original_w - 1, 0], [original_w - 1, original_h - 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [original_w - 1, 0], [original_w - 1, original_h - 1]],\n dtype=np.float32)\n', (24649, 24736), True, 'import numpy as np\n'), ((24955, 25070), 'cv2.warpAffine', 'cv2.warpAffine', (['_to_rotate_back_image', 'transform_back_matrix', '(original_w, original_h)'], {'flags': 'cv2.INTER_NEAREST'}), '(_to_rotate_back_image, transform_back_matrix, (original_w,\n original_h), flags=cv2.INTER_NEAREST)\n', (24969, 25070), False, 'import cv2\n'), ((561, 589), 'math.atan2', 'math.atan2', (['(-diff_y)', '(-diff_x)'], {}), '(-diff_y, -diff_x)\n', (571, 589), False, 'import math\n'), ((2266, 2302), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(point_num * 5)'], {}), '(0.0, 1.0, point_num * 5)\n', (2277, 2302), True, 'import numpy as np\n'), ((2337, 2356), 'numpy.transpose', 'np.transpose', (['curve'], {}), '(curve)\n', (2349, 2356), True, 'import numpy as np\n'), ((2382, 2409), 'numpy.array', 'np.array', (['to_return_contour'], {}), '(to_return_contour)\n', (2390, 2409), True, 'import numpy as np\n'), ((2695, 2711), 'numpy.sum', 'np.sum', (['m_region'], {}), '(m_region)\n', (2701, 2711), True, 'import numpy as np\n'), ((9457, 
9489), 'numpy.max', 'np.max', (['four_corner_points[:, 0]'], {}), '(four_corner_points[:, 0])\n', (9463, 9489), True, 'import numpy as np\n'), ((9516, 9548), 'numpy.max', 'np.max', (['four_corner_points[:, 1]'], {}), '(four_corner_points[:, 1])\n', (9522, 9548), True, 'import numpy as np\n'), ((11082, 11102), 'numpy.square', 'np.square', (['(_p1 - _p2)'], {}), '(_p1 - _p2)\n', (11091, 11102), True, 'import numpy as np\n'), ((14264, 14330), 'numpy.ones', 'np.ones', (['(target_h, target_w, _image.shape[2])'], {'dtype': '_image.dtype'}), '((target_h, target_w, _image.shape[2]), dtype=_image.dtype)\n', (14271, 14330), True, 'import numpy as np\n'), ((14380, 14429), 'numpy.ones', 'np.ones', (['(target_h, target_w)'], {'dtype': '_image.dtype'}), '((target_h, target_w), dtype=_image.dtype)\n', (14387, 14429), True, 'import numpy as np\n'), ((15723, 15773), 'numpy.clip', 'np.clip', (["_location['top_left_x']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['top_left_x'], a_min=0, a_max=1)\n", (15730, 15773), True, 'import numpy as np\n'), ((15800, 15850), 'numpy.clip', 'np.clip', (["_location['top_left_y']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['top_left_y'], a_min=0, a_max=1)\n", (15807, 15850), True, 'import numpy as np\n'), ((15881, 15935), 'numpy.clip', 'np.clip', (["_location['bottom_right_x']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['bottom_right_x'], a_min=0, a_max=1)\n", (15888, 15935), True, 'import numpy as np\n'), ((15966, 16020), 'numpy.clip', 'np.clip', (["_location['bottom_right_y']"], {'a_min': '(0)', 'a_max': '(1)'}), "(_location['bottom_right_y'], a_min=0, a_max=1)\n", (15973, 16020), True, 'import numpy as np\n'), ((21964, 22004), 'cv2.cvtColor', 'cv2.cvtColor', (['_image', 'cv2.COLOR_BGRA2BGR'], {}), '(_image, cv2.COLOR_BGRA2BGR)\n', (21976, 22004), False, 'import cv2\n'), ((2834, 2852), 'numpy.where', 'np.where', (['m_region'], {}), '(m_region)\n', (2842, 2852), True, 'import numpy as np\n'), ((4081, 4120), 'numpy.abs', 'np.abs', 
(['(m_point_previous - m_point_next)'], {}), '(m_point_previous - m_point_next)\n', (4087, 4120), True, 'import numpy as np\n'), ((4497, 4522), 'numpy.vstack', 'np.vstack', (['[new_x, new_y]'], {}), '([new_x, new_y])\n', (4506, 4522), True, 'import numpy as np\n'), ((4574, 4630), 'numpy.array', 'np.array', (['[[m_point_previous[0]], [m_point_previous[1]]]'], {}), '([[m_point_previous[0]], [m_point_previous[1]]])\n', (4582, 4630), True, 'import numpy as np\n'), ((7430, 7493), 'numpy.logical_and', 'np.logical_and', (['(iou < _nms_threshold)', '(min_areas != intersection)'], {}), '(iou < _nms_threshold, min_areas != intersection)\n', (7444, 7493), True, 'import numpy as np\n'), ((7890, 7903), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7896, 7903), True, 'import numpy as np\n'), ((7953, 7966), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7959, 7966), True, 'import numpy as np\n'), ((7968, 7981), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7974, 7981), True, 'import numpy as np\n'), ((3020, 3038), 'numpy.where', 'np.where', (['m_region'], {}), '(m_region)\n', (3028, 3038), True, 'import numpy as np\n'), ((7906, 7919), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7912, 7919), True, 'import numpy as np\n'), ((13027, 13052), 'numpy.ceil', 'np.ceil', (['(h / _height_base)'], {}), '(h / _height_base)\n', (13034, 13052), True, 'import numpy as np\n'), ((13206, 13230), 'numpy.ceil', 'np.ceil', (['(w / _width_base)'], {}), '(w / _width_base)\n', (13213, 13230), True, 'import numpy as np\n'), ((16920, 16938), 'numpy.array', 'np.array', (['max_path'], {}), '(max_path)\n', (16928, 16938), True, 'import numpy as np\n'), ((16877, 16888), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (16885, 16888), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""
spanning_tree
=============
Script: spanning_tree.py
Author: <EMAIL>
Modified: 2018-06-13
Original: ... mst.py in my github
extensive documentation is there.
Purpose:
--------
Produce a spanning tree from a point set. I have yet to confirm
whether it constitutes a minimum spanning tree, since the implementation
doesn't specify whether Prim's algorithm is being used (see ref. 2)
References:
-----------
`<http://stackoverflow.com/questions/41903502/sort-two-dimensional-
list-python>`_.
`<http://peekaboo-vision.blogspot.ca/2012/02/simplistic-minimum-
spanning-tree-in.html>`_.
Also referenced here...
`<http://stackoverflow.com/questions/34374839/minimum-spanning-tree-
distance-and-graph>`_.
Notes:
------
>>> array 'a' array([[ 0, 0], constructed for minimum spanning tree example
[ 0, 8],
[10, 8],
[10, 0],
[ 3, 4],
[ 7, 4]])
(1) sorting
>>> np.lexsort((a[:,1], a[:,0])) sort by x, then y
>>> np.lexsort(a.T) >= np.lexsort((a[:,0], a[:,1])) sort y, x
(2) Distances
unsorted....
>>> np.linalg.norm(a[1:] - a[:-1], axis=1)
array([ 8.0, 10.0, 8.0, 8.1, 4.0])
>>> np.sum(np.linalg.norm(a[1:] - a[:-1], axis=1)) => 38.0622...
sorted....
>>> a_srt = a[np.lexsort(a.T),:]
>>> np.linalg.norm(a_srt[1:] - a_srt[:-1], axis=1)
array([ 8.0, 5.0, 4.0, 5.0, 8.0])
>>> np.sum(np.linalg.norm(a_srt[1:] - a_srt[:-1], axis=1)) => 30.0...
(3) Near results...
>>> coords, dist, n_array = n_near(s, N=2)
ID Xo Yo C0_x C0_y C1_x C1_y Dist0 Dist1
([(0, 0.0, 0.0, 3.0, 4.0, 0.0, 8.0, 5.0, 8.0),
(1, 0.0, 8.0, 3.0, 4.0, 0.0, 0.0, 5.0, 8.0),
(2, 3.0, 4.0, 7.0, 4.0, 0.0, 0.0, 4.0, 5.0),
(3, 7.0, 4.0, 3.0, 4.0, 10.0, 8.0, 4.0, 5.0),
(4, 10.0, 8.0, 7.0, 4.0, 10.0, 0.0, 5.0, 8.0),
(5, 10.0, 0.0, 7.0, 4.0, 10.0, 8.0, 5.0, 8.0)],
dtype=[('ID', '<i4'),
('Xo', '<f8'), ('Yo', '<f8'),
('C0_X', '<f8'), ('C0_Y', '<f8'),
('C1_X', '<f8'), ('C1_Y', '<f8'),
('Dist0', '<f8'), ('Dist1', '<f8')])
Connections:
>>> o_d
array([(0, 2, 5.0),
(2, 3, 4.0),
(2, 1, 5.0),
(3, 4, 5.0),
(3, 5, 5.0)],
dtype=[('Orig', '<i4'), ('Dest', '<i4'), ('Dist', '<f8')])
>>> a[o_d['Orig']] a[o_d['Dest']]
array([[ 0, 0], array([[10, 8],
[10, 8], [10, 0],
[10, 8], [ 0, 8],
[10, 0], [ 3, 4],
[10, 0]]) [ 7, 4]])
distance array:
>>> array([[ 5.0, 8.0, 8.1, 10.0, 12.8],
[ 5.0, 8.0, 8.1, 10.0, 12.8],
[ 4.0, 5.0, 5.0, 8.1, 8.1],
[ 4.0, 5.0, 5.0, 8.1, 8.1],
[ 5.0, 8.0, 8.1, 10.0, 12.8],
[ 5.0, 8.0, 8.1, 10.0, 12.8]])
Back to the original distance and sorted array, a_srt.
The distances are determined using the sorted points, the diagonal
distances are set to np.inf so that they have the maximal distance.
The distance values can be sorted to get their indices in the array
Then the array can be sliced to retrieve the points coordinates and the
distance array can be sliced to get the distances.
>>> dix = np.arange(d.shape[0])
d[dix, dix] = np.inf
distance array, `d`
>>> d
array([[ inf, 8.0, 5.0, 8.1, 10.0, 12.8],
[ 8.0, inf, 5.0, 8.1, 12.8, 10.0],
[ 5.0, 5.0, inf, 4.0, 8.1, 8.1],
[ 8.1, 8.1, 4.0, inf, 5.0, 5.0],
[ 10.0, 12.8, 8.1, 5.0, inf, 8.0],
[ 12.8, 10.0, 8.1, 5.0, 8.0, inf]])
>>> np.argsort(d[0]) # => array([2, 1, 3, 4, 5, 0])
>>> a_srt[np.argsort(d[0])]
array([[3, 4], [ 0, 8], [7, 4], [10, 0], [10, 8], [0, 0]])
>>> d[0][np.argsort(d[0])] # => array([ 5.0, 8.0, 8.1, 10.0, 12.8, inf])
: ---------------------------------------------------------------------:
"""
# ---- imports, formats, constants ----
#
import sys
import numpy as np
import arcpy
from arcpytools_pnt import fc_info, tweet
from textwrap import dedent
ft = {'bool': lambda x: repr(x.astype(np.int32)),
'float_kind': '{: 0.1f}'.format}
np.set_printoptions(edgeitems=10, linewidth=100, precision=2,
suppress=True, threshold=120,
formatter=ft)
np.ma.masked_print_option.set_display('-')
script = sys.argv[0]
# ---- process and functions ----
# (1) run dist_arr which calls _e_dist
# (2) perform the mst (minimum spanning tree, using Prims algorithm)
# (3) connect the points and return the structured array and then the fc
def dist_arr(a, prn=False):
"""Minimum spanning tree prep... see main header
: paths from given data set...
"""
# idx = np.lexsort(a.T) # sort y, then x
idx = np.lexsort((a[:, 1], a[:, 0])) # sort X, then Y
# idx= np.lexsort((a[:,0], a[:,1])) # sort Y, then X
a_srt = a[idx, :]
d = _e_dist(a_srt)
if prn:
frmt = """\n {}\n :Input array...\n {}\n\n :Sorted array...
{}\n\n :Distance...\n {}
"""
args = [dist_arr.__doc__, a, a_srt, d] # d.astype('int')]
print(dedent(frmt).format(*args))
return idx, a_srt, d
def _e_dist(a):
"""Return a 2D square-form euclidean distance matrix. For other
dimensions, use e_dist in ein_geom.py
"""
b = a.reshape(np.prod(a.shape[:-1]), 1, a.shape[-1])
diff = a - b
d = np.sqrt(np.einsum('ijk,ijk->ij', diff, diff)).squeeze()
# d = np.triu(d)
return d
def mst(W, copy_W=True):
"""Determine the minimum spanning tree for a set of points represented
by their inter-point distances... ie their 'W'eights
Requires:
---------
W: array
edge weights (distance, time) for a set of points. W needs to be
a square array or a np.triu perhaps
Returns:
--------
pairs: array
the pair of nodes that form the edges
"""
if copy_W:
W = W.copy()
if W.shape[0] != W.shape[1]:
raise ValueError("W needs to be square matrix of edge weights")
Np = W.shape[0]
pairs = []
pnts_seen = [0] # Add the first point
n_seen = 1
# exclude self connections by assigning inf to the diagonal
diag = np.arange(Np)
W[diag, diag] = np.inf
#
while n_seen != Np:
new_edge = np.argmin(W[pnts_seen], axis=None)
new_edge = divmod(new_edge, Np)
new_edge = [pnts_seen[new_edge[0]], new_edge[1]]
pairs.append(new_edge)
pnts_seen.append(new_edge[1])
W[pnts_seen, new_edge[1]] = np.inf
W[new_edge[1], pnts_seen] = np.inf
n_seen += 1
return np.vstack(pairs)
def connect(a, dists, edges):
"""Return the full spanning tree, with points, connections and distance
a: point array
points to connect
dist: array
distance array, from _e_dist
edge: array
edges, from mst
"""
p_f = edges[:, 0]
p_t = edges[:, 1]
d = dists[p_f, p_t]
n = p_f.shape[0]
dt = [('Orig', '<i4'), ('Dest', 'i4'), ('Dist', '<f8')]
out = np.zeros((n,), dtype=dt)
out['Orig'] = p_f
out['Dest'] = p_t
out['Dist'] = d
return out
# ---- main section ----
def _demo():
"""A sample run demonstrating the principles and workflow"""
# a = np.array([[0, 0], [0, 8], [10, 8], [10, 0], [3, 4], [7, 4]])
pth = script.split("/")[:-2] + ['Point_tools.gdb', 'unsorted_pnts']
in_fc = "/".join(pth)
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
a = arcpy.da.FeatureClassToNumPyArray(in_fc,
['SHAPE@X', 'SHAPE@Y'], "", SR)
a = a.view(np.dtype('float64')).reshape(a.shape[0], 2)
idx, a_srt, d = dist_arr(a, prn=False) # distance array and sorted pnts
pairs = mst(d) # the orig-dest pairs for the mst
o_d = connect(a_srt, d, pairs) # produce an o-d structured array
os = a_srt[pairs[:, 0]]
ds = a_srt[pairs[:, 1]]
fr_to = np.array(list(zip(os, ds)))
return a, o_d, fr_to
def _tool():
in_fc = sys.argv[1]
out_fc = sys.argv[2]
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
out_flds = [oid_fld, shp_fld]
frmt = """\nScript.... {}\nUsing..... {}\nSR...{}\n"""
args = [script, in_fc, SR.name]
msg = frmt.format(*args)
tweet(msg)
a = arcpy.da.FeatureClassToNumPyArray(in_fc, shp_fld, "", SR)
if len(a) >= 2:
z = np.zeros((a.shape[0], 2))
z[:, 0] = a['Shape'][:, 0]
z[:, 1] = a['Shape'][:, 1]
idx, a_srt, d = dist_arr(z)
pairs = mst(d)
o_d = connect(a_srt, d, pairs)
os = a_srt[pairs[:, 0]]
ds = a_srt[pairs[:, 1]]
fr_to = np.array(list(zip(os, ds)))
s = []
for pt in fr_to:
s.append(arcpy.Polyline(arcpy.Array([arcpy.Point(*p) for p in pt]), SR))
if arcpy.Exists(out_fc):
arcpy.Delete_management(out_fc)
arcpy.CopyFeatures_management(s, out_fc)
else:
msg2 = """
|
---- Potential User error......
Technically the script didn't fail.... but...
You need at least 2 different points... make sure you don't have an
incorrect selection
---- Try again
|
"""
tweet(dedent(msg2))
# ---- demo section ----
if len(sys.argv) == 1:
testing = True
a, o_d, fr_to = _demo()
else:
testing = False
_tool()
"""
Dissolve_management (in_features, out_feature_class, {dissolve_field},
{statistics_fields}, {multi_part}, {unsplit_lines})
Dissolve_management (in_features, out_feature_class, '', '',
'SINGLE_PART', 'UNSPLIT_LINES')
collapse from-to data by reshaping it
fr_to.shape
Out[57]: (199, 2, 2)
fr_to.reshape(199*2, 2)
"""
# ---------------------------------------------------------------------
if __name__ == "__main__":
"""Main section... """
# print("Script... {}".format(script))
# a = np.random.randint(1, 10, size=(10,2))
# a, d, pairs, o_d = _demo()
| [
"numpy.prod",
"arcpytools_pnt.fc_info",
"arcpy.CopyFeatures_management",
"textwrap.dedent",
"numpy.set_printoptions",
"arcpy.Point",
"arcpy.da.FeatureClassToNumPyArray",
"arcpytools_pnt.tweet",
"numpy.lexsort",
"numpy.zeros",
"arcpy.Exists",
"numpy.einsum",
"numpy.vstack",
"arcpy.Delete_ma... | [((4260, 4369), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'edgeitems': '(10)', 'linewidth': '(100)', 'precision': '(2)', 'suppress': '(True)', 'threshold': '(120)', 'formatter': 'ft'}), '(edgeitems=10, linewidth=100, precision=2, suppress=True,\n threshold=120, formatter=ft)\n', (4279, 4369), True, 'import numpy as np\n'), ((4409, 4451), 'numpy.ma.masked_print_option.set_display', 'np.ma.masked_print_option.set_display', (['"""-"""'], {}), "('-')\n", (4446, 4451), True, 'import numpy as np\n'), ((4887, 4917), 'numpy.lexsort', 'np.lexsort', (['(a[:, 1], a[:, 0])'], {}), '((a[:, 1], a[:, 0]))\n', (4897, 4917), True, 'import numpy as np\n'), ((6396, 6409), 'numpy.arange', 'np.arange', (['Np'], {}), '(Np)\n', (6405, 6409), True, 'import numpy as np\n'), ((6816, 6832), 'numpy.vstack', 'np.vstack', (['pairs'], {}), '(pairs)\n', (6825, 6832), True, 'import numpy as np\n'), ((7269, 7293), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'dt'}), '((n,), dtype=dt)\n', (7277, 7293), True, 'import numpy as np\n'), ((7696, 7710), 'arcpytools_pnt.fc_info', 'fc_info', (['in_fc'], {}), '(in_fc)\n', (7703, 7710), False, 'from arcpytools_pnt import fc_info, tweet\n'), ((7720, 7792), 'arcpy.da.FeatureClassToNumPyArray', 'arcpy.da.FeatureClassToNumPyArray', (['in_fc', "['SHAPE@X', 'SHAPE@Y']", '""""""', 'SR'], {}), "(in_fc, ['SHAPE@X', 'SHAPE@Y'], '', SR)\n", (7753, 7792), False, 'import arcpy\n'), ((8348, 8362), 'arcpytools_pnt.fc_info', 'fc_info', (['in_fc'], {}), '(in_fc)\n', (8355, 8362), False, 'from arcpytools_pnt import fc_info, tweet\n'), ((8530, 8540), 'arcpytools_pnt.tweet', 'tweet', (['msg'], {}), '(msg)\n', (8535, 8540), False, 'from arcpytools_pnt import fc_info, tweet\n'), ((8550, 8607), 'arcpy.da.FeatureClassToNumPyArray', 'arcpy.da.FeatureClassToNumPyArray', (['in_fc', 'shp_fld', '""""""', 'SR'], {}), "(in_fc, shp_fld, '', SR)\n", (8583, 8607), False, 'import arcpy\n'), ((5488, 5509), 'numpy.prod', 'np.prod', (['a.shape[:-1]'], {}), 
'(a.shape[:-1])\n', (5495, 5509), True, 'import numpy as np\n'), ((6490, 6524), 'numpy.argmin', 'np.argmin', (['W[pnts_seen]'], {'axis': 'None'}), '(W[pnts_seen], axis=None)\n', (6499, 6524), True, 'import numpy as np\n'), ((8642, 8667), 'numpy.zeros', 'np.zeros', (['(a.shape[0], 2)'], {}), '((a.shape[0], 2))\n', (8650, 8667), True, 'import numpy as np\n'), ((9098, 9118), 'arcpy.Exists', 'arcpy.Exists', (['out_fc'], {}), '(out_fc)\n', (9110, 9118), False, 'import arcpy\n'), ((9174, 9214), 'arcpy.CopyFeatures_management', 'arcpy.CopyFeatures_management', (['s', 'out_fc'], {}), '(s, out_fc)\n', (9203, 9214), False, 'import arcpy\n'), ((9133, 9164), 'arcpy.Delete_management', 'arcpy.Delete_management', (['out_fc'], {}), '(out_fc)\n', (9156, 9164), False, 'import arcpy\n'), ((9522, 9534), 'textwrap.dedent', 'dedent', (['msg2'], {}), '(msg2)\n', (9528, 9534), False, 'from textwrap import dedent\n'), ((5562, 5598), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', 'diff', 'diff'], {}), "('ijk,ijk->ij', diff, diff)\n", (5571, 5598), True, 'import numpy as np\n'), ((7852, 7871), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (7860, 7871), True, 'import numpy as np\n'), ((5272, 5284), 'textwrap.dedent', 'dedent', (['frmt'], {}), '(frmt)\n', (5278, 5284), False, 'from textwrap import dedent\n'), ((9048, 9063), 'arcpy.Point', 'arcpy.Point', (['*p'], {}), '(*p)\n', (9059, 9063), False, 'import arcpy\n')] |
from contextlib import suppress
from typing import Callable, Union, Iterable, List, Optional, Tuple
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import zfit
from zfit import ztf
from zfit.core.interfaces import ZfitPDF
from zfit.util import ztyping
from zfit.util.exception import ShapeIncompatibleError
from .. import settings
from ..util.container import convert_to_container
from .limits import Space
from ..settings import ztypes, run
class UniformSampleAndWeights:
    """Default proposal for accept/reject sampling: uniform draws inside ``limits``.

    Every point inside the limits is proposed with equal weight, so the
    returned weights (and weights-max) are simply a constant ``1.``.
    """

    def __call__(self, n_to_produce: Union[int, tf.Tensor], limits: Space, dtype):
        """Propose ``n_to_produce`` uniformly distributed samples within ``limits``.

        Args:
            n_to_produce: (Approximate) total number of events to draw.
            limits: Space to sample in; may consist of several limit pairs.
            dtype: dtype the limit bounds are converted to.

        Returns:
            Tuple ``(sample, thresholds_unscaled, weights, weights_max, n_drawn)``
            as required by the sample-and-weights factory API of
            ``accept_reject_sample``.
        """
        partial_samples = []
        partial_thresholds = []
        # Uniform proposal: every event carries the same (maximal) weight.
        weights = ztf.constant(1., shape=(1,))

        limit_pairs = limits.iter_limits(as_tuple=True)
        rel_areas = limits.iter_areas(rel=True)
        for (lower, upper), frac_area in zip(limit_pairs, rel_areas):
            # Share the events among the limit pairs proportionally to their
            # relative area.
            n_partial = tf.to_int64(
                ztf.to_real(n_to_produce) * ztf.to_real(frac_area))  # TODO(Mayou36): split right!
            lower_t = ztf.convert_to_tensor(lower, dtype=dtype)
            upper_t = ztf.convert_to_tensor(upper, dtype=dtype)
            # One extra column holds the (still unscaled) acceptance threshold.
            drawn = tf.random_uniform(shape=(n_partial, limits.n_obs + 1),
                                      dtype=ztypes.float)
            # All columns but the last are the sample, rescaled into the limits.
            partial_samples.append(drawn[:, :-1] * (upper_t - lower_t) + lower_t)
            partial_thresholds.append(drawn[:, -1])

        sample = tf.concat(partial_samples, axis=0)
        thresholds_unscaled = tf.concat(partial_thresholds, axis=0)
        n_drawn = n_to_produce
        return sample, thresholds_unscaled, weights, weights, n_drawn
class EventSpace(Space):
    """EXPERIMENTAL SPACE CLASS!

    A :py:class:`~zfit.Space` whose limits can be generated per event: a
    ``factory`` callable produces a limits tensor for ``n`` events (via
    :py:meth:`create_limits`), and the stored limit bounds are themselves
    callables that are evaluated on that tensor when the ``limits`` property
    is read.
    """

    def __init__(self, obs: ztyping.ObsTypeInput, limits: ztyping.LimitsTypeInput, factory=None,
                 name: Optional[str] = "Space"):
        if limits is None:
            raise ValueError("Limits cannot be None for EventSpaces (currently)")
        self._limits_tensor = None  # set lazily by `create_limits`
        self._factory = factory
        super().__init__(obs, limits, name)

    @property
    def limits(self) -> ztyping.LimitsTypeReturn:
        # NOTE(review): `limits` is a property here, so the parent attribute is
        # accessed, not called — `super().limits()` would try to call the
        # returned limits tuple. Confirm `Space.limits` is indeed a property.
        limits = super().limits
        limits_tensor = self._limits_tensor
        if limits_tensor is not None:
            lower, upper = limits
            new_bounds = [[], []]
            # Fix: iterate over BOTH bound collections. `enumerate(lower, upper)`
            # would (mis)use `upper` as enumerate's integer `start` argument.
            for i, old_bounds in enumerate((lower, upper)):
                for bound in old_bounds:
                    # Materialize as a tuple: a generator expression would be
                    # exhausted after a single read of the limits.
                    new_bound = tuple(lim(limits_tensor) for lim in bound)
                    new_bounds[i].append(new_bound)
                new_bounds[i] = tuple(new_bounds[i])
            limits = tuple(new_bounds)
        # Fix: also return the static limits when no event tensor is set
        # (previously the property implicitly returned None in that case).
        return limits

    def create_limits(self, n):
        """Create the per-event limits tensor for ``n`` events using the factory."""
        self._limits_tensor = self._factory(n)

    def iter_areas(self, rel: bool = False) -> Tuple[float, ...]:
        raise RuntimeError("Cannot be called with an event space.")

    def add(self, other: ztyping.SpaceOrSpacesTypeInput):
        raise RuntimeError("Cannot be called with an event space.")

    def combine(self, other: ztyping.SpaceOrSpacesTypeInput):
        raise RuntimeError("Cannot be called with an event space.")
def accept_reject_sample(prob: Callable, n: int, limits: Space,
sample_and_weights_factory: Callable = UniformSampleAndWeights,
dtype=ztypes.float, prob_max: Union[None, int] = None,
efficiency_estimation: float = 1.0) -> tf.Tensor:
"""Accept reject sample from a probability distribution.
Args:
prob (function): A function taking x a Tensor as an argument and returning the probability
(or anything that is proportional to the probability).
n (int): Number of samples to produce
limits (:py:class:`~zfit.Space`): The limits to sample from
sample_and_weights_factory (Callable): A function that returns the sample to insert into `prob` and the weights
(prob) of each sample together with the random thresholds. The API looks as follows:
- Parameters:
- n_to_produce (int, tf.Tensor): The number of events to produce (not exactly).
- limits (Space): the limits in which the samples will be.
- dtype (dtype): DType of the output.
- Return:
A tuple of length 5:
- proposed sample (tf.Tensor with shape=(n_to_produce, n_obs)): The new (proposed) sample
whose values are inside `limits`.
- thresholds_unscaled (tf.Tensor with shape=(n_to_produce,): Uniformly distributed
random values **between 0 and 1**.
- weights (tf.Tensor with shape=(n_to_produce)): (Proportional to the) probability
for each sample of the distribution it was drawn from.
- weights_max (int, tf.Tensor, None): The maximum of the weights (if known). This is
what the probability maximum will be scaled with, so it should be rather lower than the maximum
if the peaks do not exactly coincide. Otherwise return None (which will **assume**
that the peaks coincide).
- n_produced: the number of events produced. Can deviate from the requested number.
dtype ():
prob_max (Union[None, int]): The maximum of the model function for the given limits. If None
is given, it will be automatically, safely estimated (by a 10% increase in computation time
(constant weak scaling)).
efficiency_estimation (float): estimation of the initial sampling efficiency.
Returns:
tf.Tensor:
"""
multiple_limits = limits.n_limits > 1
# if limits.n_limits == 1:
# lower, upper = limits.limits
# lower = ztf.convert_to_tensor(lower[0], dtype=dtype)
# upper = ztf.convert_to_tensor(upper[0], dtype=dtype)
sample_and_weights = sample_and_weights_factory()
n = tf.to_int64(n)
def enough_produced(n, sample, n_total_drawn, eff):
return tf.greater(n, tf.shape(sample, out_type=tf.int64)[0])
def sample_body(n, sample, n_total_drawn=0, eff=1.0):
if sample is None:
n_to_produce = n
else:
n_to_produce = n - tf.shape(sample, out_type=tf.int64)[0]
if isinstance(limits, EventSpace): # EXPERIMENTAL(Mayou36): added to test EventSpace
limits.create_limits(n=n)
do_print = settings.get_verbosity() > 5
if do_print:
print_op = tf.print("Number of samples to produce:", n_to_produce, " with efficiency ", eff)
with tf.control_dependencies([print_op] if do_print else []):
n_to_produce = tf.to_int64(ztf.to_real(n_to_produce) / eff * 1.01) + 100 # just to make sure
# TODO: adjustable efficiency cap for memory efficiency (prevent too many samples at once produced)
n_to_produce = tf.minimum(n_to_produce, tf.to_int64(5e5)) # introduce a cap to force serial
rnd_sample, thresholds_unscaled, weights, weights_max, n_drawn = sample_and_weights(n_to_produce=n_to_produce,
limits=limits,
dtype=dtype)
# if n_produced is None:
# raise ShapeIncompatibleError("`sample_and_weights` has to return thresholds with a defined shape."
# "Use `Tensor.set_shape()` if the automatic propagation of the shape "
# "is not available.")
n_total_drawn += n_drawn
n_total_drawn = tf.to_int64(n_total_drawn)
probabilities = prob(rnd_sample)
if prob_max is None: # TODO(performance): estimate prob_max, after enough estimations -> fix it?
# TODO(Mayou36): This control dependency is needed because otherwise the max won't be determined
# correctly. A bug report on will be filled (WIP).
# The behavior is very odd: if we do not force a kind of copy, the `reduce_max` returns
# a value smaller by a factor of 1e-14
# with tf.control_dependencies([probabilities]):
# UPDATE: this works now? Was it just a one-time bug?
prob_max_inferred = tf.reduce_max(probabilities)
else:
prob_max_inferred = prob_max
if weights_max is None:
weights_max = tf.reduce_max(weights) * 0.99 # safety margin, also taking numericals into account
weights_scaled = prob_max_inferred / weights_max * weights
random_thresholds = thresholds_unscaled * weights_scaled
if run.numeric_checks:
assert_op = [tf.assert_greater_equal(x=weights_scaled, y=probabilities,
message="Not all weights are >= probs so the sampling "
"will be biased. If a custom `sample_and_weights` "
"was used, make sure that either the shape of the "
"custom sampler (resp. it's weights) overlap better "
"or decrease the `max_weight`")]
else:
assert_op = []
with tf.control_dependencies(assert_op):
take_or_not = probabilities > random_thresholds
# rnd_sample = tf.expand_dims(rnd_sample, dim=0) if len(rnd_sample.shape) == 1 else rnd_sample
take_or_not = take_or_not[0] if len(take_or_not.shape) == 2 else take_or_not
filtered_sample = tf.boolean_mask(rnd_sample, mask=take_or_not, axis=0)
if sample is None:
sample = filtered_sample
else:
sample = tf.concat([sample, filtered_sample], axis=0)
# efficiency (estimate) of how many samples we get
eff = ztf.to_real(tf.shape(sample, out_type=tf.int64)[1]) / ztf.to_real(n_total_drawn)
return n, sample, n_total_drawn, eff
# TODO(Mayou36): refactor, remove initial call
sample = tf.while_loop(cond=enough_produced, body=sample_body, # paraopt
loop_vars=sample_body(n=n, sample=None, # run first once for initialization
n_total_drawn=0, eff=efficiency_estimation),
swap_memory=True,
parallel_iterations=4,
back_prop=False)[1] # backprop not needed here
if multiple_limits:
sample = tf.random.shuffle(sample) # to make sure, randomly remove and not biased.
new_sample = sample[:n, :] # cutting away to many produced
# TODO(Mayou36): uncomment below. Why was set_shape needed? leave away to catch failure over time
# if no failure, uncomment both for improvement of shape inference
# with suppress(AttributeError): # if n_samples_int is not a numpy object
# new_sample.set_shape((n_samples_int, n_dims))
return new_sample
def extract_extended_pdfs(pdfs: Union[Iterable[ZfitPDF], ZfitPDF]) -> List[ZfitPDF]:
"""Return all extended pdfs that are daughters.
Args:
pdfs (Iterable[pdfs]):
Returns:
List[pdfs]:
"""
from ..models.functor import BaseFunctor
pdfs = convert_to_container(pdfs)
indep_pdfs = []
for pdf in pdfs:
if not pdf.is_extended:
continue
elif isinstance(pdf, BaseFunctor):
if all(pdf.pdfs_extended):
indep_pdfs.extend(extract_extended_pdfs(pdfs=pdf.pdfs))
elif not any(pdf.pdfs_extended):
indep_pdfs.append(pdf)
else:
assert False, "Should not reach this point, wrong assumptions. Please report bug."
else: # extended, but not a functor
indep_pdfs.append(pdf)
return indep_pdfs
def extended_sampling(pdfs: Union[Iterable[ZfitPDF], ZfitPDF], limits: Space) -> tf.Tensor:
"""Create a sample from extended pdfs by sampling poissonian using the yield.
Args:
pdfs (iterable[ZfitPDF]):
limits (:py:class:`~zfit.Space`):
Returns:
Union[tf.Tensor]:
"""
samples = []
pdfs = convert_to_container(pdfs)
pdfs = extract_extended_pdfs(pdfs)
for pdf in pdfs:
n = tf.random.poisson(lam=pdf.get_yield(), shape=(), dtype=ztypes.float)
sample = pdf._single_hook_sample(limits=limits, n=n, name="extended_sampling")
# sample.set_shape((n, limits.n_obs))
samples.append(sample)
samples = tf.concat(samples, axis=0)
return samples
if __name__ == '__main__':
import matplotlib.pyplot as plt
import time
with tf.Session() as sess:
dist = tfp.distributions.Normal(loc=1.5, scale=5.)
log_prob_fn = dist.log_prob
hmc = tfp.mcmc.HamiltonianMonteCarlo(target_log_prob_fn=log_prob_fn, step_size=0.1,
num_leapfrog_steps=2)
samples, kernel_results = tfp.mcmc.sample_chain(num_results=int(2),
num_burnin_steps=int(3),
num_steps_between_results=int(3),
current_state=0.3, kernel=hmc,
parallel_iterations=80)
result = sess.run(samples)
print(np.average(result), np.std(result))
# maximum = 1.1 * tf.reduce_max(dist.model(tf.random_uniform((10000,), -100, 100)))
maximum = 0.1
# maximum = None
list1 = []
sampled_dist_ar = accept_reject_sample(prob=dist.prob, n=100000000, limits=(-13.5, 16.5), sampler=None,
prob_max=maximum)
start = time.time()
for _ in range(40):
_ = sess.run(sampled_dist_ar)
end = time.time()
print("Time needed for normalization:", end - start)
# plt.hist(sampled_dist_ar, bins=40)
plt.figure()
# plt.hist(result, bins=40)
plt.show()
| [
"tensorflow.shape",
"tensorflow.boolean_mask",
"tensorflow.assert_greater_equal",
"tensorflow.control_dependencies",
"tensorflow.Session",
"tensorflow.random.shuffle",
"tensorflow_probability.mcmc.HamiltonianMonteCarlo",
"tensorflow.concat",
"tensorflow_probability.distributions.Normal",
"numpy.av... | [((6198, 6212), 'tensorflow.to_int64', 'tf.to_int64', (['n'], {}), '(n)\n', (6209, 6212), True, 'import tensorflow as tf\n'), ((12919, 12945), 'tensorflow.concat', 'tf.concat', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (12928, 12945), True, 'import tensorflow as tf\n'), ((673, 702), 'zfit.ztf.constant', 'ztf.constant', (['(1.0)'], {'shape': '(1,)'}), '(1.0, shape=(1,))\n', (685, 702), False, 'from zfit import ztf\n'), ((1690, 1720), 'tensorflow.concat', 'tf.concat', (['rnd_samples'], {'axis': '(0)'}), '(rnd_samples, axis=0)\n', (1699, 1720), True, 'import tensorflow as tf\n'), ((1751, 1794), 'tensorflow.concat', 'tf.concat', (['thresholds_unscaled_list'], {'axis': '(0)'}), '(thresholds_unscaled_list, axis=0)\n', (1760, 1794), True, 'import tensorflow as tf\n'), ((7944, 7970), 'tensorflow.to_int64', 'tf.to_int64', (['n_total_drawn'], {}), '(n_total_drawn)\n', (7955, 7970), True, 'import tensorflow as tf\n'), ((9964, 10017), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['rnd_sample'], {'mask': 'take_or_not', 'axis': '(0)'}), '(rnd_sample, mask=take_or_not, axis=0)\n', (9979, 10017), True, 'import tensorflow as tf\n'), ((10902, 10927), 'tensorflow.random.shuffle', 'tf.random.shuffle', (['sample'], {}), '(sample)\n', (10919, 10927), True, 'import tensorflow as tf\n'), ((13057, 13069), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13067, 13069), True, 'import tensorflow as tf\n'), ((13094, 13138), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': '(1.5)', 'scale': '(5.0)'}), '(loc=1.5, scale=5.0)\n', (13118, 13138), True, 'import tensorflow_probability as tfp\n'), ((13188, 13292), 'tensorflow_probability.mcmc.HamiltonianMonteCarlo', 'tfp.mcmc.HamiltonianMonteCarlo', ([], {'target_log_prob_fn': 'log_prob_fn', 'step_size': '(0.1)', 'num_leapfrog_steps': '(2)'}), '(target_log_prob_fn=log_prob_fn, step_size=\n 0.1, num_leapfrog_steps=2)\n', (13218, 13292), True, 'import tensorflow_probability as 
tfp\n'), ((14186, 14197), 'time.time', 'time.time', ([], {}), '()\n', (14195, 14197), False, 'import time\n'), ((14282, 14293), 'time.time', 'time.time', ([], {}), '()\n', (14291, 14293), False, 'import time\n'), ((14409, 14421), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14419, 14421), True, 'import matplotlib.pyplot as plt\n'), ((14466, 14476), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14474, 14476), True, 'import matplotlib.pyplot as plt\n'), ((970, 1011), 'zfit.ztf.convert_to_tensor', 'ztf.convert_to_tensor', (['lower'], {'dtype': 'dtype'}), '(lower, dtype=dtype)\n', (991, 1011), False, 'from zfit import ztf\n'), ((1032, 1073), 'zfit.ztf.convert_to_tensor', 'ztf.convert_to_tensor', (['upper'], {'dtype': 'dtype'}), '(upper, dtype=dtype)\n', (1053, 1073), False, 'from zfit import ztf\n'), ((1101, 1191), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '(n_partial_to_produce, limits.n_obs + 1)', 'dtype': 'ztypes.float'}), '(shape=(n_partial_to_produce, limits.n_obs + 1), dtype=\n ztypes.float)\n', (1118, 1191), True, 'import tensorflow as tf\n'), ((6764, 6849), 'tensorflow.print', 'tf.print', (['"""Number of samples to produce:"""', 'n_to_produce', '""" with efficiency """', 'eff'], {}), "('Number of samples to produce:', n_to_produce, ' with efficiency ',\n eff)\n", (6772, 6849), True, 'import tensorflow as tf\n'), ((6859, 6914), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['([print_op] if do_print else [])'], {}), '([print_op] if do_print else [])\n', (6882, 6914), True, 'import tensorflow as tf\n'), ((7178, 7199), 'tensorflow.to_int64', 'tf.to_int64', (['(500000.0)'], {}), '(500000.0)\n', (7189, 7199), True, 'import tensorflow as tf\n'), ((8601, 8629), 'tensorflow.reduce_max', 'tf.reduce_max', (['probabilities'], {}), '(probabilities)\n', (8614, 8629), True, 'import tensorflow as tf\n'), ((9654, 9688), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['assert_op'], {}), 
'(assert_op)\n', (9677, 9688), True, 'import tensorflow as tf\n'), ((10118, 10162), 'tensorflow.concat', 'tf.concat', (['[sample, filtered_sample]'], {'axis': '(0)'}), '([sample, filtered_sample], axis=0)\n', (10127, 10162), True, 'import tensorflow as tf\n'), ((10291, 10317), 'zfit.ztf.to_real', 'ztf.to_real', (['n_total_drawn'], {}), '(n_total_drawn)\n', (10302, 10317), False, 'from zfit import ztf\n'), ((13797, 13815), 'numpy.average', 'np.average', (['result'], {}), '(result)\n', (13807, 13815), True, 'import numpy as np\n'), ((13817, 13831), 'numpy.std', 'np.std', (['result'], {}), '(result)\n', (13823, 13831), True, 'import numpy as np\n'), ((6299, 6334), 'tensorflow.shape', 'tf.shape', (['sample'], {'out_type': 'tf.int64'}), '(sample, out_type=tf.int64)\n', (6307, 6334), True, 'import tensorflow as tf\n'), ((8744, 8766), 'tensorflow.reduce_max', 'tf.reduce_max', (['weights'], {}), '(weights)\n', (8757, 8766), True, 'import tensorflow as tf\n'), ((9017, 9319), 'tensorflow.assert_greater_equal', 'tf.assert_greater_equal', ([], {'x': 'weights_scaled', 'y': 'probabilities', 'message': '"""Not all weights are >= probs so the sampling will be biased. If a custom `sample_and_weights` was used, make sure that either the shape of the custom sampler (resp. it\'s weights) overlap better or decrease the `max_weight`"""'}), '(x=weights_scaled, y=probabilities, message=\n "Not all weights are >= probs so the sampling will be biased. If a custom `sample_and_weights` was used, make sure that either the shape of the custom sampler (resp. 
it\'s weights) overlap better or decrease the `max_weight`"\n )\n', (9040, 9319), True, 'import tensorflow as tf\n'), ((872, 897), 'zfit.ztf.to_real', 'ztf.to_real', (['n_to_produce'], {}), '(n_to_produce)\n', (883, 897), False, 'from zfit import ztf\n'), ((900, 917), 'zfit.ztf.to_real', 'ztf.to_real', (['area'], {}), '(area)\n', (911, 917), False, 'from zfit import ztf\n'), ((6499, 6534), 'tensorflow.shape', 'tf.shape', (['sample'], {'out_type': 'tf.int64'}), '(sample, out_type=tf.int64)\n', (6507, 6534), True, 'import tensorflow as tf\n'), ((10249, 10284), 'tensorflow.shape', 'tf.shape', (['sample'], {'out_type': 'tf.int64'}), '(sample, out_type=tf.int64)\n', (10257, 10284), True, 'import tensorflow as tf\n'), ((6955, 6980), 'zfit.ztf.to_real', 'ztf.to_real', (['n_to_produce'], {}), '(n_to_produce)\n', (6966, 6980), False, 'from zfit import ztf\n')] |
'''
do not directly run this script, you should execute the unit test
by launching the "run_test.sh"
'''
import libqpsolver
import os
import time
import progressbar
import numpy as np
from random import random
from cvxopt import matrix, solvers
#show detailed unit test message
verbose = False
#unit test run time and test item numbers
test_suite_exec_times = 1000
test_suite_items = 12
#global variables
sol_diff_cnt = 0
curr_test_num = 0
progress_cnt_max = test_suite_exec_times * test_suite_items
progress = \
progressbar.ProgressBar(maxval=progress_cnt_max, \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def quadprog_cvxopt(P, q, A=None, b=None, A_eq=None, b_eq=None, options=None):
"""
qp solver provided by cvxopt package:
Minimize (1/2)(x.' * P * x) + (q.' * x)
Subject to A * x < b
and A_eq * x = b_eq
"""
#verbose option
options = solvers.options['show_progress'] = verbose
#objective function
P, q = matrix(P), matrix(q)
#inequality constraint
if (A is not None) and (b is not None):
A, b = matrix(A), matrix(b)
else:
A, b = None, None
#equality constraint
if (A_eq is not None) and (b_eq is not None):
A_eq, b_eq = matrix(A_eq), matrix(b_eq)
else:
A_eq, b_eq = None, None
#solve qp
sol = solvers.qp(P, q, A, b, A_eq, b_eq, options);
return np.array(sol['x'])
def matrix_compare(mat1, mat2):
if len(mat1) != len(mat2):
print('dimension of matrices are not equal for comparasion')
return False;
for i in range(len(mat1)):
error_percentage = abs(mat1[i] - mat2[i]) / abs(mat1[i])
#tolerate 5% of error
if error_percentage > 0.05:
return False
return True
def generate_symmetric_positive_definite_matrix(row, column, max_val):
#vec = np.random.rand(row, column)
vec = np.ones((row, column))
for x in np.nditer(vec, op_flags=['readwrite']):
ran = 0
while True:
ran = max_val * random()
if ran > 0.1 * max_val:
break
x[...] = ran
vec_transposed = np.transpose(vec)
spd_matrix = np.matmul(vec, vec_transposed)
spd_matrix = np.multiply(max_val, spd_matrix)
return spd_matrix
def generate_random_row_vector(row, max_val):
vec = np.random.rand(row, 1)
vec = np.multiply(max_val, vec)
return vec
def test_random_2x2_qp_problem(cost_func_max_val):
P = generate_symmetric_positive_definite_matrix(2, 2, cost_func_max_val);
q = generate_random_row_vector(2, cost_func_max_val)
#randomlly turn on / off the inequality constraints
A = None
b = None
if np.random.rand(1, 1) < 0.5:
A = np.array([[+1.0, +1.0],
[-1.0, +2.0],
[+2.0, +1.0]])
b = np.array([[2.0],
[2.0],
[3.0]])
#randomlly turn on / off the equality constraints
A_eq = None;
b_eq = None;
if np.random.rand(1, 1) < 0.5:
A_eq = np.array([[1.0, 1.0]])
b_eq = np.array([[0.0]])
if verbose == True:
print('[Test input matrices]')
print('P = \n%s' %(P))
print('q = \n%s' %(q))
print('A = \n%s' %(A))
print('b = \n%s' %(b))
print('A_eq = \n%s' %(A_eq))
print('b_eq = \n%s\n' %(b_eq))
if verbose == True:
print('[debug message from CVXOPT]')
cvxopt_sol = quadprog_cvxopt(P, q, A, b, A_eq, b_eq, None)
if verbose == True:
print('\n[debug message from libqpsolver]')
libqpsolver_sol = libqpsolver.quadprog(P, q, A, b, A_eq, b_eq, None, None)
if verbose == True:
print('\n[Optimal solution by CVXOPT]')
print(cvxopt_sol)
print('\n[Optimal solution by libqpsolver]')
print(libqpsolver_sol)
cost_libqpsolver = qp_cost_eval(libqpsolver_sol, P, q)
cost_cvxopt = qp_cost_eval(cvxopt_sol, P, q)
#test_result = matrix_compare(cvxopt_sol, libqpsolver_sol)
test_result = qp_cost_compare(cost_cvxopt, cost_libqpsolver)
global sol_diff_cnt
global curr_test_num
curr_test_num = curr_test_num + 1
if test_result == True:
if verbose == True:
print(f"{bcolors.OKGREEN}\n[unit test of #%d is passed]{bcolors.ENDC}" %(curr_test_num))
else:
if verbose == True:
print(f"{bcolors.FAIL}\n[unit test of #%d is failed]{bcolors.ENDC}" %(curr_test_num))
sol_diff_cnt = sol_diff_cnt + 1
if verbose == True:
print('===============================================================')
return test_result
def qp_cost_eval(x, P, q):
xt = np.transpose(x)
Px = np.matmul(P, x)
xPx = np.matmul(xt, Px)
xPx = xPx / 2
qt = np.transpose(q)
qx = np.matmul(qt, x)
cost = xPx + qx
return cost
def qp_cost_compare(cost_cvxopt, cost_libqpsolver):
abs_diff = abs(cost_cvxopt - cost_libqpsolver)
error_percentage = abs_diff / abs(cost_cvxopt)
#pass if difference is smaller than 10%
if(error_percentage < 0.1):
return True
else:
return False
def test_random_NxN_qp_problem(N, cost_func_max_val):
P = generate_symmetric_positive_definite_matrix(N, N, cost_func_max_val);
q = generate_random_row_vector(N, cost_func_max_val)
#randomlly turn on / off the inequality constraints
A = None
b = None
if np.random.rand(1, 1) < 0.5:
A = np.identity(N)
b = np.zeros((N, 1))
for i in range(N):
while True:
ran = np.random.rand(1, 1)
if(ran > 0.1 and ran < 0.9):
b[i, 0] = N * ran
break
#randomlly turn on / off the equality constraints
A_eq = None
b_eq = None
if np.random.rand(1, 1) < 0.5:
A_eq = np.ones((1, N));
b_eq = np.array([[0.0]])
if verbose == True:
print('[Test input matrices]')
print('P = \n%s' %(P))
print('q = \n%s' %(q))
print('A = \n%s' %(A))
print('b = \n%s' %(b))
print('A_eq = \n%s' %(A_eq))
print('b_eq = \n%s\n' %(b_eq))
if verbose == True:
print('[debug message from CVXOPT]')
cvxopt_sol = quadprog_cvxopt(P, q, A, b, A_eq, b_eq, None)
if verbose == True:
print('\n[debug message from libqpsolver]')
libqpsolver_sol = libqpsolver.quadprog(P, q, A, b, A_eq, b_eq, None, None)
if verbose == True:
print('\n[Optimal solution by CVXOPT]')
print(cvxopt_sol)
print('\n[Optimal solution by libqpsolver]')
print(libqpsolver_sol)
cost_libqpsolver = qp_cost_eval(libqpsolver_sol, P, q)
cost_cvxopt = qp_cost_eval(cvxopt_sol, P, q)
#test_result = matrix_compare(cvxopt_sol, libqpsolver_sol)
test_result = qp_cost_compare(cost_cvxopt, cost_libqpsolver)
if test_result == False:
print("libqpsolver cost = %f" %(cost_libqpsolver))
print("cvxopt cost = %f" %(cost_cvxopt))
if verbose == True:
print("libqpsolver cost = %f" %(cost_libqpsolver))
print("cvxopt cost = %f" %(cost_cvxopt))
global sol_diff_cnt
global curr_test_num
curr_test_num = curr_test_num + 1
if test_result == True:
if verbose == True:
print(f"{bcolors.OKGREEN}\n[unit test of #%d is passed]{bcolors.ENDC}" %(curr_test_num))
else:
if verbose == True:
print(f"{bcolors.FAIL}\n[unit test of #%d is failed]{bcolors.ENDC}" %(curr_test_num))
sol_diff_cnt = sol_diff_cnt + 1
if verbose == True:
print('===============================================================')
return test_result
def test_libqpsolver():
print(f"{bcolors.BOLD}start the unit test of quadratic programming solver{bcolors.ENDC}")
print('test items: %d' %(test_suite_exec_times * test_suite_items))
#progress bar
progress.start()
progress.update(0)
time_start = time.time()
time_last = time_start
for i in range(0, test_suite_exec_times):
time_now = time.time()
#update the progress bar every 10 seconds
if (time_now - time_last) > 10:
time_last = time_now
progress.update(i * test_suite_items)
print('\nelapsed time: %d seconds' %(time_now - time_start))
#test 3x3 problems with simple constraints
test_random_NxN_qp_problem(3, 100)
test_random_NxN_qp_problem(3, 500)
test_random_NxN_qp_problem(3, 1000)
test_random_NxN_qp_problem(3, 10000)
#test 15x15 problems with simple constraints
test_random_NxN_qp_problem(15, 100)
test_random_NxN_qp_problem(15, 500)
test_random_NxN_qp_problem(15, 1000)
test_random_NxN_qp_problem(15, 10000)
#test 50x50 problems with simple constraints
test_random_NxN_qp_problem(50, 100)
test_random_NxN_qp_problem(50, 500)
test_random_NxN_qp_problem(50, 1000)
test_random_NxN_qp_problem(50, 10000)
progress.finish()
time_now = time.time()
print('elapsed time: %d seconds' %(time_now - time_start))
total_test_times = test_suite_items * test_suite_exec_times
correctness = (1.0 - (sol_diff_cnt / total_test_times)) * 100.0
print(f"{bcolors.BOLD}unit test total run times = %d{bcolors.ENDC}" %(total_test_times))
print(f"-> %d of %d failed" %(sol_diff_cnt, total_test_times))
#if error count exceed 0.1% of total test count, than the solver is not stable
if (sol_diff_cnt / total_test_times) > 0.001:
print(f"{bcolors.FAIL}[failed] correctness = %f%%{bcolors.ENDC}" %(correctness))
print(f"{bcolors.FAIL}the solver is unstable due to the correctness is lower than 99%{bcolors.ENDC}")
exit(1)
else:
print(f"{bcolors.OKGREEN}[passed] correctness = %f%%{bcolors.ENDC}" %(correctness))
exit(0)
def main():
test_libqpsolver()
if __name__ == "__main__": main()
| [
"numpy.identity",
"progressbar.Bar",
"numpy.multiply",
"numpy.ones",
"numpy.random.rand",
"numpy.nditer",
"numpy.array",
"numpy.zeros",
"numpy.matmul",
"progressbar.Percentage",
"cvxopt.matrix",
"libqpsolver.quadprog",
"cvxopt.solvers.qp",
"random.random",
"numpy.transpose",
"time.time... | [((1597, 1640), 'cvxopt.solvers.qp', 'solvers.qp', (['P', 'q', 'A', 'b', 'A_eq', 'b_eq', 'options'], {}), '(P, q, A, b, A_eq, b_eq, options)\n', (1607, 1640), False, 'from cvxopt import matrix, solvers\n'), ((1653, 1671), 'numpy.array', 'np.array', (["sol['x']"], {}), "(sol['x'])\n", (1661, 1671), True, 'import numpy as np\n'), ((2155, 2177), 'numpy.ones', 'np.ones', (['(row, column)'], {}), '((row, column))\n', (2162, 2177), True, 'import numpy as np\n'), ((2191, 2229), 'numpy.nditer', 'np.nditer', (['vec'], {'op_flags': "['readwrite']"}), "(vec, op_flags=['readwrite'])\n", (2200, 2229), True, 'import numpy as np\n'), ((2406, 2423), 'numpy.transpose', 'np.transpose', (['vec'], {}), '(vec)\n', (2418, 2423), True, 'import numpy as np\n'), ((2442, 2472), 'numpy.matmul', 'np.matmul', (['vec', 'vec_transposed'], {}), '(vec, vec_transposed)\n', (2451, 2472), True, 'import numpy as np\n'), ((2490, 2522), 'numpy.multiply', 'np.multiply', (['max_val', 'spd_matrix'], {}), '(max_val, spd_matrix)\n', (2501, 2522), True, 'import numpy as np\n'), ((2603, 2625), 'numpy.random.rand', 'np.random.rand', (['row', '(1)'], {}), '(row, 1)\n', (2617, 2625), True, 'import numpy as np\n'), ((2636, 2661), 'numpy.multiply', 'np.multiply', (['max_val', 'vec'], {}), '(max_val, vec)\n', (2647, 2661), True, 'import numpy as np\n'), ((3873, 3929), 'libqpsolver.quadprog', 'libqpsolver.quadprog', (['P', 'q', 'A', 'b', 'A_eq', 'b_eq', 'None', 'None'], {}), '(P, q, A, b, A_eq, b_eq, None, None)\n', (3893, 3929), False, 'import libqpsolver\n'), ((4941, 4956), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (4953, 4956), True, 'import numpy as np\n'), ((4966, 4981), 'numpy.matmul', 'np.matmul', (['P', 'x'], {}), '(P, x)\n', (4975, 4981), True, 'import numpy as np\n'), ((4992, 5009), 'numpy.matmul', 'np.matmul', (['xt', 'Px'], {}), '(xt, Px)\n', (5001, 5009), True, 'import numpy as np\n'), ((5038, 5053), 'numpy.transpose', 'np.transpose', (['q'], {}), '(q)\n', (5050, 5053), 
True, 'import numpy as np\n'), ((5063, 5079), 'numpy.matmul', 'np.matmul', (['qt', 'x'], {}), '(qt, x)\n', (5072, 5079), True, 'import numpy as np\n'), ((6655, 6711), 'libqpsolver.quadprog', 'libqpsolver.quadprog', (['P', 'q', 'A', 'b', 'A_eq', 'b_eq', 'None', 'None'], {}), '(P, q, A, b, A_eq, b_eq, None, None)\n', (6675, 6711), False, 'import libqpsolver\n'), ((8230, 8241), 'time.time', 'time.time', ([], {}), '()\n', (8239, 8241), False, 'import time\n'), ((9325, 9336), 'time.time', 'time.time', ([], {}), '()\n', (9334, 9336), False, 'import time\n'), ((1241, 1250), 'cvxopt.matrix', 'matrix', (['P'], {}), '(P)\n', (1247, 1250), False, 'from cvxopt import matrix, solvers\n'), ((1252, 1261), 'cvxopt.matrix', 'matrix', (['q'], {}), '(q)\n', (1258, 1261), False, 'from cvxopt import matrix, solvers\n'), ((2955, 2975), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (2969, 2975), True, 'import numpy as np\n'), ((2995, 3047), 'numpy.array', 'np.array', (['[[+1.0, +1.0], [-1.0, +2.0], [+2.0, +1.0]]'], {}), '([[+1.0, +1.0], [-1.0, +2.0], [+2.0, +1.0]])\n', (3003, 3047), True, 'import numpy as np\n'), ((3104, 3135), 'numpy.array', 'np.array', (['[[2.0], [2.0], [3.0]]'], {}), '([[2.0], [2.0], [3.0]])\n', (3112, 3135), True, 'import numpy as np\n'), ((3276, 3296), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (3290, 3296), True, 'import numpy as np\n'), ((3319, 3341), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (3327, 3341), True, 'import numpy as np\n'), ((3357, 3374), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (3365, 3374), True, 'import numpy as np\n'), ((5681, 5701), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (5695, 5701), True, 'import numpy as np\n'), ((5721, 5735), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (5732, 5735), True, 'import numpy as np\n'), ((5748, 5764), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (5756, 
5764), True, 'import numpy as np\n'), ((6064, 6084), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (6078, 6084), True, 'import numpy as np\n'), ((6107, 6122), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (6114, 6122), True, 'import numpy as np\n'), ((6139, 6156), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (6147, 6156), True, 'import numpy as np\n'), ((8335, 8346), 'time.time', 'time.time', ([], {}), '()\n', (8344, 8346), False, 'import time\n'), ((591, 621), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (606, 621), False, 'import progressbar\n'), ((628, 652), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (650, 652), False, 'import progressbar\n'), ((1349, 1358), 'cvxopt.matrix', 'matrix', (['A'], {}), '(A)\n', (1355, 1358), False, 'from cvxopt import matrix, solvers\n'), ((1360, 1369), 'cvxopt.matrix', 'matrix', (['b'], {}), '(b)\n', (1366, 1369), False, 'from cvxopt import matrix, solvers\n'), ((1503, 1515), 'cvxopt.matrix', 'matrix', (['A_eq'], {}), '(A_eq)\n', (1509, 1515), False, 'from cvxopt import matrix, solvers\n'), ((1517, 1529), 'cvxopt.matrix', 'matrix', (['b_eq'], {}), '(b_eq)\n', (1523, 1529), False, 'from cvxopt import matrix, solvers\n'), ((2295, 2303), 'random.random', 'random', ([], {}), '()\n', (2301, 2303), False, 'from random import random\n'), ((5839, 5859), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (5853, 5859), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import os
import sys
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
import numpy as np
import scipy.constants as scc
## This script performs various checks for the proton boron nuclear fusion module. The simulation
## that we check is made of 5 different tests, each with different proton, boron and alpha species.
##
## The first test is performed in the proton-boron center of mass frame. It could correspond to the
## physical case of a proton beam colliding with a boron beam. The kinetic energy of the colliding
## particles depends on the cell number in the z direction and varies in the few keV to few MeV
## range. All the particles within a cell have the exact same momentum, which allows detailed
## checks of the energy of produced alpha particles. The proton and boron species have the same
## density and number of particles in this test. The number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The second test is performed in the boron rest frame. It corresponds to the physical case of a
## low density proton beam colliding with a high-density proton+boron target. The energy of the
## proton beam is varied in the few keV to few MeV range, depending on the cell number in the z
## direction. As in the previous case, all the particles within a cell have the exact same
## momentum, which allows detailed checks of the energy of produced alpha particles. In this test,
## there are 100 immobile boron and 100 immobile proton macroparticles per cell, as well as 900
## beam proton macroparticles per cell. The density of the immobile particles is 6 orders of
## magnitude higher than the number of beam particles, which means that they have a much higher
## weight. This test is similar to the example given in section 3 of Higginson et al.,
## Journal of Computational Physics, 388, 439–453 (2019), which was found to be sensitive to the way
## unsampled pairs are accounted for. As before, the number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The third test corresponds to a Maxwellian plasma with a 44 keV temperature. The alpha yield is
## directly compared to the analytical fits of <NAME> and <NAME>, Nuclear Fusion, 40, 865
## (2000) for a thermal plasma.
##
## The fourth test corresponds to a plasma with an extremely small boron density, so that all boron
## macroparticles should have disappeared by the end of the simulation, which we verify.
##
## The fifth test is exactly the same as the fourth test, except that the
## fusion_probability_threshold parameter is increased to an excessive value. Because of that, we
## severely underestimate the fusion yield and boron macroparticles remain at the end of the
## simulation, which we verify.
##
## In all simulations, we check particle number, charge, momentum and energy conservation and
## perform basic checks regarding the produced particles. When possible, we also compare the number
## of produced macroparticles, fusion yield and energy of the produced particles to theoretical
## values.
##
## Please be aware that the relative tolerances are often set empirically in this analysis script,
## so it would not be surprising that some tolerances need to be increased in the future.
default_tol = 1.e-12 # Default relative tolerance
## Some physical parameters
keV_to_Joule = scc.e*1e3
MeV_to_Joule = scc.e*1e6
barn_to_square_meter = 1.e-28
m_p = scc.m_p # Proton mass
m_b = 10.9298*m_p # Boron 11 mass
m_reduced = m_p*m_b/(m_p+m_b)
m_a = 3.97369*m_p # Alpha mass
m_be = 7.94748*m_p # Beryllium 8 mass
Z_boron = 5.  # Boron atomic number
Z_proton = 1.  # Proton atomic number
# Gamow energy: sets the exponential tunneling suppression of the cross section
E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2
E_Gamow_MeV = E_Gamow/MeV_to_Joule
E_Gamow_keV = E_Gamow/keV_to_Joule
E_fusion = 8.59009*MeV_to_Joule # Energy released during p + B -> alpha + Be
E_decay = 0.0918984*MeV_to_Joule # Energy released during Be -> 2*alpha
E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha
## Some numerical parameters for this test
size_x = 8
size_y = 8
size_z = 16
dV_total = size_x*size_y*size_z # Total simulation volume
# Volume of a slice corresponding to a single cell in the z direction. In tests 1 and 2, all the
# particles of a given species in the same slice have the exact same momentum
dV_slice = size_x*size_y
# NOTE(review): presumably the 3D CFL limit with unit cell size (c*dt = dx/sqrt(3)) — confirm
# against the simulation input deck
dt = 1./(scc.c*np.sqrt(3.))
# In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2
Energy_step = 22.*keV_to_Joule
def is_close(val1, val2, rtol=default_tol, atol=0.):
    ## Wrapper around numpy.isclose, used to override the default tolerances.
    ## atol defaults to 0 so that comparisons stay purely relative unless an
    ## absolute tolerance is explicitly requested by the caller.
    return np.isclose(val1, val2, rtol=rtol, atol=atol)
def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
    ## Copy the yt particle fields of the given species into data_dict, under
    ## keys of the form "<prefix>_<short name>_<suffix>".
    field_map = {"px": "particle_momentum_x",
                 "py": "particle_momentum_y",
                 "pz": "particle_momentum_z",
                 "w": "particle_weight",
                 "id": "particle_id",
                 "cpu": "particle_cpu",
                 "z": "particle_position_z"}
    for short_name, yt_field in field_map.items():
        data_dict[prefix+"_"+short_name+"_"+suffix] = yt_ad[species_name, yt_field].v
def add_empty_species_to_dict(data_dict, species_name, prefix, suffix):
    ## Fill data_dict with zero-length arrays for every particle field of a
    ## species that is absent from the plotfile (same keys as
    ## add_existing_species_to_dict).
    for short_name in ("px", "py", "pz", "w", "id", "cpu", "z"):
        data_dict[prefix+"_"+short_name+"_"+suffix] = np.empty(0)
def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
    ## Add the particle fields of species_name to data_dict, tolerating species
    ## that have entirely disappeared from the simulation.
    try:
        ## If species exist, we add its data to the dictionary
        add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix)
    except yt.utilities.exceptions.YTFieldNotFound:
        ## If species does not exist, we avoid python crash and add empty arrays to the
        ## dictionary. Currently, this happens for the boron species in test number 4, which
        ## entirely fuses into alphas.
        add_empty_species_to_dict(data_dict, species_name, prefix, suffix)
def check_particle_number_conservation(data):
    ## Verify that fusion only converts (proton, boron) pairs into alpha
    ## triplets: as many protons as borons are consumed, and each consumed pair
    ## yields exactly 3 alphas.
    w_proton_initial = np.sum(data["proton_w_start"])
    w_proton_final = np.sum(data["proton_w_end"])
    w_boron_initial = np.sum(data["boron_w_start"])
    w_boron_final = np.sum(data["boron_w_end"])
    w_alpha_created = np.sum(data["alpha_w_end"])
    proton_loss = w_proton_initial - w_proton_final
    boron_loss = w_boron_initial - w_boron_final
    assert(proton_loss >= 0.)
    assert(boron_loss >= 0.)
    assert(w_alpha_created >= 0.)
    ## The same number of protons and borons must have been consumed
    assert_scale = max(w_proton_initial, w_boron_initial)
    assert(is_close(proton_loss, boron_loss, rtol = 0., atol = default_tol*assert_scale))
    ## The number of consumed particles corresponds to the number of produced
    ## alphas; factor 3 because each fusion reaction produces 3 alphas
    assert(is_close(w_proton_initial, w_proton_final + w_alpha_created/3.))
    assert(is_close(w_boron_initial, w_boron_final + w_alpha_created/3.))
def compute_energy_array(data, species_name, suffix, m):
    ## Return the relativistic kinetic energy (in J) of every macroparticle of
    ## the given species, from the momentum components stored in data.
    px = data[species_name+'_px_'+suffix]
    py = data[species_name+'_py_'+suffix]
    pz = data[species_name+'_pz_'+suffix]
    psq_array = px**2 + py**2 + pz**2
    rest_energy = m*scc.c**2
    ## E_kin = sqrt(p^2 c^2 + m^2 c^4) - m c^2
    return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy
def check_energy_conservation(data):
    ## Verify that the final total kinetic energy equals the initial kinetic
    ## energy plus the energy released by all the fusion reactions.
    def weighted_kinetic_energy(species, suffix, mass):
        ## Weighted sum of the kinetic energy of one species at one time
        return np.sum(compute_energy_array(data, species, suffix, mass)
                      *data[species+"_w_"+suffix])
    total_energy_start = weighted_kinetic_energy("proton", "start", m_p) + \
                         weighted_kinetic_energy("boron", "start", m_b)
    total_energy_end = weighted_kinetic_energy("proton", "end", m_p) + \
                       weighted_kinetic_energy("boron", "end", m_b) + \
                       weighted_kinetic_energy("alpha", "end", m_a)
    ## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
    n_fusion_reaction = np.sum(data["alpha_w_end"])/3.
    assert(is_close(total_energy_end,
                    total_energy_start + n_fusion_reaction*E_fusion_total,
                    rtol = 1.e-8))
def check_momentum_conservation(data):
    ## Verify conservation of each momentum component, summed over all species
    ## (protons + borons before, protons + borons + alphas after).
    for comp in ("px", "py", "pz"):
        total_start = np.sum(data["proton_"+comp+"_start"]*data["proton_w_start"]) + \
                      np.sum(data["boron_"+comp+"_start"]*data["boron_w_start"])
        total_end = np.sum(data["proton_"+comp+"_end"]*data["proton_w_end"]) + \
                    np.sum(data["boron_"+comp+"_end"]*data["boron_w_end"]) + \
                    np.sum(data["alpha_"+comp+"_end"]*data["alpha_w_end"])
        ## Absolute tolerance is needed because sometimes the initial momentum
        ## is exactly 0
        assert(is_close(total_start, total_end, atol=1.e-15))
def check_id(data):
    ## A created particle is uniquely identified by the pair (id, cpu): two
    ## particles born on different cpus may share the same id. Pack each pair
    ## into a complex number and check that all pairs are distinct.
    pair_as_complex = data["alpha_id_end"] + 1j*data["alpha_cpu_end"]
    assert(pair_as_complex.shape == np.unique(pair_as_complex).shape)
def basic_product_particles_check(data):
    ## Each nuclear fusion reaction in the code creates 6 alpha macroparticles,
    ## so the total number of alpha macroparticles must be a multiple of 6.
    num_alpha = data["alpha_w_end"].shape[0]
    assert(num_alpha%6 == 0)
    ## The 6 macroparticles born in a single fusion event all share the same
    ## weight.
    for k in range(1, 6):
        assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][k::6]))
    ## Within each sextuplet, the macroparticles are emitted as 3 pairs with
    ## identical momentum: (1st, 2nd), (3rd, 4th) and (5th, 6th).
    for comp in ("px", "py", "pz"):
        momenta = data["alpha_"+comp+"_end"]
        for pair_start in (0, 2, 4):
            assert(np.array_equal(momenta[pair_start::6], momenta[pair_start+1::6]))
def generic_check(data):
    ## Checks performed on every test: conservation laws, uniqueness of the
    ## created particle identifiers and basic sanity of the produced alpha
    ## macroparticles.
    check_particle_number_conservation(data)
    check_energy_conservation(data)
    check_momentum_conservation(data)
    check_id(data)
    basic_product_particles_check(data)
def check_isotropy(data, relative_tolerance):
    ## The alpha emission should be isotropic: the second moments of the three
    ## momentum components must agree within the given relative tolerance.
    mean_sq = {comp: np.average(data["alpha_"+comp+"_end"]*data["alpha_"+comp+"_end"])
               for comp in ("px", "py", "pz")}
    assert(is_close(mean_sq["px"], mean_sq["py"], rtol = relative_tolerance))
    assert(is_close(mean_sq["px"], mean_sq["pz"], rtol = relative_tolerance))
def astrophysical_factor_lowE(E):
    ## E is in keV
    ## Returns the astrophysical S-factor in MeV b, using the low energy fit
    ## (valid for E < 400 keV) given by equation (2) of Nuclear Fusion, 40,
    ## 865 (2000).
    ## Smooth polynomial background
    polynomial_part = 197. + 0.24*E + 2.31e-4*E**2
    ## Lorentzian accounting for the resonance at 148 keV (width 2.35 keV)
    resonance_part = 1.82e4/((E-148.)**2 + 2.35**2)
    return polynomial_part + resonance_part
def astrophysical_factor_midE(E):
    ## E is in keV
    ## Returns the astrophysical S-factor in MeV b, using the mid energy fit
    ## (valid for 400 keV < E < 642 keV) given by equation (3) of
    ## Nuclear Fusion, 40, 865 (2000).
    ## Energy rescaled so that E_norm = 0 at 400 keV and E_norm = 1 at 500 keV
    E_norm = (E - 400.)/100.
    return 330. + 66.1*E_norm + (-20.3)*E_norm**2 + (-1.58)*E_norm**5
def astrophysical_factor_highE(E):
    ## E is in keV
    ## Returns the astrophysical S-factor in MeV b, using the high energy fit
    ## (valid for 642 keV < E < 3500 keV) given by equation (4) of
    ## Nuclear Fusion, 40, 865 (2000).
    ## Four Lorentzian resonances (amplitude, position in keV, width in keV)
    resonances = ((2.57e6, 581.3, 85.7),
                  (5.67e5, 1083., 234.),
                  (1.34e5, 2405., 138.),
                  (5.68e5, 3344., 309.))
    result = 0.
    for amplitude, position, width in resonances:
        result = result + amplitude/((E-position)**2 + width**2)
    ## Constant background term
    return result + 4.38
def astrophysical_factor(E):
    ## E is in keV (scalar or numpy array)
    ## Returns the astrophysical S-factor in MeV b, selecting elementwise the
    ## relevant fit of Nuclear Fusion, 40, 865 (2000) for each energy range.
    fits = (astrophysical_factor_lowE,
            astrophysical_factor_midE,
            astrophysical_factor_highE)
    conditions = [E <= 400, E <= 642, E > 642]
    ## np.select picks the first matching condition, so the low energy fit wins
    ## below 400 keV even though both first conditions hold there
    return np.select(conditions, [fit(E) for fit in fits])
def pb_cross_section_buck_fit(E):
    ## E is in MeV
    ## Returns the cross section in b, from a power law fit of the data of
    ## Buck et al., Nuclear Physics A, 398(2), 189-202 (1983), used in the
    ## range E > 3.5 MeV.
    anchor_energy = 3.5
    ## Value of the fitted cross section at the 3.5 MeV anchor point
    anchor_cross_section = 0.2168440845211521
    power_law_exponent = -2.661840717596765
    return anchor_cross_section*(E/anchor_energy)**power_law_exponent
def pb_cross_section(E):
    ## E is in keV (scalar or numpy array)
    ## Returns the proton-boron fusion cross section in b. Below 3.5 MeV the
    ## fits of Nuclear Fusion, 40, 865 (2000) are used; above 3.5 MeV a power
    ## law fit of Buck et al., Nuclear Physics A, 398(2), 189-202 (1983).
    E_MeV = E/1.e3
    ## sigma = S(E)/E * exp(-sqrt(E_Gamow/E)): S-factor divided by energy times
    ## the Gamow tunneling suppression
    low_energy_value = astrophysical_factor(E)/E_MeV * np.exp(-np.sqrt(E_Gamow_MeV / E_MeV))
    conditions = [E <= 3500, E > 3500]
    return np.select(conditions, [low_energy_value, pb_cross_section_buck_fit(E_MeV)])
def E_com_to_p_sq_com(m1, m2, E):
    ## E is the total (kinetic+mass) energy of a two particle (with mass m1 and
    ## m2) system in its center of mass frame, in J.
    ## Returns the square norm of the momentum of each particle in that frame,
    ## obtained by inverting E = sqrt(p^2c^2 + m1^2c^4) + sqrt(p^2c^2 + m2^2c^4).
    m_sq_sum = m1**2 + m2**2
    m_sq_diff = m1**2 - m2**2
    return E**2/(4.*scc.c**2) - m_sq_sum*scc.c**2/2. + \
           scc.c**6/(4.*E**2)*(m_sq_diff**2)
def compute_relative_v_com(E):
    ## E is the kinetic energy of proton+boron in the center of mass frame, in keV
    ## Returns the relative velocity between proton and boron in this frame, in m/s
    E_total = E*keV_to_Joule + (m_p + m_b)*scc.c**2
    p_sq = E_com_to_p_sq_com(m_p, m_b, E_total)
    p_norm = np.sqrt(p_sq)
    ## v = p/(gamma*m) for each species; the two particles move in opposite
    ## directions in this frame so the relative velocity is the sum
    v_proton = p_norm/(np.sqrt(1. + p_sq / (m_p*scc.c)**2)*m_p)
    v_boron = p_norm/(np.sqrt(1. + p_sq / (m_b*scc.c)**2)*m_b)
    return v_proton+v_boron
def expected_alpha_weight_com(E_com, proton_density, boron_density, dV, dt):
    ## Expected number of alphas produced during dt in a volume dV, for protons
    ## and borons colliding with kinetic energy E_com (in keV) in their center
    ## of mass frame.
    assert(np.all(E_com>=0))
    ## The E_com == 0 case is handled separately to avoid a division by zero
    ## when evaluating the cross section
    E_com_never_zero = np.clip(E_com, 1.e-15, None)
    sigma_times_vrel = np.select([E_com == 0, E_com > 0],
                                 [0., pb_cross_section(E_com_never_zero)
                                      *compute_relative_v_com(E_com_never_zero)])
    ## Factor 3 is here because each fusion reaction produces 3 alphas
    return 3.*proton_density*boron_density*sigma_times_vrel*barn_to_square_meter*dV*dt
def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell):
    ## Checks that the number of alpha macroparticles is as expected for the
    ## first and second tests. Returns the expected number of fusion events,
    ## which is reused by check_alpha_yield.
    ## The first slice 0 < z < 1 does not contribute to alpha creation
    numcells = dV_total - dV_slice
    ## In these tests, the fusion_multiplier is so high that the fusion
    ## probability per pair saturates at fusion_probability_target_value
    expected_fusion_number = numcells*num_pair_per_cell*fusion_probability_target_value
    ## Each fusion event produces 6 alpha macroparticles
    expected_macroparticle_number = 6.*expected_fusion_number
    std_macroparticle_number = 6.*np.sqrt(expected_fusion_number)
    actual_macroparticle_number = data["alpha_w_end"].shape[0]
    ## 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
    assert(is_close(actual_macroparticle_number, expected_macroparticle_number, rtol = 0.,
                    atol = 5.*std_macroparticle_number))
    ## used in subsequent function
    return expected_fusion_number
def p_sq_boron_frame_to_E_COM_frame(p_proton_sq):
    ## Takes the proton square norm of the momentum in the boron rest frame and
    ## returns the total kinetic energy in the center of mass frame.
    ## Everything is in SI units.
    ## Total (kinetic + mass) energy in the lab frame
    E_lab = np.sqrt(p_proton_sq*scc.c**2 + (m_p*scc.c**2)**2) + m_b*scc.c**2
    ## The invariant E**2 - p**2c**2 of the 4-momentum norm gives the total
    ## energy in the center of mass frame
    E_com = np.sqrt(E_lab**2 - p_proton_sq*scc.c**2)
    ## Subtract the rest mass energy to get the kinetic energy
    return E_com - (m_b+scc.m_p)*scc.c**2
def p_sq_to_kinetic_energy(p_sq, m):
    ## Returns the kinetic energy (in J) of a particle of mass m as a function
    ## of its squared momentum, using the relativistic dispersion relation.
    ## Everything is in SI units.
    rest_energy = m*scc.c**2
    return np.sqrt(p_sq*scc.c**2 + rest_energy**2) - rest_energy
def compute_E_com1(data):
    ## Computes the kinetic energy (in J) in the center of mass frame for the
    ## first test, for each slice in the z direction.
    ## In this test, the proton and boron of a cell carry the same momentum
    ## norm, which squared is 2*m_reduced*Energy_step*i**2 in cell number i
    p_sq = 2.*m_reduced*(Energy_step*np.arange(size_z)**2)
    boron_kinetic = p_sq_to_kinetic_energy(p_sq, m_b)
    proton_kinetic = p_sq_to_kinetic_energy(p_sq, m_p)
    return boron_kinetic + proton_kinetic
def compute_E_com2(data):
    ## Computes the kinetic energy (in J) in the center of mass frame for the
    ## second test (performed in the boron rest frame), for each slice in the
    ## z direction.
    ## Proton squared momentum norm as a function of the cell number in z
    proton_p_sq = 2.*m_p*(Energy_step*np.arange(size_z)**2)
    return p_sq_boron_frame_to_E_COM_frame(proton_p_sq)
def check_alpha_yield(data, expected_fusion_number, E_com, proton_density, boron_density):
    ## Checks that the fusion yield in each z slice matches the theoretical
    ## expectation for the first and second tests.
    ## Proton and boron densities are in m^-3.
    theory_yield = expected_alpha_weight_com(E_com/keV_to_Joule, proton_density,
                                             boron_density, dV_slice, dt)
    ## Total alpha weight deposited in each z slice
    simulated_yield = np.histogram(data["alpha_z_end"], bins=size_z, range=(0, size_z),
                                   weights = data["alpha_w_end"])[0]
    ## -1 is here because the first slice 0 < z < 1 does not contribute to
    ## alpha creation
    fusion_number_per_slice = expected_fusion_number/(size_z-1)
    relative_std = 1./np.sqrt(fusion_number_per_slice)
    ## 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
    assert(np.all(is_close(theory_yield, simulated_yield, rtol = 5.*relative_std)))
def check_initial_energy1(data, E_com):
    ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
    ## takes place in two steps:
    ## (1): proton + boron 11 -> alpha + beryllium 8
    ## (2): beryllium 8 -> alpha + alpha
    ## The alpha generated in the first step (labeled alpha1) generally has a different initial
    ## energy distribution than the alphas generated in the second step (labeled alpha2 and
    ## alpha3).
    ## In the first test, we are in the center of mass frame. Therefore, the momentum of alpha1 is
    ## entirely determined by the energy in the center of mass frame, so we check in this function
    ## that the energy of the alpha1 macroparticles is as expected. On the other hand, the energy
    ## of alpha2 and alpha3 follows a continuous distribution within a given range. In this test,
    ## we check that this range is as expected by comparing the maximum and minimum energy of the
    ## obtained macroparticles to the theoretical maximum and minimum.
    ## Note that in the simulations, 6 macroparticles are generated for each fusion event.
    ## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
    ## The fifth and sixth are alpha3.
    energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
    z_alpha = data["alpha_z_end"]
    # Loop over all slices (i.e. cells in the z direction)
    for slice_number in range(1, size_z):
        ## Kinetic energy in the center of mass frame before fusion
        E_kinetic_com_before = E_com[slice_number]
        ## Total (kinetic + mass) energy in the center of mass frame after
        ## proton + boron 11 -> alpha + beryllium 8
        E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be)*scc.c**2
        ## Corresponding momentum norm squared of alpha1/beryllium
        p_sq_after = E_com_to_p_sq_com(m_a, m_be, E_total_com_after)
        ## Corresponding kinetic energy for alpha1
        energy_alpha1_theory = p_sq_to_kinetic_energy(p_sq_after, m_a)
        ## Corresponding kinetic energy for beryllium
        energy_beryllium_theory = p_sq_to_kinetic_energy(p_sq_after, m_be)
        ## Corresponding kinetic energy for alpha2 + alpha3 after beryllium decay
        energy_alpha2_plus_3_theory = energy_beryllium_theory + E_decay
        ## Compute the theoretical maximum and minimum energy of alpha2 and alpha3. This
        ## calculation is done nonrelativistically, by noting that the maximum (minimum) energy
        ## corresponds to an alpha emitted exactly in the (opposite) direction of the beryllium
        ## in the center of mass frame. This calculation involves solving a polynomial equation of
        ## order 2 in p_alpha23.
        max_p_alpha23 = 0.5*(np.sqrt(p_sq_after) + \
                             np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
        min_p_alpha23 = 0.5*(np.sqrt(p_sq_after) - \
                             np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
        max_energy_alpha23 = max_p_alpha23**2/(2.*m_a)
        min_energy_alpha23 = min_p_alpha23**2/(2.*m_a)
        ## Get the energy of all alphas in the slice
        energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
                                                     (z_alpha < (slice_number + 1))]
        ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
        energy_alpha1_simulation = energy_alpha_slice[::6]
        ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
        energy_alpha2_simulation = energy_alpha_slice[2::6]
        ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
        energy_alpha3_simulation = energy_alpha_slice[4::6]
        assert(np.all(is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.e-8)))
        assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.e-2))
        assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=1.e-2))
        assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.e-2))
        assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=1.e-2))
def check_initial_energy2(data):
    ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
    ## takes place in two steps:
    ## (1): proton + boron 11 -> alpha + beryllium 8
    ## (2): beryllium 8 -> alpha + alpha
    ## The alpha generated in the first step (labeled alpha1) generally has a different initial
    ## energy distribution than the alphas generated in the second step (labeled alpha2 and
    ## alpha3).
    ## In the second test, we are in the boron rest frame. In this case, the momentum of each alpha
    ## follows a continuous distribution within a given range. In this function, we verify that
    ## this range is as expected by comparing the maximum and minimum energy of the obtained
    ## macroparticles to the theoretical maximum and minimum. Be aware that the range for alpha1
    ## is not the same as the range for alpha2 and alpha3 (typically alpha1 particles will carry
    ## more energy).
    ## Note that in the simulations, 6 macroparticles are generated for each fusion event.
    ## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
    ## The fifth and sixth are alpha3.
    energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
    z_alpha = data["alpha_z_end"]
    # Loop over all slices (i.e. cells in the z direction)
    for slice_number in range(1, size_z):
        ## For simplicity, all the calculations in this function are done nonrelativistically
        ## Proton kinetic energy in the lab frame before fusion
        E_proton_nonrelativistic = Energy_step*slice_number**2
        ## Corresponding square norm of proton momentum
        p_proton_sq = 2.*scc.m_p*E_proton_nonrelativistic
        ## Kinetic energy in the lab frame after
        ## proton + boron 11 -> alpha + beryllium 8
        E_after_fusion = E_proton_nonrelativistic + E_fusion
        ## Compute the theoretical maximum and minimum energy of alpha1 in the lab frame. This
        ## calculation is done by noting that the maximum (minimum) energy corresponds to an alpha
        ## emitted exactly in the (opposite) direction of the proton in the lab frame. This
        ## calculation involves solving a polynomial equation of order 2 in p_alpha1.
        max_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) + \
                        np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
                       (m_a/m_be + 1.)
        min_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) - \
                        np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
                       (m_a/m_be + 1.)
        max_energy_alpha1 = max_p_alpha1**2/(2*m_a)
        min_energy_alpha1 = min_p_alpha1**2/(2*m_a)
        ## Corresponding max/min kinetic energy of Beryllium in the lab frame
        max_E_beryllium = E_after_fusion - min_energy_alpha1
        min_E_beryllium = E_after_fusion - max_energy_alpha1
        ## Corresponding max/min momentum square of Beryllium in the lab frame
        max_p_sq_beryllium = 2.*m_be*max_E_beryllium
        min_p_sq_beryllium = 2.*m_be*min_E_beryllium
        ## Corresponding max/min kinetic energy in the lab frame for alpha2 + alpha3 after
        ## Beryllium decay
        max_energy_alpha2_plus_3 = max_E_beryllium + E_decay
        min_energy_alpha2_plus_3 = min_E_beryllium + E_decay
        ## Compute the theoretical maximum and minimum energy of alpha2 and alpha3 in the lab
        ## frame. This calculation is done by noting that the maximum (minimum) energy corresponds
        ## to an alpha emitted exactly in the (opposite) direction of a beryllium with energy
        ## max_E_beryllium (min_E_beryllium). This calculation involves solving a polynomial
        ## equation of order 2 in p_alpha23.
        max_p_alpha23 = 0.5*(np.sqrt(max_p_sq_beryllium) + \
                             np.sqrt(4*m_a*max_energy_alpha2_plus_3 - max_p_sq_beryllium))
        min_p_alpha23 = 0.5*(np.sqrt(min_p_sq_beryllium) - \
                             np.sqrt(4*m_a*min_energy_alpha2_plus_3 - min_p_sq_beryllium))
        max_energy_alpha23 = max_p_alpha23**2/(2*m_a)
        min_energy_alpha23 = min_p_alpha23**2/(2*m_a)
        ## Get the energy of all alphas in the slice
        energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
                                                     (z_alpha < (slice_number + 1))]
        ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
        energy_alpha1_simulation = energy_alpha_slice[::6]
        ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
        energy_alpha2_simulation = energy_alpha_slice[2::6]
        ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
        energy_alpha3_simulation = energy_alpha_slice[4::6]
        assert(is_close(np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.e-2))
        assert(is_close(np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.e-2))
        ## Tolerance is quite high below because we don't have a lot of alphas to produce good
        ## statistics and an event like alpha1 emitted exactly in direction of proton & alpha2
        ## emitted exactly in direction opposite to Beryllium is somewhat rare.
        assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=2.5e-1))
        assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=2.5e-1))
        assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=2.5e-1))
        assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=2.5e-1))
def check_xy_isotropy(data):
    ## Checks that the alpha emission is isotropic in x and y, while the z
    ## direction carries on average more momentum than x and y.
    mean_sq = {comp: np.average(data["alpha_"+comp+"_end"]*data["alpha_"+comp+"_end"])
               for comp in ("px", "py", "pz")}
    assert(is_close(mean_sq["px"], mean_sq["py"], rtol = 5.e-2))
    assert(mean_sq["pz"] > mean_sq["px"])
    assert(mean_sq["pz"] > mean_sq["py"])
def sigmav_thermal_fit_lowE_nonresonant(T):
    ## Temperature T is in keV
    ## Returns the nonresonant average of cross section multiplied by relative velocity in m^3/s,
    ## in the range T <= 70 keV, as described by equation 9 of
    ## Nuclear Fusion, 40, 865 (2000).
    ## Gamow peak energy and width for this temperature
    E0 = (E_Gamow_keV/4.)**(1./3.) * T**(2./3.)
    DE0 = 4.*np.sqrt(T*E0/3.)
    ## Coefficients of the low energy S-factor fit, converted from MeV b to keV b
    C0 = 197.*1.e3
    C1 = 0.24*1.e3
    C2 = 2.31e-4*1.e3
    tau = 3.*E0/T
    ## Effective S-factor at the Gamow peak
    Seff = C0*(1.+5./(12.*tau)) + C1*(E0+35./36.*T) + C2*(E0**2 + 89./36.*E0*T)
    ## nonresonant sigma times vrel, in barn meter per second
    sigmav_nr_bmps = np.sqrt(2*T*keV_to_Joule/m_reduced) * DE0*Seff/T**2 * np.exp(-tau)
    ## Return result in cubic meter per second
    return sigmav_nr_bmps*barn_to_square_meter
def sigmav_thermal_fit_lowE_resonant(T):
    ## Temperature T is in keV
    ## Returns the resonant average of cross section multiplied by relative
    ## velocity in m^3/s, in the range T <= 70 keV, as described by equation 11
    ## of Nuclear Fusion, 40, 865 (2000).
    resonance_strength = 5.41e-21
    return resonance_strength * np.exp(-148./T) / T**(3./2.)
def sigmav_thermal_fit_lowE(T):
    ## Temperature T is in keV
    ## Returns <sigma*v_rel> in m^3/s for a thermal plasma at temperature T, as
    ## the sum of the nonresonant and resonant contributions described in
    ## section 3.1 of Nuclear Fusion, 40, 865 (2000).
    ## The fits are valid for T <= 70 keV.
    nonresonant = sigmav_thermal_fit_lowE_nonresonant(T)
    resonant = sigmav_thermal_fit_lowE_resonant(T)
    return nonresonant + resonant
def expected_alpha_thermal(T, proton_density, boron_density, dV, dt):
    ## Computes the expected number of alphas produced during dt in a volume dV
    ## when the protons and borons follow Maxwellian distributions with a
    ## temperature T, in keV. This uses the thermal fits described in
    ## Nuclear Fusion, 40, 865 (2000).
    ## The underlying fit is only valid in the range T <= 70 keV.
    assert((T >=0) and (T<=70))
    rate_coefficient = sigmav_thermal_fit_lowE(T)
    ## Factor 3 is here because each fusion event produces 3 alphas.
    return 3.*proton_density*boron_density*rate_coefficient*dV*dt
def check_thermal_alpha_yield(data):
    ## Checks that the alpha yield of the thermal test (test3) agrees with the
    ## analytical thermal reactivity within 20%.
    temperature = 44. # keV
    proton_density = 1.e28 # m^-3
    boron_density = 5.e28 # m^-3
    theory_weight = expected_alpha_thermal(temperature, proton_density, boron_density,
                                           dV_total, dt)
    simulated_weight = np.sum(data["alpha_w_end"])
    assert(is_close(theory_weight, simulated_weight, rtol = 2.e-1))
def boron_remains(data):
    ## Returns True when at least one boron macroparticle is left at the end of
    ## the test, False otherwise.
    return data["boron_w_end"].shape[0] > 0
def specific_check1(data):
    """Checks specific to test 1: isotropy, macroparticle number, alpha
    yield and initial alpha energy."""
    check_isotropy(data, relative_tolerance=3.e-2)
    n_fusion_expected = check_macroparticle_number(
        data, fusion_probability_target_value=0.002, num_pair_per_cell=10000)
    E_com = compute_E_com1(data)
    check_alpha_yield(data, n_fusion_expected, E_com,
                      proton_density=1., boron_density=1.)
    check_initial_energy1(data, E_com)
def specific_check2(data):
    """Checks specific to test 2: x-y isotropy, macroparticle number, alpha
    yield and initial alpha energy."""
    check_xy_isotropy(data)
    # Only 900 particle pairs per cell here because the 10% of protons that
    # are at rest are ignored.
    n_fusion_expected = check_macroparticle_number(
        data, fusion_probability_target_value=0.02, num_pair_per_cell=900)
    E_com = compute_E_com2(data)
    check_alpha_yield(data, n_fusion_expected, E_com,
                      proton_density=1.e20, boron_density=1.e26)
    check_initial_energy2(data)
def specific_check3(data):
    """Checks specific to test 3: isotropy of the alpha emission and the
    total thermal alpha yield."""
    check_isotropy(data, relative_tolerance=1.e-1)
    check_thermal_alpha_yield(data)
def specific_check4(data):
    """Checks specific to test 4.

    The initial boron density is so small that all borons should have fused
    within one timestep dt, so no boron macroparticle may remain at the end
    of the simulation.
    """
    assert not boron_remains(data)
def specific_check5(data):
    """Checks specific to test 5.

    Like test 4, except that fusion_probability_threshold is raised enough
    that the fusion yield is severely underestimated; boron macroparticles
    must therefore still be present at the end of the test.
    """
    assert boron_remains(data)
def check_charge_conservation(rho_start, rho_end):
    """Assert that the charge density field is unchanged (to a tight
    relative tolerance) between the start and the end of the run."""
    charge_is_conserved = is_close(rho_start, rho_end, rtol=2.e-11)
    assert np.all(charge_is_conserved)
def main():
    """Run the full proton-boron fusion analysis.

    Loads the final plotfile given on the command line together with the
    corresponding initial plotfile, runs the generic and per-test checks
    for each of the 5 test cases, verifies charge conservation and
    evaluates the regression checksum.
    """
    filename_end = sys.argv[1]
    # The matching initial plotfile always ends in '0000'.
    filename_start = filename_end[:-4] + '0000'
    ds_end = yt.load(filename_end)
    ds_start = yt.load(filename_start)
    ad_end = ds_end.all_data()
    ad_start = ds_start.all_data()
    field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge,
                                          dims=ds_end.domain_dimensions)
    field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge,
                                              dims=ds_start.domain_dimensions)
    ntests = 5
    for i in range(1, ntests+1):
        proton_species = "proton"+str(i)
        boron_species = "boron"+str(i)
        alpha_species = "alpha"+str(i)
        data = {}
        add_species_to_dict(ad_start, data, proton_species, "proton", "start")
        add_species_to_dict(ad_start, data, boron_species, "boron", "start")
        add_species_to_dict(ad_end, data, proton_species, "proton", "end")
        add_species_to_dict(ad_end, data, boron_species, "boron", "end")
        add_species_to_dict(ad_end, data, alpha_species, "alpha", "end")
        # General checks that are performed for all tests
        generic_check(data)
        # Checks that are specific to test number i.  Look the checker up
        # by name in the module namespace instead of eval() on a string.
        globals()["specific_check" + str(i)](data)
    rho_start = field_data_start["rho"].to_ndarray()
    rho_end = field_data_end["rho"].to_ndarray()
    check_charge_conservation(rho_start, rho_end)
    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
# Run the full analysis only when invoked as a script (not on import).
if __name__ == "__main__":
    main()
| [
"numpy.clip",
"sys.path.insert",
"numpy.sqrt",
"numpy.arange",
"numpy.histogram",
"numpy.select",
"numpy.exp",
"numpy.empty",
"yt.load",
"numpy.amin",
"numpy.average",
"numpy.isclose",
"numpy.unique",
"os.getcwd",
"checksumAPI.evaluate_checksum",
"numpy.sum",
"numpy.array_equal",
"... | [((144, 204), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (159, 204), False, 'import sys\n'), ((4774, 4818), 'numpy.isclose', 'np.isclose', (['val1', 'val2'], {'rtol': 'rtol', 'atol': 'atol'}), '(val1, val2, rtol=rtol, atol=atol)\n', (4784, 4818), True, 'import numpy as np\n'), ((5583, 5594), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5591, 5594), True, 'import numpy as np\n'), ((5634, 5645), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5642, 5645), True, 'import numpy as np\n'), ((5685, 5696), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5693, 5696), True, 'import numpy as np\n'), ((5736, 5747), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5744, 5747), True, 'import numpy as np\n'), ((5787, 5798), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5795, 5798), True, 'import numpy as np\n'), ((5838, 5849), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5846, 5849), True, 'import numpy as np\n'), ((5889, 5900), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5897, 5900), True, 'import numpy as np\n'), ((6554, 6584), 'numpy.sum', 'np.sum', (["data['proton_w_start']"], {}), "(data['proton_w_start'])\n", (6560, 6584), True, 'import numpy as np\n'), ((6612, 6640), 'numpy.sum', 'np.sum', (["data['proton_w_end']"], {}), "(data['proton_w_end'])\n", (6618, 6640), True, 'import numpy as np\n'), ((6668, 6697), 'numpy.sum', 'np.sum', (["data['boron_w_start']"], {}), "(data['boron_w_start'])\n", (6674, 6697), True, 'import numpy as np\n'), ((6725, 6752), 'numpy.sum', 'np.sum', (["data['boron_w_end']"], {}), "(data['boron_w_end'])\n", (6731, 6752), True, 'import numpy as np\n'), ((6901, 6928), 'numpy.sum', 'np.sum', (["data['alpha_w_end']"], {}), "(data['alpha_w_end'])\n", (6907, 6928), True, 'import numpy as np\n'), ((9104, 9160), 'numpy.sum', 'np.sum', (["(data['proton_px_start'] * data['proton_w_start'])"], {}), 
"(data['proton_px_start'] * data['proton_w_start'])\n", (9110, 9160), True, 'import numpy as np\n'), ((9187, 9243), 'numpy.sum', 'np.sum', (["(data['proton_py_start'] * data['proton_w_start'])"], {}), "(data['proton_py_start'] * data['proton_w_start'])\n", (9193, 9243), True, 'import numpy as np\n'), ((9270, 9326), 'numpy.sum', 'np.sum', (["(data['proton_pz_start'] * data['proton_w_start'])"], {}), "(data['proton_pz_start'] * data['proton_w_start'])\n", (9276, 9326), True, 'import numpy as np\n'), ((9353, 9405), 'numpy.sum', 'np.sum', (["(data['proton_px_end'] * data['proton_w_end'])"], {}), "(data['proton_px_end'] * data['proton_w_end'])\n", (9359, 9405), True, 'import numpy as np\n'), ((9432, 9484), 'numpy.sum', 'np.sum', (["(data['proton_py_end'] * data['proton_w_end'])"], {}), "(data['proton_py_end'] * data['proton_w_end'])\n", (9438, 9484), True, 'import numpy as np\n'), ((9511, 9563), 'numpy.sum', 'np.sum', (["(data['proton_pz_end'] * data['proton_w_end'])"], {}), "(data['proton_pz_end'] * data['proton_w_end'])\n", (9517, 9563), True, 'import numpy as np\n'), ((9590, 9644), 'numpy.sum', 'np.sum', (["(data['boron_px_start'] * data['boron_w_start'])"], {}), "(data['boron_px_start'] * data['boron_w_start'])\n", (9596, 9644), True, 'import numpy as np\n'), ((9671, 9725), 'numpy.sum', 'np.sum', (["(data['boron_py_start'] * data['boron_w_start'])"], {}), "(data['boron_py_start'] * data['boron_w_start'])\n", (9677, 9725), True, 'import numpy as np\n'), ((9752, 9806), 'numpy.sum', 'np.sum', (["(data['boron_pz_start'] * data['boron_w_start'])"], {}), "(data['boron_pz_start'] * data['boron_w_start'])\n", (9758, 9806), True, 'import numpy as np\n'), ((9833, 9883), 'numpy.sum', 'np.sum', (["(data['boron_px_end'] * data['boron_w_end'])"], {}), "(data['boron_px_end'] * data['boron_w_end'])\n", (9839, 9883), True, 'import numpy as np\n'), ((9910, 9960), 'numpy.sum', 'np.sum', (["(data['boron_py_end'] * data['boron_w_end'])"], {}), "(data['boron_py_end'] * 
data['boron_w_end'])\n", (9916, 9960), True, 'import numpy as np\n'), ((9987, 10037), 'numpy.sum', 'np.sum', (["(data['boron_pz_end'] * data['boron_w_end'])"], {}), "(data['boron_pz_end'] * data['boron_w_end'])\n", (9993, 10037), True, 'import numpy as np\n'), ((10064, 10114), 'numpy.sum', 'np.sum', (["(data['alpha_px_end'] * data['alpha_w_end'])"], {}), "(data['alpha_px_end'] * data['alpha_w_end'])\n", (10070, 10114), True, 'import numpy as np\n'), ((10141, 10191), 'numpy.sum', 'np.sum', (["(data['alpha_py_end'] * data['alpha_w_end'])"], {}), "(data['alpha_py_end'] * data['alpha_w_end'])\n", (10147, 10191), True, 'import numpy as np\n'), ((10218, 10268), 'numpy.sum', 'np.sum', (["(data['alpha_pz_end'] * data['alpha_w_end'])"], {}), "(data['alpha_pz_end'] * data['alpha_w_end'])\n", (10224, 10268), True, 'import numpy as np\n'), ((11684, 11751), 'numpy.array_equal', 'np.array_equal', (["data['alpha_w_end'][::6]", "data['alpha_w_end'][1::6]"], {}), "(data['alpha_w_end'][::6], data['alpha_w_end'][1::6])\n", (11698, 11751), True, 'import numpy as np\n'), ((11764, 11831), 'numpy.array_equal', 'np.array_equal', (["data['alpha_w_end'][::6]", "data['alpha_w_end'][2::6]"], {}), "(data['alpha_w_end'][::6], data['alpha_w_end'][2::6])\n", (11778, 11831), True, 'import numpy as np\n'), ((11844, 11911), 'numpy.array_equal', 'np.array_equal', (["data['alpha_w_end'][::6]", "data['alpha_w_end'][3::6]"], {}), "(data['alpha_w_end'][::6], data['alpha_w_end'][3::6])\n", (11858, 11911), True, 'import numpy as np\n'), ((11924, 11991), 'numpy.array_equal', 'np.array_equal', (["data['alpha_w_end'][::6]", "data['alpha_w_end'][4::6]"], {}), "(data['alpha_w_end'][::6], data['alpha_w_end'][4::6])\n", (11938, 11991), True, 'import numpy as np\n'), ((12004, 12071), 'numpy.array_equal', 'np.array_equal', (["data['alpha_w_end'][::6]", "data['alpha_w_end'][5::6]"], {}), "(data['alpha_w_end'][::6], data['alpha_w_end'][5::6])\n", (12018, 12071), True, 'import numpy as np\n'), ((12290, 12359), 
'numpy.array_equal', 'np.array_equal', (["data['alpha_px_end'][::6]", "data['alpha_px_end'][1::6]"], {}), "(data['alpha_px_end'][::6], data['alpha_px_end'][1::6])\n", (12304, 12359), True, 'import numpy as np\n'), ((12372, 12441), 'numpy.array_equal', 'np.array_equal', (["data['alpha_py_end'][::6]", "data['alpha_py_end'][1::6]"], {}), "(data['alpha_py_end'][::6], data['alpha_py_end'][1::6])\n", (12386, 12441), True, 'import numpy as np\n'), ((12454, 12523), 'numpy.array_equal', 'np.array_equal', (["data['alpha_pz_end'][::6]", "data['alpha_pz_end'][1::6]"], {}), "(data['alpha_pz_end'][::6], data['alpha_pz_end'][1::6])\n", (12468, 12523), True, 'import numpy as np\n'), ((12536, 12606), 'numpy.array_equal', 'np.array_equal', (["data['alpha_px_end'][2::6]", "data['alpha_px_end'][3::6]"], {}), "(data['alpha_px_end'][2::6], data['alpha_px_end'][3::6])\n", (12550, 12606), True, 'import numpy as np\n'), ((12619, 12689), 'numpy.array_equal', 'np.array_equal', (["data['alpha_py_end'][2::6]", "data['alpha_py_end'][3::6]"], {}), "(data['alpha_py_end'][2::6], data['alpha_py_end'][3::6])\n", (12633, 12689), True, 'import numpy as np\n'), ((12702, 12772), 'numpy.array_equal', 'np.array_equal', (["data['alpha_pz_end'][2::6]", "data['alpha_pz_end'][3::6]"], {}), "(data['alpha_pz_end'][2::6], data['alpha_pz_end'][3::6])\n", (12716, 12772), True, 'import numpy as np\n'), ((12785, 12855), 'numpy.array_equal', 'np.array_equal', (["data['alpha_px_end'][4::6]", "data['alpha_px_end'][5::6]"], {}), "(data['alpha_px_end'][4::6], data['alpha_px_end'][5::6])\n", (12799, 12855), True, 'import numpy as np\n'), ((12868, 12938), 'numpy.array_equal', 'np.array_equal', (["data['alpha_py_end'][4::6]", "data['alpha_py_end'][5::6]"], {}), "(data['alpha_py_end'][4::6], data['alpha_py_end'][5::6])\n", (12882, 12938), True, 'import numpy as np\n'), ((12951, 13021), 'numpy.array_equal', 'np.array_equal', (["data['alpha_pz_end'][4::6]", "data['alpha_pz_end'][5::6]"], {}), "(data['alpha_pz_end'][4::6], 
data['alpha_pz_end'][5::6])\n", (12965, 13021), True, 'import numpy as np\n'), ((13359, 13414), 'numpy.average', 'np.average', (["(data['alpha_px_end'] * data['alpha_px_end'])"], {}), "(data['alpha_px_end'] * data['alpha_px_end'])\n", (13369, 13414), True, 'import numpy as np\n'), ((13433, 13488), 'numpy.average', 'np.average', (["(data['alpha_py_end'] * data['alpha_py_end'])"], {}), "(data['alpha_py_end'] * data['alpha_py_end'])\n", (13443, 13488), True, 'import numpy as np\n'), ((13507, 13562), 'numpy.average', 'np.average', (["(data['alpha_pz_end'] * data['alpha_pz_end'])"], {}), "(data['alpha_pz_end'] * data['alpha_pz_end'])\n", (13517, 13562), True, 'import numpy as np\n'), ((15485, 15515), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (15494, 15515), True, 'import numpy as np\n'), ((16465, 16495), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (16474, 16495), True, 'import numpy as np\n'), ((17158, 17171), 'numpy.sqrt', 'np.sqrt', (['p_sq'], {}), '(p_sq)\n', (17165, 17171), True, 'import numpy as np\n'), ((17186, 17226), 'numpy.sqrt', 'np.sqrt', (['(1.0 + p_sq / (m_p * scc.c) ** 2)'], {}), '(1.0 + p_sq / (m_p * scc.c) ** 2)\n', (17193, 17226), True, 'import numpy as np\n'), ((17236, 17276), 'numpy.sqrt', 'np.sqrt', (['(1.0 + p_sq / (m_b * scc.c) ** 2)'], {}), '(1.0 + p_sq / (m_b * scc.c) ** 2)\n', (17243, 17276), True, 'import numpy as np\n'), ((17575, 17593), 'numpy.all', 'np.all', (['(E_com >= 0)'], {}), '(E_com >= 0)\n', (17581, 17593), True, 'import numpy as np\n'), ((17809, 17836), 'numpy.clip', 'np.clip', (['E_com', '(1e-15)', 'None'], {}), '(E_com, 1e-15, None)\n', (17816, 17836), True, 'import numpy as np\n'), ((17957, 17987), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (17966, 17987), True, 'import numpy as np\n'), ((19756, 19802), 'numpy.sqrt', 'np.sqrt', (['(E_lab ** 2 - p_proton_sq * scc.c ** 2)'], {}), '(E_lab ** 2 - 
p_proton_sq * scc.c ** 2)\n', (19763, 19802), True, 'import numpy as np\n'), ((31951, 32006), 'numpy.average', 'np.average', (["(data['alpha_px_end'] * data['alpha_px_end'])"], {}), "(data['alpha_px_end'] * data['alpha_px_end'])\n", (31961, 32006), True, 'import numpy as np\n'), ((32025, 32080), 'numpy.average', 'np.average', (["(data['alpha_py_end'] * data['alpha_py_end'])"], {}), "(data['alpha_py_end'] * data['alpha_py_end'])\n", (32035, 32080), True, 'import numpy as np\n'), ((32099, 32154), 'numpy.average', 'np.average', (["(data['alpha_pz_end'] * data['alpha_pz_end'])"], {}), "(data['alpha_pz_end'] * data['alpha_pz_end'])\n", (32109, 32154), True, 'import numpy as np\n'), ((34808, 34835), 'numpy.sum', 'np.sum', (["data['alpha_w_end']"], {}), "(data['alpha_w_end'])\n", (34814, 34835), True, 'import numpy as np\n'), ((37203, 37224), 'yt.load', 'yt.load', (['filename_end'], {}), '(filename_end)\n', (37210, 37224), False, 'import yt\n'), ((37240, 37263), 'yt.load', 'yt.load', (['filename_start'], {}), '(filename_start)\n', (37247, 37263), False, 'import yt\n'), ((38614, 38668), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'filename_end'], {}), '(test_name, filename_end)\n', (38643, 38668), False, 'import checksumAPI\n'), ((4490, 4502), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4497, 4502), True, 'import numpy as np\n'), ((7907, 7957), 'numpy.sqrt', 'np.sqrt', (['(psq_array * scc.c ** 2 + rest_energy ** 2)'], {}), '(psq_array * scc.c ** 2 + rest_energy ** 2)\n', (7914, 7957), True, 'import numpy as np\n'), ((8405, 8457), 'numpy.sum', 'np.sum', (["(proton_energy_start * data['proton_w_start'])"], {}), "(proton_energy_start * data['proton_w_start'])\n", (8411, 8457), True, 'import numpy as np\n'), ((8485, 8535), 'numpy.sum', 'np.sum', (["(boron_energy_start * data['boron_w_start'])"], {}), "(boron_energy_start * data['boron_w_start'])\n", (8491, 8535), True, 'import numpy as np\n'), ((8709, 8755), 'numpy.sum', 'np.sum', 
(["(alpha_energy_end * data['alpha_w_end'])"], {}), "(alpha_energy_end * data['alpha_w_end'])\n", (8715, 8755), True, 'import numpy as np\n'), ((8857, 8884), 'numpy.sum', 'np.sum', (["data['alpha_w_end']"], {}), "(data['alpha_w_end'])\n", (8863, 8884), True, 'import numpy as np\n'), ((18899, 18930), 'numpy.sqrt', 'np.sqrt', (['expected_fusion_number'], {}), '(expected_fusion_number)\n', (18906, 18930), True, 'import numpy as np\n'), ((19582, 19641), 'numpy.sqrt', 'np.sqrt', (['(p_proton_sq * scc.c ** 2 + (m_p * scc.c ** 2) ** 2)'], {}), '(p_proton_sq * scc.c ** 2 + (m_p * scc.c ** 2) ** 2)\n', (19589, 19641), True, 'import numpy as np\n'), ((20070, 20120), 'numpy.sqrt', 'np.sqrt', (['(p_sq * scc.c ** 2 + (m * scc.c ** 2) ** 2)'], {}), '(p_sq * scc.c ** 2 + (m * scc.c ** 2) ** 2)\n', (20077, 20120), True, 'import numpy as np\n'), ((21217, 21316), 'numpy.histogram', 'np.histogram', (["data['alpha_z_end']"], {'bins': 'size_z', 'range': '(0, size_z)', 'weights': "data['alpha_w_end']"}), "(data['alpha_z_end'], bins=size_z, range=(0, size_z), weights=\n data['alpha_w_end'])\n", (21229, 21316), True, 'import numpy as np\n'), ((21559, 21600), 'numpy.sqrt', 'np.sqrt', (['expected_fusion_number_per_slice'], {}), '(expected_fusion_number_per_slice)\n', (21566, 21600), True, 'import numpy as np\n'), ((32658, 32679), 'numpy.sqrt', 'np.sqrt', (['(T * E0 / 3.0)'], {}), '(T * E0 / 3.0)\n', (32665, 32679), True, 'import numpy as np\n'), ((32971, 32983), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (32977, 32983), True, 'import numpy as np\n'), ((8559, 8607), 'numpy.sum', 'np.sum', (["(proton_energy_end * data['proton_w_end'])"], {}), "(proton_energy_end * data['proton_w_end'])\n", (8565, 8607), True, 'import numpy as np\n'), ((8635, 8681), 'numpy.sum', 'np.sum', (["(boron_energy_end * data['boron_w_end'])"], {}), "(boron_energy_end * data['boron_w_end'])\n", (8641, 8681), True, 'import numpy as np\n'), ((11240, 11261), 'numpy.unique', 'np.unique', (['complex_id'], {}), 
'(complex_id)\n', (11249, 11261), True, 'import numpy as np\n'), ((25737, 25770), 'numpy.amax', 'np.amax', (['energy_alpha2_simulation'], {}), '(energy_alpha2_simulation)\n', (25744, 25770), True, 'import numpy as np\n'), ((25829, 25862), 'numpy.amin', 'np.amin', (['energy_alpha2_simulation'], {}), '(energy_alpha2_simulation)\n', (25836, 25862), True, 'import numpy as np\n'), ((25921, 25954), 'numpy.amax', 'np.amax', (['energy_alpha3_simulation'], {}), '(energy_alpha3_simulation)\n', (25928, 25954), True, 'import numpy as np\n'), ((26013, 26046), 'numpy.amin', 'np.amin', (['energy_alpha3_simulation'], {}), '(energy_alpha3_simulation)\n', (26020, 26046), True, 'import numpy as np\n'), ((31025, 31058), 'numpy.amax', 'np.amax', (['energy_alpha1_simulation'], {}), '(energy_alpha1_simulation)\n', (31032, 31058), True, 'import numpy as np\n'), ((31116, 31149), 'numpy.amin', 'np.amin', (['energy_alpha1_simulation'], {}), '(energy_alpha1_simulation)\n', (31123, 31149), True, 'import numpy as np\n'), ((31477, 31510), 'numpy.amax', 'np.amax', (['energy_alpha2_simulation'], {}), '(energy_alpha2_simulation)\n', (31484, 31510), True, 'import numpy as np\n'), ((31570, 31603), 'numpy.amin', 'np.amin', (['energy_alpha2_simulation'], {}), '(energy_alpha2_simulation)\n', (31577, 31603), True, 'import numpy as np\n'), ((31663, 31696), 'numpy.amax', 'np.amax', (['energy_alpha3_simulation'], {}), '(energy_alpha3_simulation)\n', (31670, 31696), True, 'import numpy as np\n'), ((31756, 31789), 'numpy.amin', 'np.amin', (['energy_alpha3_simulation'], {}), '(energy_alpha3_simulation)\n', (31763, 31789), True, 'import numpy as np\n'), ((33390, 33408), 'numpy.exp', 'np.exp', (['(-148.0 / T)'], {}), '(-148.0 / T)\n', (33396, 33408), True, 'import numpy as np\n'), ((38594, 38605), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (38603, 38605), False, 'import os\n'), ((20370, 20387), 'numpy.arange', 'np.arange', (['size_z'], {}), '(size_z)\n', (20379, 20387), True, 'import numpy as np\n'), ((20717, 
20734), 'numpy.arange', 'np.arange', (['size_z'], {}), '(size_z)\n', (20726, 20734), True, 'import numpy as np\n'), ((24597, 24616), 'numpy.sqrt', 'np.sqrt', (['p_sq_after'], {}), '(p_sq_after)\n', (24604, 24616), True, 'import numpy as np\n'), ((24645, 24704), 'numpy.sqrt', 'np.sqrt', (['(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after)'], {}), '(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after)\n', (24652, 24704), True, 'import numpy as np\n'), ((24731, 24750), 'numpy.sqrt', 'np.sqrt', (['p_sq_after'], {}), '(p_sq_after)\n', (24738, 24750), True, 'import numpy as np\n'), ((24779, 24838), 'numpy.sqrt', 'np.sqrt', (['(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after)'], {}), '(4 * m_a * energy_alpha2_plus_3_theory - p_sq_after)\n', (24786, 24838), True, 'import numpy as np\n'), ((28448, 28536), 'numpy.sqrt', 'np.sqrt', (['(-m_a / m_be * p_proton_sq + 2.0 * E_after_fusion * m_a * (m_a / m_be + 1.0))'], {}), '(-m_a / m_be * p_proton_sq + 2.0 * E_after_fusion * m_a * (m_a /\n m_be + 1.0))\n', (28455, 28536), True, 'import numpy as np\n'), ((28644, 28732), 'numpy.sqrt', 'np.sqrt', (['(-m_a / m_be * p_proton_sq + 2.0 * E_after_fusion * m_a * (m_a / m_be + 1.0))'], {}), '(-m_a / m_be * p_proton_sq + 2.0 * E_after_fusion * m_a * (m_a /\n m_be + 1.0))\n', (28651, 28732), True, 'import numpy as np\n'), ((29944, 29971), 'numpy.sqrt', 'np.sqrt', (['max_p_sq_beryllium'], {}), '(max_p_sq_beryllium)\n', (29951, 29971), True, 'import numpy as np\n'), ((30005, 30069), 'numpy.sqrt', 'np.sqrt', (['(4 * m_a * max_energy_alpha2_plus_3 - max_p_sq_beryllium)'], {}), '(4 * m_a * max_energy_alpha2_plus_3 - max_p_sq_beryllium)\n', (30012, 30069), True, 'import numpy as np\n'), ((30096, 30123), 'numpy.sqrt', 'np.sqrt', (['min_p_sq_beryllium'], {}), '(min_p_sq_beryllium)\n', (30103, 30123), True, 'import numpy as np\n'), ((30157, 30221), 'numpy.sqrt', 'np.sqrt', (['(4 * m_a * min_energy_alpha2_plus_3 - min_p_sq_beryllium)'], {}), '(4 * m_a * min_energy_alpha2_plus_3 - 
min_p_sq_beryllium)\n', (30164, 30221), True, 'import numpy as np\n'), ((16374, 16402), 'numpy.sqrt', 'np.sqrt', (['(E_Gamow_MeV / E_MeV)'], {}), '(E_Gamow_MeV / E_MeV)\n', (16381, 16402), True, 'import numpy as np\n'), ((28400, 28420), 'numpy.sqrt', 'np.sqrt', (['p_proton_sq'], {}), '(p_proton_sq)\n', (28407, 28420), True, 'import numpy as np\n'), ((28596, 28616), 'numpy.sqrt', 'np.sqrt', (['p_proton_sq'], {}), '(p_proton_sq)\n', (28603, 28616), True, 'import numpy as np\n'), ((32917, 32958), 'numpy.sqrt', 'np.sqrt', (['(2 * T * keV_to_Joule / m_reduced)'], {}), '(2 * T * keV_to_Joule / m_reduced)\n', (32924, 32958), True, 'import numpy as np\n')] |
#%%
import matplotlib.pyplot as plt
import numpy as np

# Simple blue line plot with ticks placed exactly on the data values and
# a dotted horizontal grid.
xs = [1, 2, 3, 4]
ys = [4, 8, 1, 2]
plt.plot(xs, ys, 'b')
plt.title('Gráfico.')
plt.ylabel('Eixo Y')
plt.xlabel('Eixo X')
plt.yticks(ys)
plt.xticks(xs)
plt.grid(axis='y', linestyle=':')
plt.show()
#%%
import matplotlib.pyplot as plt
import numpy as np

# Plot x, x^2 and x^3 on the same axes, each with its own line style.
x = np.arange(0, 10, 0.5)
for ydata, fmt, lbl in ((x ** 2, 'bx:', 'x^2'),
                        (x ** 3, 'gs--', 'x^3'),
                        (x, 'r-.', 'x')):
    plt.plot(x, ydata, fmt, label=lbl)
plt.legend()
plt.title('Gráfico.')
plt.ylabel('Eixo Y')
plt.xlabel('Eixo X')
plt.show()
#%%
import matplotlib.pyplot as plt
import numpy as np

# Same three curves as above, but drawn one per stacked subplot.
x = np.arange(0, 10, 0.5)
fig, axes = plt.subplots(3, 1, figsize=(15, 12))
curves = ((x ** 2, 'bx:', 'x^2'), (x ** 3, 'gs--', 'x^3'), (x, 'r-.', 'x'))
for ax, (ydata, fmt, lbl) in zip(axes, curves):
    ax.plot(x, ydata, fmt, label=lbl)
    ax.legend()
    ax.set_ylabel('Eixo Y')
    ax.set_xlabel('Eixo X')
plt.suptitle('Gráfico.')
plt.show()
#%%
import matplotlib.pyplot as plt

# The same data rendered three ways (bar, scatter, line), side by side.
groups = ['A', 'B', 'C']
values = [5, 20, 10]
plt.figure(figsize=(9, 3))
plt.subplot(1, 3, 1)
plt.bar(groups, values)
plt.subplot(1, 3, 2)
plt.scatter(groups, values)
plt.subplot(1, 3, 3)
plt.plot(groups, values)
plt.suptitle('Plots')
plt.show()
#%% | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.sc... | [((92, 111), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {}), "(x, y, 'b')\n", (100, 111), True, 'import matplotlib.pyplot as plt\n'), ((113, 134), 'matplotlib.pyplot.title', 'plt.title', (['"""Gráfico."""'], {}), "('Gráfico.')\n", (122, 134), True, 'import matplotlib.pyplot as plt\n'), ((136, 156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Eixo Y"""'], {}), "('Eixo Y')\n", (146, 156), True, 'import matplotlib.pyplot as plt\n'), ((158, 178), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Eixo X"""'], {}), "('Eixo X')\n", (168, 178), True, 'import matplotlib.pyplot as plt\n'), ((182, 195), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y'], {}), '(y)\n', (192, 195), True, 'import matplotlib.pyplot as plt\n'), ((197, 210), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {}), '(x)\n', (207, 210), True, 'import matplotlib.pyplot as plt\n'), ((214, 247), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'linestyle': '""":"""'}), "(axis='y', linestyle=':')\n", (222, 247), True, 'import matplotlib.pyplot as plt\n'), ((255, 265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (263, 265), True, 'import matplotlib.pyplot as plt\n'), ((331, 352), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.5)'], {}), '(0, 10, 0.5)\n', (340, 352), True, 'import numpy as np\n'), ((354, 393), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x ** 2)', '"""bx:"""'], {'label': '"""x^2"""'}), "(x, x ** 2, 'bx:', label='x^2')\n", (362, 393), True, 'import matplotlib.pyplot as plt\n'), ((393, 433), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x ** 3)', '"""gs--"""'], {'label': '"""x^3"""'}), "(x, x ** 3, 'gs--', label='x^3')\n", (401, 433), True, 'import matplotlib.pyplot as plt\n'), ((433, 465), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x', '"""r-."""'], {'label': '"""x"""'}), "(x, x, 'r-.', label='x')\n", (441, 465), True, 'import matplotlib.pyplot as plt\n'), ((467, 479), 'matplotlib.pyplot.legend', 'plt.legend', 
([], {}), '()\n', (477, 479), True, 'import matplotlib.pyplot as plt\n'), ((483, 504), 'matplotlib.pyplot.title', 'plt.title', (['"""Gráfico."""'], {}), "('Gráfico.')\n", (492, 504), True, 'import matplotlib.pyplot as plt\n'), ((506, 526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Eixo Y"""'], {}), "('Eixo Y')\n", (516, 526), True, 'import matplotlib.pyplot as plt\n'), ((528, 548), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Eixo X"""'], {}), "('Eixo X')\n", (538, 548), True, 'import matplotlib.pyplot as plt\n'), ((552, 562), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (560, 562), True, 'import matplotlib.pyplot as plt\n'), ((630, 651), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.5)'], {}), '(0, 10, 0.5)\n', (639, 651), True, 'import numpy as np\n'), ((674, 710), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(15, 12)'}), '(3, 1, figsize=(15, 12))\n', (686, 710), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1051), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Gráfico."""'], {}), "('Gráfico.')\n", (1039, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1063, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1174), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3)'}), '(figsize=(9, 3))\n', (1158, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1197), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1188, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1215), 'matplotlib.pyplot.bar', 'plt.bar', (['gp', 'dados'], {}), '(gp, dados)\n', (1204, 1215), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1232), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (1227, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1234, 1256), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gp', 'dados'], {}), '(gp, dados)\n', (1245, 
1256), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1273), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (1268, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1294), 'matplotlib.pyplot.plot', 'plt.plot', (['gp', 'dados'], {}), '(gp, dados)\n', (1283, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1318), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Plots"""'], {}), "('Plots')\n", (1309, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1330, 1332), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import (absolute_import, division, print_function)
from collections import Iterable, OrderedDict
import wrapt
import numpy as np
import numpy.ma as ma
from .units import do_conversion, check_units, dealias_and_clean_unit
from .util import iter_left_indexes, from_args, to_np, combine_dims
from .py3compat import viewitems, viewvalues, isstr
from .config import xarray_enabled
from .constants import default_fill
if xarray_enabled():
from xarray import DataArray
def convert_units(unit_type, alg_unit):
    """A decorator that converts the wrapped function's output to the units
    requested through the wrapped function's own 'units' argument.

    Args:
        unit_type (:obj:`str`): The unit type. Choices are: 'wind',
            'pressure', 'temp', or 'height'.

        alg_unit (:obj:`str`): The units produced by the wrapped function,
            which is usually the units returned by the Fortran routine.

    Returns:
        :class:`numpy.ndarray`: The wrapped function's output in the desired
        units.

    """
    @wrapt.decorator
    def func_wrapper(wrapped, instance, args, kwargs):
        # Determine which units the caller asked for.
        requested_units = from_args(wrapped, "units", *args, **kwargs)["units"]
        cleaned_units = dealias_and_clean_unit(requested_units)
        check_units(cleaned_units, unit_type)

        # Run the algorithm, then convert its result to the requested units.
        result = wrapped(*args, **kwargs)
        return do_conversion(result, unit_type, alg_unit, requested_units)

    return func_wrapper
#def _calc_out_dims(outvar, left_dims):
# """
#
# """
# #left_dims = [x for x in left_dims]
# #right_dims = [x for x in outvar.shape]
# #return left_dims + right_dims
#
# return left_dims + outvar.shape
def left_iteration(ref_var_expected_dims,
ref_var_right_ndims,
insert_dims=None,
ref_var_idx=None,
ref_var_name=None,
ignore_args=None,
ignore_kargs=None,
outviews="outview",
alg_dtype=np.float64,
cast_output=True):
"""A decorator to handle iterating over the leftmost dimensions.
For example, if a wrapped function works with three-dimensional arrays, but
the variables include a 4th leftmost dimension for 'Time', this decorator
will iterate over all times, call the 3D Fortran routine, and aggregate the
results in to a 4D output array.
It is also important to note that the final output array is allocated
first, and then views are passed to the wrapped function so that values
do not need to get copied in to the final output array.
Args:
ref_var_expected_dims (:obj:`int`): The number of dimensions that the
Fortran routine is expecting for the reference variable.
ref_var_right_ndims (:obj:`int`): The number of dimensions from the
right to keep for the reference variable when making the output.
Can also be a :class:`combine_dims` object if the sizes are
determined from multiple variables.
insert_dims (sequence of :obj:`int`, optional): A sequence of
dimensions to insert between the left dimensions (e.g. time) and
the kept right dimensions. Default is None.
ref_var_idx (:obj:`int`, optional): The index in the wrapped function's
positional arguments to be used as the reference variable for
determining the leftmost dimensions. Must be specified if
*ref_var_name* is None. Default is None.
ref_var_name (:obj:`str`, optional): The keyword argument name for the
wrapped function's keyword arguments to be used as the reference
variable for calculating the leftmost dimensions. Must be
specified if *ref_var_idx* is None. Default is None.
ignore_args (sequence of :obj:`int`): Indexes of any arguments that
should be ignored when creating the sliced views that are
passed to the Fortran routine.
ignore_kargs (sequence of :obj:`str`): Keys of any keyword arguments
that should be ignored when creating the sliced views that are
passed to the Fortran routine.
outviews (:obj:`str` or a sequence): A single key or sequence of keys
that indicate the wrapped function's keyword argument to use
as the output variable(s) in the wrapped function.
alg_dtype (:class:`numpy.dtype` or :obj:`str`): The numpy data type
used in the wrapped function.
cast_output (:obj:`bool`): Set to True to cast the wrapped function's
output to the same type as the reference variable.
Returns:
:class:`numpy.ndarray`: The aggregated output array that includes
all extra leftmost dimensions found in the reference variable.
"""
@wrapt.decorator
def func_wrapper(wrapped, instance, args, kwargs):
_ignore_args = ignore_args if ignore_args is not None else ()
_ignore_kargs = ignore_kargs if ignore_kargs is not None else ()
_outkeys = [outviews] if isstr(outviews) else outviews
if ref_var_idx is not None:
ref_var = args[ref_var_idx]
else:
ref_var = kwargs[ref_var_name]
ref_var_dtype = ref_var.dtype
ref_var_shape = ref_var.shape
extra_dim_num = ref_var.ndim - ref_var_expected_dims
# No special left side iteration, return the function result
if (extra_dim_num == 0):
return wrapped(*args, **kwargs)
# Start by getting the left-most 'extra' dims
extra_dims = ref_var_shape[0:extra_dim_num]
mid_dims = () if insert_dims is None else tuple(insert_dims)
if not isinstance(ref_var_right_ndims, combine_dims):
right_dims = ref_var_shape[-ref_var_right_ndims:]
else:
right_dims = ref_var_right_ndims(*args)
left_dims = extra_dims
outdims = left_dims + mid_dims + right_dims
if "outview" not in kwargs:
outd = OrderedDict((outkey, np.empty(outdims, alg_dtype))
for outkey in _outkeys)
mask_output = False
for left_idxs in iter_left_indexes(extra_dims):
# Make the left indexes plus a single slice object
# The single slice will handle all the dimensions to
# the right (e.g. [1,1,:])
left_and_slice_idxs = left_idxs + (slice(None), )
# Slice the args if applicable
new_args = [arg[left_and_slice_idxs]
if i not in _ignore_args else arg
for i,arg in enumerate(args)]
# Slice the kwargs if applicable
new_kargs = {key:(val[left_and_slice_idxs]
if key not in _ignore_kargs else val)
for key,val in viewitems(kwargs)}
# Skip the possible empty/missing arrays for the join method
skip_missing = False
for arg in new_args:
try:
_ = arg.ndim
except AttributeError:
continue # Not an array object
else:
arr = to_np(arg)
try:
all_masked = arr.mask.all()
except AttributeError:
pass # Not a masked array
else:
if all_masked:
for output in viewvalues(outd):
output[left_and_slice_idxs] = (
default_fill(np.float64))
skip_missing = True
mask_output = True
break
if skip_missing:
continue
# Insert the output views if one hasn't been provided
if "outview" not in new_kargs:
for outkey,output in viewitems(outd):
outview = output[left_and_slice_idxs]
new_kargs[outkey] = outview
result = wrapped(*new_args, **new_kargs)
# Make sure the result is the same data as what got passed in
# Can delete this once everything works
if (result.__array_interface__["data"][0] !=
outview.__array_interface__["data"][0]):
raise RuntimeError("output array was copied")
if len(outd) == 1:
output = next(iter(viewvalues(outd)))
else:
output = tuple(arr for arr in viewvalues(outd))
if cast_output:
if isinstance(output, np.ndarray):
output = output.astype(ref_var_dtype)
else:
output = tuple(arr.astype(ref_var_dtype) for arr in output)
# Mostly when used with join
if mask_output:
if isinstance(output, np.ndarray):
output = ma.masked_values(output, default_fill(np.float64))
else:
output = tuple(ma.masked_values(arr, default_fill(np.float64))
for arr in output)
return output
return func_wrapper
def cast_type(ref_idx=0, arg_idxs=None, karg_names=None,
alg_dtype=np.float64, outviews="outview"):
"""A decorator to handle type casting.
This decorator is used to cast variables to and from the required
:class:`numpy.dtype` used in the wrapped function.
Args:
ref_idx (:obj:`int`, optional): The index in the wrapped function's
positional arguments to be used as the reference variable for
determining the :class:`numpy.dtype` to return. Default is 0.
arg_idxs (sequence of :obj:`int`, optional): A sequence of indexes in the
wrapped function's positional arguments that indicate which
arguments to cast. Must be specified if *karg_names* is None.
Default is None.
karg_names (sequence of :obj:`str`): A sequence of keyword arguments
in the wrapped function's keyword arguments that indicate the
arguments to cast. Must be specified if *arg_idxs* is None.
Default is None.
alg_dtype (:class:`numpy.dtype` or :obj:`str`): The numpy data type used
in the wrapped function.
outviews (:obj:`str` or a sequence): A single key or sequence of keys
that indicate the wrapped function's keyword argument to use
as the output variable(s) in the wrapped function.
Returns:
:class:`numpy.ndarray`: The wrapped function's output cast to the
same :class:`numpy.dtype` as the reference variable.
"""
@wrapt.decorator
def func_wrapper(wrapped, instance, args, kwargs):
_arg_idxs = arg_idxs if arg_idxs is not None else ()
_karg_names = karg_names if karg_names is not None else ()
# Handle output views if applicable
_outkeys = [outviews] if isstr(outviews) else outviews
_outviews = from_args(wrapped, _outkeys, *args, **kwargs)
has_outview = False
for outkey in _outkeys:
_outview = _outviews[outkey]
if _outview is not None:
has_outview = True
orig_type = args[ref_idx].dtype
new_args = [arg.astype(alg_dtype)
if i in _arg_idxs else arg
for i,arg in enumerate(args)]
new_kargs = {key:(val.astype(alg_dtype)
if key in _karg_names else val)
for key,val in viewitems(kwargs)}
result = wrapped(*new_args, **new_kargs)
# Do nothing for supplied output views
if not has_outview:
if isinstance(result, np.ndarray):
if result.dtype == orig_type:
return result
return result.astype(orig_type)
elif isinstance(result, Iterable): # got back a sequence of arrays
return tuple(arr.astype(orig_type)
if arr.dtype != orig_type else arr
for arr in result)
return result
return func_wrapper
def _extract_and_transpose(arg, do_transpose):
"""Return a transposed view of the :class:`numpy.ndarray` inside of a
:class:`xarray.DataArray` object.
If the *arg* parameter is not a :class:`xarray.DataArray` object, then
*arg* is returned.
Args:
arg (:class:`xarray.DataArray` or :obj:`object`): Can be any object
type.
do_transpose: Set to False to only extract the variable. When True,
the extracted array will also be transposed to a Fortran view if
it is not already Fortran contiguous.
Returns:
:class:`numpy.ndarray`: A numpy array. If *do_transpose* is True,
the numpy array will also be a Fortran contiguous view.
"""
if xarray_enabled():
if isinstance(arg, DataArray):
arg = to_np(arg)
if do_transpose:
if isinstance(arg, np.ndarray):
if not arg.flags.f_contiguous and arg.ndim > 1:
return arg.T
return arg
def extract_and_transpose(do_transpose=True, outviews="outview"):
"""A decorator to extract the data array from a :class:`xarray.DataArray`
This decorator also transposes the view of the data to Fortran
contiguous if *do_transpose* is True.
Args:
do_transpose: Set to False to only extract the variable. When True,
the extracted array will also be transposed to a Fortran view if
it is not already Fortran contiguous.
outviews (:obj:`str` or a sequence): A single key or sequence of keys
that indicate the wrapped function's keyword argument to use
as the output variable(s) in the wrapped function.
Returns:
:class:`numpy.ndarray`: A numpy array. If *do_transpose* is True,
the numpy array will also be a Fortran contiguous view.
"""
@wrapt.decorator
def func_wrapper(wrapped, instance, args, kwargs):
# Handle output views if applicable
_outkeys = [outviews] if isstr(outviews) else outviews
_outviews = from_args(wrapped, _outkeys, *args, **kwargs)
has_outview = False
for outkey in _outkeys:
_outview = _outviews[outkey]
if _outview is not None:
has_outview = True
new_args = [_extract_and_transpose(arg, do_transpose) for arg in args]
new_kargs = {key:_extract_and_transpose(val, do_transpose)
for key,val in viewitems(kwargs)}
result = wrapped(*new_args, **new_kargs)
# Do nothing for supplied output views
if has_outview:
return result
if isinstance(result, np.ndarray):
if result.flags.f_contiguous and result.ndim > 1:
return result.T
elif isinstance(result, Iterable):
return tuple(x.T if x.flags.f_contiguous and x.ndim > 1 else x
for x in result)
return result
return func_wrapper
def check_args(refvaridx, refvarndim, rightdims, stagger=None,
refstagdim=None):
"""A decorator to check that the wrapped function's arguments are valid.
An exception is raised when an invalid argument is found.
Args:
refvaridx (:obj:`int`): The wrapped function's positional argument
index to use as the reference variable.
refvarndim (:obj:`int`): The number of dimensions for the reference
variable that is expected by the wrapped function.
rightdims (sequence of :obj:`int`): The expected number of right
dimensions for each argument.
stagger (sequence of :obj:`int` or :obj:`None`, optional): The
dimension that is staggered for each argument in the wrapped
function. Use :obj:`None` in the sequence to indicate no
staggering for that argument. Default is None.
refstagdim (:obj:`int`, optional): The staggered dimension for the
reference variable, if applicable. Default is None.
Returns:
None
Raises:
:class:`ValueError`: Raised when an invalid argument is detected.
"""
@wrapt.decorator
def func_wrapper(wrapped, instance, args, kwargs):
refvar = args[refvaridx]
try:
_ndim = refvar.ndim
except AttributeError:
raise ValueError("argument {} is not an arraylike "
"object".format(refvaridx))
else:
extra_dims = refvar.ndim - refvarndim
# Always use unstaggered as the basis of comparison
if refstagdim is not None:
_refshape = list(refvar.shape)
_refshape[refstagdim] -= 1
_refshape = tuple(_refshape)
else:
_refshape = refvar.shape
if stagger is None:
_stagger = [None]*len(rightdims)
else:
_stagger = stagger
for i,ndim in enumerate(rightdims):
if ndim is None:
continue
var = args[i]
try:
_ = var.ndim
except AttributeError:
raise ValueError("argument {} is not an arraylike "
"object".format(i))
right_var_ndims = rightdims[i]
# Check that the number of dims is correct
if (var.ndim - extra_dims != right_var_ndims):
raise ValueError("invalid number of dimensions for argument "
"{} (got {}, expected {}).".format(i,
var.ndim,
right_var_ndims + extra_dims))
# Add 1 to the reference staggered dim index before doing the check
if _stagger[i] is not None:
ref_shape = list(_refshape)
ref_shape[_stagger[i]] += 1
ref_shape = tuple(ref_shape)
else:
ref_shape = _refshape
ref_right_sizes = ref_shape[extra_dims:]
# Check that right dimensions are lined up
if (var.shape[-right_var_ndims:] !=
ref_right_sizes[-right_var_ndims:]):
raise ValueError("invalid shape for argument "
"{} (got {}, expected {})".format(i,
var.shape[-right_var_ndims:],
ref_right_sizes[-right_var_ndims:]))
return wrapped(*args, **kwargs)
return func_wrapper
| [
"numpy.empty"
] | [((6435, 6463), 'numpy.empty', 'np.empty', (['outdims', 'alg_dtype'], {}), '(outdims, alg_dtype)\n', (6443, 6463), True, 'import numpy as np\n')] |
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
D = int(C / G)
X = X.reshape(N, G, D, -1)
mu = np.mean(X, axis=(2, 3), keepdims=True)
std = np.sqrt((np.var(X, axis=(2, 3), keepdims=True) + epsilon))
gamma = gamma.reshape(G, D, 1)
beta = beta.reshape(G, D, 1)
Y = gamma * (X - mu) / std + beta
return [Y.reshape(dims), mu.reshape(N, G), (1.0 / std).reshape(N, G)]
def group_norm_nhwc_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[-1]
G = group
D = int(C / G)
X = X.reshape(N, -1, G, D)
mu = np.mean(X, axis=(1, 3), keepdims=True)
std = np.sqrt((np.var(X, axis=(1, 3), keepdims=True) + epsilon))
gamma = gamma.reshape(G, D)
beta = beta.reshape(G, D)
Y = gamma * (X - mu) / std + beta
return [Y.reshape(dims), mu.reshape(N, G), (1.0 / std).reshape(N, G)]
@serial.given(
N=st.integers(1, 5), G=st.integers(1, 5), D=st.integers(1, 5),
H=st.integers(2, 5), W=st.integers(2, 5),
epsilon=st.floats(min_value=1e-5, max_value=1e-4),
order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
def test_group_norm_2d(
self, N, G, D, H, W, epsilon, order, gc, dc):
op = core.CreateOperator(
"GroupNorm",
["X", "gamma", "beta"],
["Y", "mean", "inv_std"],
group=G,
epsilon=epsilon,
order=order,
)
C = G * D
if order == "NCHW":
X = np.random.randn(N, C, H, W).astype(np.float32) + 1.0
else:
X = np.random.randn(N, H, W, C).astype(np.float32) + 1.0
gamma = np.random.randn(C).astype(np.float32)
beta = np.random.randn(C).astype(np.float32)
inputs = [X, gamma, beta]
def ref_op(X, gamma, beta):
if order == "NCHW":
return self.group_norm_nchw_ref(X, gamma, beta, G, epsilon)
else:
return self.group_norm_nhwc_ref(X, gamma, beta, G, epsilon)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref_op,
threshold=5e-3,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(N=st.integers(1, 5), G=st.integers(1, 3), D=st.integers(2, 3),
T=st.integers(2, 4), H=st.integers(2, 4), W=st.integers(2, 4),
epsilon=st.floats(min_value=1e-5, max_value=1e-4),
order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
def test_group_norm_3d(
self, N, G, D, T, H, W, epsilon, order, gc, dc):
op = core.CreateOperator(
"GroupNorm",
["X", "gamma", "beta"],
["Y", "mean", "inv_std"],
group=G,
epsilon=epsilon,
order=order,
)
C = G * D
if order == "NCHW":
X = np.random.randn(N, C, T, H, W).astype(np.float32) + 1.0
else:
X = np.random.randn(N, T, H, W, C).astype(np.float32) + 1.0
gamma = np.random.randn(C).astype(np.float32)
beta = np.random.randn(C).astype(np.float32)
inputs = [X, gamma, beta]
def ref_op(X, gamma, beta):
if order == "NCHW":
return self.group_norm_nchw_ref(X, gamma, beta, G, epsilon)
else:
return self.group_norm_nhwc_ref(X, gamma, beta, G, epsilon)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref_op,
threshold=5e-3,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(N=st.integers(1, 5), G=st.integers(1, 5), D=st.integers(2, 2),
H=st.integers(2, 5), W=st.integers(2, 5),
epsilon=st.floats(min_value=1e-5, max_value=1e-4),
order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
@settings(deadline=10000)
def test_group_norm_grad(
self, N, G, D, H, W, epsilon, order, gc, dc):
op = core.CreateOperator(
"GroupNorm",
["X", "gamma", "beta"],
["Y", "mean", "inv_std"],
group=G,
epsilon=epsilon,
order=order,
)
C = G * D
X = np.arange(N * C * H * W).astype(np.float32)
np.random.shuffle(X)
if order == "NCHW":
X = X.reshape((N, C, H, W))
else:
X = X.reshape((N, H, W, C))
gamma = np.random.randn(C).astype(np.float32)
beta = np.random.randn(C).astype(np.float32)
inputs = [X, gamma, beta]
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
| [
"numpy.mean",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.integers",
"numpy.arange",
"hypothesis.strategies.floats",
"hypothesis.settings",
"caffe2.python.core.CreateOperator",
"unittest.main",
"numpy.random.randn",
"numpy.var",
"numpy.random.shuffle"
] | [((4406, 4430), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (4414, 4430), False, 'from hypothesis import given, settings\n'), ((5236, 5251), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5249, 5251), False, 'import unittest\n'), ((533, 571), 'numpy.mean', 'np.mean', (['X'], {'axis': '(2, 3)', 'keepdims': '(True)'}), '(X, axis=(2, 3), keepdims=True)\n', (540, 571), True, 'import numpy as np\n'), ((1062, 1100), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1, 3)', 'keepdims': '(True)'}), '(X, axis=(1, 3), keepdims=True)\n', (1069, 1100), True, 'import numpy as np\n'), ((1722, 1847), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""GroupNorm"""', "['X', 'gamma', 'beta']", "['Y', 'mean', 'inv_std']"], {'group': 'G', 'epsilon': 'epsilon', 'order': 'order'}), "('GroupNorm', ['X', 'gamma', 'beta'], ['Y', 'mean',\n 'inv_std'], group=G, epsilon=epsilon, order=order)\n", (1741, 1847), False, 'from caffe2.python import core\n'), ((3120, 3245), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""GroupNorm"""', "['X', 'gamma', 'beta']", "['Y', 'mean', 'inv_std']"], {'group': 'G', 'epsilon': 'epsilon', 'order': 'order'}), "('GroupNorm', ['X', 'gamma', 'beta'], ['Y', 'mean',\n 'inv_std'], group=G, epsilon=epsilon, order=order)\n", (3139, 3245), False, 'from caffe2.python import core\n'), ((4532, 4657), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""GroupNorm"""', "['X', 'gamma', 'beta']", "['Y', 'mean', 'inv_std']"], {'group': 'G', 'epsilon': 'epsilon', 'order': 'order'}), "('GroupNorm', ['X', 'gamma', 'beta'], ['Y', 'mean',\n 'inv_std'], group=G, epsilon=epsilon, order=order)\n", (4551, 4657), False, 'from caffe2.python import core\n'), ((4820, 4840), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (4837, 4840), True, 'import numpy as np\n'), ((1394, 1411), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1405, 1411), True, 
'import hypothesis.strategies as st\n'), ((1415, 1432), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1426, 1432), True, 'import hypothesis.strategies as st\n'), ((1436, 1453), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1447, 1453), True, 'import hypothesis.strategies as st\n'), ((1465, 1482), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (1476, 1482), True, 'import hypothesis.strategies as st\n'), ((1486, 1503), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (1497, 1503), True, 'import hypothesis.strategies as st\n'), ((1521, 1565), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(1e-05)', 'max_value': '(0.0001)'}), '(min_value=1e-05, max_value=0.0001)\n', (1530, 1565), True, 'import hypothesis.strategies as st\n'), ((1578, 1611), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['NCHW', 'NHWC']"], {}), "(['NCHW', 'NHWC'])\n", (1593, 1611), True, 'import hypothesis.strategies as st\n'), ((2759, 2776), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (2770, 2776), True, 'import hypothesis.strategies as st\n'), ((2780, 2797), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(3)'], {}), '(1, 3)\n', (2791, 2797), True, 'import hypothesis.strategies as st\n'), ((2801, 2818), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(3)'], {}), '(2, 3)\n', (2812, 2818), True, 'import hypothesis.strategies as st\n'), ((2833, 2850), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(4)'], {}), '(2, 4)\n', (2844, 2850), True, 'import hypothesis.strategies as st\n'), ((2854, 2871), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(4)'], {}), '(2, 4)\n', (2865, 2871), True, 'import hypothesis.strategies as st\n'), ((2875, 2892), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(4)'], {}), '(2, 4)\n', (2886, 2892), True, 
'import hypothesis.strategies as st\n'), ((2913, 2957), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(1e-05)', 'max_value': '(0.0001)'}), '(min_value=1e-05, max_value=0.0001)\n', (2922, 2957), True, 'import hypothesis.strategies as st\n'), ((2973, 3006), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['NCHW', 'NHWC']"], {}), "(['NCHW', 'NHWC'])\n", (2988, 3006), True, 'import hypothesis.strategies as st\n'), ((4163, 4180), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (4174, 4180), True, 'import hypothesis.strategies as st\n'), ((4184, 4201), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (4195, 4201), True, 'import hypothesis.strategies as st\n'), ((4205, 4222), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(2)'], {}), '(2, 2)\n', (4216, 4222), True, 'import hypothesis.strategies as st\n'), ((4237, 4254), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (4248, 4254), True, 'import hypothesis.strategies as st\n'), ((4258, 4275), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (4269, 4275), True, 'import hypothesis.strategies as st\n'), ((4296, 4340), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(1e-05)', 'max_value': '(0.0001)'}), '(min_value=1e-05, max_value=0.0001)\n', (4305, 4340), True, 'import hypothesis.strategies as st\n'), ((4356, 4389), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['NCHW', 'NHWC']"], {}), "(['NCHW', 'NHWC'])\n", (4371, 4389), True, 'import hypothesis.strategies as st\n'), ((595, 632), 'numpy.var', 'np.var', (['X'], {'axis': '(2, 3)', 'keepdims': '(True)'}), '(X, axis=(2, 3), keepdims=True)\n', (601, 632), True, 'import numpy as np\n'), ((1124, 1161), 'numpy.var', 'np.var', (['X'], {'axis': '(1, 3)', 'keepdims': '(True)'}), '(X, axis=(1, 3), keepdims=True)\n', (1130, 1161), True, 'import numpy as np\n'), ((2142, 2160), 
'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (2157, 2160), True, 'import numpy as np\n'), ((2195, 2213), 'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (2210, 2213), True, 'import numpy as np\n'), ((3546, 3564), 'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (3561, 3564), True, 'import numpy as np\n'), ((3599, 3617), 'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (3614, 3617), True, 'import numpy as np\n'), ((4768, 4792), 'numpy.arange', 'np.arange', (['(N * C * H * W)'], {}), '(N * C * H * W)\n', (4777, 4792), True, 'import numpy as np\n'), ((4979, 4997), 'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (4994, 4997), True, 'import numpy as np\n'), ((5032, 5050), 'numpy.random.randn', 'np.random.randn', (['C'], {}), '(C)\n', (5047, 5050), True, 'import numpy as np\n'), ((1990, 2017), 'numpy.random.randn', 'np.random.randn', (['N', 'C', 'H', 'W'], {}), '(N, C, H, W)\n', (2005, 2017), True, 'import numpy as np\n'), ((2073, 2100), 'numpy.random.randn', 'np.random.randn', (['N', 'H', 'W', 'C'], {}), '(N, H, W, C)\n', (2088, 2100), True, 'import numpy as np\n'), ((3388, 3418), 'numpy.random.randn', 'np.random.randn', (['N', 'C', 'T', 'H', 'W'], {}), '(N, C, T, H, W)\n', (3403, 3418), True, 'import numpy as np\n'), ((3474, 3504), 'numpy.random.randn', 'np.random.randn', (['N', 'T', 'H', 'W', 'C'], {}), '(N, T, H, W, C)\n', (3489, 3504), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by <NAME>
# 2017-03-03
"""論文[1]に従い六角格子内の1点をその六角格子セルを代表する点とみなし,
隣接する6つのセルを代表する点を繋ぐことでランダムな三角格子を生成する
[1] https://www.jstage.jst.go.jp/article/journalcpij/44.3/0/44.3_799/_pdf
"""
import numpy as np
def pick_param():
"""Pick up random point from hex region"""
# -1 <= p <= 1
p = 2. * np.random.rand() - 1.
# -1 <= q <= 1
q = 2. * np.random.rand() - 1.
if p+q >= -1 and p+q <= 1:
return p, q
else:
return pick_param()
def randomize(lattice):
X, Y = lattice.coordinates_x, lattice.coordinates_y
ab = np.array([pick_param() for n in range(X.shape[0])])
xy = np.dot(ab,
np.array([
[1., 0.],
[0.5, np.sqrt(3)/2]
])
)
X, Y = X + 0.5 * lattice.dx * xy.T[0], Y + 0.5 * lattice.dx * xy.T[1]
lattice.coordinates_x, lattice.coordinates_y = X, Y
return X, Y
if __name__ == '__main__':
from triangular import LatticeTriangular as LT
import matplotlib.pyplot as plt
import matplotlib.tri as tri
fig, ax = plt.subplots()
Lx, Ly = 50, 30
# Lx, Ly = 10, 10
lattice = LT(
np.zeros((Lx, Ly), dtype=np.int),
scale=float(max(Lx, Ly)),
boundary={'h': 'periodic',
'v': 'reflective'}
)
lattice_X = lattice.coordinates_x
lattice_Y = lattice.coordinates_y
X_min, X_max = min(lattice_X) - 0.7, max(lattice_X) + 0.7
Y_min, Y_max = min(lattice_Y) - 0.5, max(lattice_Y) + 0.5
ax.set_xlim([X_min, X_max])
ax.set_ylim([Y_min, Y_max])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
_triang = tri.Triangulation(lattice_X, lattice_Y)
ax.scatter(lattice_X, lattice_Y, s=5)
randomize(lattice)
triang = tri.Triangulation(lattice.coordinates_x, lattice.coordinates_y, triangles=_triang.triangles)
# triang = tri.Triangulation(lattice.coordinates_x, lattice.coordinates_y)
# ax.triplot(triang, color='#d5d5d5', lw=0.5)
ax.triplot(triang, color='k', lw=0.5)
# trilattice.lattice[neighbors] = 2
# colorseq = np.zeros((Lx, Ly))
# colorseq[trilattice.lattice == 2] = 0.9
# colorseq[trilattice.lattice == 0] = 0.
# colorseq[trilattice.lattice == 1] = 0.5
# X, Y = trilattice.to_realspace(scale=20, x0=-10, y0=-10)
# import matplotlib.pyplot as plt
# plt.scatter(X, Y, s=100., c=colorseq)
# plt.show()
plt.show()
| [
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.tri.Triangulation",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1126, 1140), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1138, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1756), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['lattice_X', 'lattice_Y'], {}), '(lattice_X, lattice_Y)\n', (1734, 1756), True, 'import matplotlib.tri as tri\n'), ((1837, 1934), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['lattice.coordinates_x', 'lattice.coordinates_y'], {'triangles': '_triang.triangles'}), '(lattice.coordinates_x, lattice.coordinates_y, triangles=\n _triang.triangles)\n', (1854, 1934), True, 'import matplotlib.tri as tri\n'), ((2484, 2494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2492, 2494), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1242), 'numpy.zeros', 'np.zeros', (['(Lx, Ly)'], {'dtype': 'np.int'}), '((Lx, Ly), dtype=np.int)\n', (1218, 1242), True, 'import numpy as np\n'), ((353, 369), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (367, 369), True, 'import numpy as np\n'), ((407, 423), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (421, 423), True, 'import numpy as np\n'), ((764, 774), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (771, 774), True, 'import numpy as np\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
from .tesisfunctions import Plotim,overlay,padVH
import cv2
import numpy as np
#from invariantMoments import centroid,invmoments,normalizedinvariantmoment,bwmoment
from .tesisfunctions import sigmoid,histogram,brightness,getthresh,threshold,pad,graphpolygontest, polygontest
#http://stackoverflow.com/questions/14725181/speed-up-iteration-over-numpy-arrays-opencv-cv2-image
#http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html
fn1 = r'im1_2.jpg'
#fn1 = r"asift2Result_with_alfa1.png"
#fn1 = r"im_completer_Result2.png"
fore = cv2.imread(fn1)
fore = cv2.resize(fore,(300,300))
name = fn1.split('\\')[-1].split(".")[0]
fore2 = fore.copy()
"""
fore = fore.astype("float")
fb = fore[:,:,0]
fg = fore[:,:,1]
fr = fore[:,:,2]
# threshold retinal area
alfa = -1
beta = 50 # if alfa >0 :if beta = 50 with noise, if beta = 200 without noise
th = 1
kernel = np.ones((100,100),np.uint8)
enhanced = sigmoid(fr,alfa,beta)
thresh = cv2.threshold(enhanced.astype("uint8"),th,1,cv2.THRESH_BINARY_INV)[1]
#dilation = cv2.dilate(thresh,kernel,iterations = 1)
#erosion = cv2.erode(dilation,kernel,iterations = 1)
#closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
#dilation = cv2.dilate(opening,kernel,iterations = 1)
lastthresh = opening
"""
P = brightness(fore)
thresh = getthresh(cv2.resize(P,(300,300)))
print(thresh)
lastthresh=threshold(P,thresh,1,0)
thresh,lastthresh = cv2.threshold(P,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#lastthresh = pad(lastthresh,1)
plotc = Plotim(name + " overlayed lastthresh", overlay(fore.copy(), lastthresh * 255, alpha=lastthresh))
plotc.show()
# find biggest area
contours,hierarchy = cv2.findContours(lastthresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("objects: ",len(contours))
index = 0
maxarea = 0
#objectarea = np.sum(lastthresh)
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area>maxarea:
index = i
maxarea = area
print("area contour:",maxarea,"index: ",index)
cnt = contours[index]
print("optaining polygon test...")
polygontest = graphpolygontest((P,cnt)).show()
#DEFECTS
pallet = [[0,0,0],[255,255,255]]
pallet = np.array(pallet,np.uint8)
imdefects = pallet[lastthresh]
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
distances = defects[:,0,3]
two_max = np.argpartition(distances, -2)[-2:] # get indeces of two maximum values
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
cv2.line(imdefects,start,end,[0,255,0],2)
cv2.circle(imdefects,far,5,[0,0,255],-1)
#SEPARATING LINE
points = defects[:,0,2]
x1,y1 = tuple(cnt[points[two_max[0]]][0])
x2,y2 = tuple(cnt[points[two_max[1]]][0])
m = old_div((y2-y1),float(x2-x1))
b = int(y1-x1*m)
# find interception with xf and yf axis
if b>imdefects.shape[0]: # if start outside yf
start = int(old_div((imdefects.shape[0]-b),m)),imdefects.shape[0] # (yf-b)/m, yf
else: # if start inside yf
start = 0,b # 0,y
y = int(m*imdefects.shape[1]+b) # m*xf+b
if y<0: # if end outside yf
end = int(old_div(-b,m)),0# x,0
else: # if end inside yf
end = imdefects.shape[1],y # xf, y
cv2.line(imdefects,start,end,[0,0,100],2)
plotc = Plotim(name + " defects", imdefects)
plotc.show()
#ROI
ROI = np.zeros(P.shape,dtype=np.uint8)
cv2.drawContours(ROI,[cnt],0,1,-1)
plotc = Plotim(name + " ROI", ROI)
plotc.show()
M = cv2.moments(cnt) # find moments
#M2 = invmoments(ROI,Area=None,center=None)
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])
#x,y = centroid(ROI,maxarea)
#normalizedinvariantmoment(ROI,maxarea,0,0,x,y)
#n00 = bwmoment(ROI,0,0,cx,cy)
#print "(cx,cy)",(cx,cy)
#print "x,y",x,y
#cv2.circle(fore, (cx,cy), 10, (0, 0, 255), -1, 8)
#cv2.circle(fore, (int(x),int(y)), 10, (0, 255, 255), -1, 8)
cv2.drawContours(fore,[cnt],0,(0, 0, 255),2)
ellipse = cv2.fitEllipse(cnt)
cv2.ellipse(fore,ellipse,(0,255,0),2)
plotc = Plotim(name + " description", fore)
plotc.show()
mask = np.ones(P.shape,dtype=np.uint8)
cv2.ellipse(mask,ellipse,0,-1)
fore2[mask>0]=0
plotc = Plotim(name + " result", fore2)
plotc.show()
cv2.imwrite("mask_"+name+".png",fore2)
"""
# Saving the objects:
import pickle
data = {"thresh":thresh,"lastthresh":lastthresh,"cnt":cnt,"ellipse":ellipse,"polygontest":polygontest}
with open("masks_"+name+'.pickle', 'w') as f:
pickle.dump(data, f)""" | [
"cv2.convexityDefects",
"past.utils.old_div",
"numpy.array",
"builtins.range",
"cv2.ellipse",
"cv2.fitEllipse",
"cv2.threshold",
"cv2.line",
"cv2.contourArea",
"cv2.drawContours",
"numpy.ones",
"cv2.circle",
"cv2.moments",
"cv2.resize",
"cv2.imread",
"cv2.convexHull",
"cv2.imwrite",
... | [((781, 796), 'cv2.imread', 'cv2.imread', (['fn1'], {}), '(fn1)\n', (791, 796), False, 'import cv2\n'), ((804, 832), 'cv2.resize', 'cv2.resize', (['fore', '(300, 300)'], {}), '(fore, (300, 300))\n', (814, 832), False, 'import cv2\n'), ((1683, 1742), 'cv2.threshold', 'cv2.threshold', (['P', '(0)', '(1)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(P, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1696, 1742), False, 'import cv2\n'), ((2429, 2455), 'numpy.array', 'np.array', (['pallet', 'np.uint8'], {}), '(pallet, np.uint8)\n', (2437, 2455), True, 'import numpy as np\n'), ((2494, 2533), 'cv2.convexHull', 'cv2.convexHull', (['cnt'], {'returnPoints': '(False)'}), '(cnt, returnPoints=False)\n', (2508, 2533), False, 'import cv2\n'), ((2545, 2576), 'cv2.convexityDefects', 'cv2.convexityDefects', (['cnt', 'hull'], {}), '(cnt, hull)\n', (2565, 2576), False, 'import cv2\n'), ((2695, 2718), 'builtins.range', 'range', (['defects.shape[0]'], {}), '(defects.shape[0])\n', (2700, 2718), False, 'from builtins import range\n'), ((3489, 3536), 'cv2.line', 'cv2.line', (['imdefects', 'start', 'end', '[0, 0, 100]', '(2)'], {}), '(imdefects, start, end, [0, 0, 100], 2)\n', (3497, 3536), False, 'import cv2\n'), ((3602, 3635), 'numpy.zeros', 'np.zeros', (['P.shape'], {'dtype': 'np.uint8'}), '(P.shape, dtype=np.uint8)\n', (3610, 3635), True, 'import numpy as np\n'), ((3635, 3673), 'cv2.drawContours', 'cv2.drawContours', (['ROI', '[cnt]', '(0)', '(1)', '(-1)'], {}), '(ROI, [cnt], 0, 1, -1)\n', (3651, 3673), False, 'import cv2\n'), ((3722, 3738), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (3733, 3738), False, 'import cv2\n'), ((4118, 4166), 'cv2.drawContours', 'cv2.drawContours', (['fore', '[cnt]', '(0)', '(0, 0, 255)', '(2)'], {}), '(fore, [cnt], 0, (0, 0, 255), 2)\n', (4134, 4166), False, 'import cv2\n'), ((4173, 4192), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cnt'], {}), '(cnt)\n', (4187, 4192), False, 'import cv2\n'), ((4193, 4235), 'cv2.ellipse', 'cv2.ellipse', 
(['fore', 'ellipse', '(0, 255, 0)', '(2)'], {}), '(fore, ellipse, (0, 255, 0), 2)\n', (4204, 4235), False, 'import cv2\n'), ((4297, 4329), 'numpy.ones', 'np.ones', (['P.shape'], {'dtype': 'np.uint8'}), '(P.shape, dtype=np.uint8)\n', (4304, 4329), True, 'import numpy as np\n'), ((4329, 4362), 'cv2.ellipse', 'cv2.ellipse', (['mask', 'ellipse', '(0)', '(-1)'], {}), '(mask, ellipse, 0, -1)\n', (4340, 4362), False, 'import cv2\n'), ((4429, 4472), 'cv2.imwrite', 'cv2.imwrite', (["('mask_' + name + '.png')", 'fore2'], {}), "('mask_' + name + '.png', fore2)\n", (4440, 4472), False, 'import cv2\n'), ((1589, 1614), 'cv2.resize', 'cv2.resize', (['P', '(300, 300)'], {}), '(P, (300, 300))\n', (1599, 1614), False, 'import cv2\n'), ((2134, 2162), 'cv2.contourArea', 'cv2.contourArea', (['contours[i]'], {}), '(contours[i])\n', (2149, 2162), False, 'import cv2\n'), ((2613, 2643), 'numpy.argpartition', 'np.argpartition', (['distances', '(-2)'], {}), '(distances, -2)\n', (2628, 2643), True, 'import numpy as np\n'), ((2834, 2881), 'cv2.line', 'cv2.line', (['imdefects', 'start', 'end', '[0, 255, 0]', '(2)'], {}), '(imdefects, start, end, [0, 255, 0], 2)\n', (2842, 2881), False, 'import cv2\n'), ((2880, 2926), 'cv2.circle', 'cv2.circle', (['imdefects', 'far', '(5)', '[0, 0, 255]', '(-1)'], {}), '(imdefects, far, 5, [0, 0, 255], -1)\n', (2890, 2926), False, 'import cv2\n'), ((3201, 3235), 'past.utils.old_div', 'old_div', (['(imdefects.shape[0] - b)', 'm'], {}), '(imdefects.shape[0] - b, m)\n', (3208, 3235), False, 'from past.utils import old_div\n'), ((3402, 3416), 'past.utils.old_div', 'old_div', (['(-b)', 'm'], {}), '(-b, m)\n', (3409, 3416), False, 'from past.utils import old_div\n')] |
import numpy as np
import os
import pandas as pd
import micro_dl.utils.tile_utils as tile_utils
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.image_utils as image_utils
import micro_dl.utils.mp_utils as mp_utils
class ImageTilerUniform:
    """Tiles all images in a dataset.

    Tiling is driven by the frames metadata CSV found in ``input_dir``;
    tiles and a ``frames_meta.csv`` for them are written under
    ``output_dir/tiles_<size>_step_<step>``.
    """

    def __init__(self,
                 input_dir,
                 output_dir,
                 tile_size=[256, 256],
                 step_size=[64, 64],
                 depths=1,
                 time_ids=-1,
                 channel_ids=-1,
                 normalize_channels=-1,
                 slice_ids=-1,
                 pos_ids=-1,
                 hist_clip_limits=None,
                 flat_field_dir=None,
                 image_format='zyx',
                 num_workers=4,
                 int2str_len=3,
                 tile_3d=False):
        """
        Tiles images.
        If tile_dir already exist, it will check which channels are already
        tiled, get indices from them and tile from indices only on the channels
        not already present.

        :param str input_dir: Directory with frames to be tiled
        :param str output_dir: Base output directory
        :param list tile_size: size of the blocks to be cropped
         from the image
        :param list step_size: size of the window shift. In case
         of no overlap, the step size is tile_size. If overlap, step_size <
         tile_size
        :param int/list depths: The z depth for generating stack training data
         Default 1 assumes 2D data for all channels to be tiled.
         For cases where input and target shapes are not the same (e.g. stack
         to 2D) you should specify depths for each channel in tile.channels.
        :param list/int time_ids: Tile given timepoint indices
        :param list/int channel_ids: Tile images in the given channel indices
         default=-1, tile all channels.
        :param list/int normalize_channels: list of booleans matching channel_ids
            indicating if channel should be normalized or not.
        :param int slice_ids: Index of which focal plane acquisition to
         use (for 2D). default=-1 for the whole z-stack
        :param int pos_ids: Position (FOV) indices to use
        :param list hist_clip_limits: lower and upper percentiles used for
         histogram clipping.
        :param str flat_field_dir: Flatfield directory. None if no flatfield
         correction
        :param str image_format: zyx (preferred) or xyz
        :param int num_workers: number of workers for multiprocessing
        :param int int2str_len: number of characters for each idx to be used
         in file names
        :param bool tile_3d: Whether tiling is 3D or 2D
        """
        self.input_dir = input_dir
        self.output_dir = output_dir
        # NOTE(review): normalize_channels is assigned again twice below;
        # only the last assignment before the -1 check matters.
        self.normalize_channels = normalize_channels
        self.depths = depths
        self.tile_size = tile_size
        self.step_size = step_size
        self.hist_clip_limits = hist_clip_limits
        self.image_format = image_format
        assert self.image_format in {'zyx', 'xyz'}, \
            'Data format must be zyx or xyz'
        self.num_workers = num_workers
        self.int2str_len = int2str_len
        self.tile_3d = tile_3d
        # Tile output subdirectory name encodes tile and step sizes,
        # e.g. "tiles_256-256_step_64-64".
        self.str_tile_step = 'tiles_{}_step_{}'.format(
            '-'.join([str(val) for val in tile_size]),
            '-'.join([str(val) for val in step_size]),
        )
        self.tile_dir = os.path.join(
            output_dir,
            self.str_tile_step,
        )
        # If tile dir already exist, only tile channels not already present
        self.tiles_exist = False
        # If tile dir already exist, things could get messy because we don't
        # have any checks in place for how to add to existing tiles
        try:
            os.makedirs(self.tile_dir, exist_ok=False)
            # make dir for saving indiv meta per image, could be used for
            # tracking job success / fail
            os.makedirs(os.path.join(self.tile_dir, 'meta_dir'),
                        exist_ok=False)
        except FileExistsError as e:
            print("Tile dir exists. Only add untiled channels.")
            self.tiles_exist = True
            # make dir for saving individual meta per image, could be used for
            # tracking job success / fail
            os.makedirs(os.path.join(self.tile_dir, 'meta_dir'),
                        exist_ok=True)
        self.flat_field_dir = flat_field_dir
        self.frames_metadata = aux_utils.read_meta(self.input_dir)
        # Get metadata indices
        metadata_ids, _ = aux_utils.validate_metadata_indices(
            frames_metadata=self.frames_metadata,
            time_ids=time_ids,
            channel_ids=channel_ids,
            slice_ids=slice_ids,
            pos_ids=pos_ids,
            uniform_structure=True
        )
        self.channel_ids = metadata_ids['channel_ids']
        self.normalize_channels = normalize_channels
        self.time_ids = metadata_ids['time_ids']
        self.slice_ids = metadata_ids['slice_ids']
        self.pos_ids = metadata_ids['pos_ids']
        self.normalize_channels = normalize_channels
        # Determine which channels should be normalized in tiling
        if self.normalize_channels == -1:
            self.normalize_channels = [True] * len(self.channel_ids)
        else:
            assert len(self.normalize_channels) == len(self.channel_ids),\
                "Channel ids {} and normalization list {} mismatch".format(
                    self.channel_ids,
                    self.normalize_channels,
                )
        # If more than one depth is specified, length must match channel ids
        if isinstance(self.depths, list):
            assert len(self.depths) == len(self.channel_ids),\
                "depths ({}) and channels ({}) length mismatch".format(
                    self.depths, self.channel_ids,
                )
            # Get max of all specified depths
            max_depth = max(self.depths)
            # Convert channels + depths to dict for lookup
            self.channel_depth = dict(zip(self.channel_ids, self.depths))
        else:
            # If depth is scalar, make depth the same for all channels
            max_depth = self.depths
            self.channel_depth = dict(zip(
                self.channel_ids,
                [self.depths] * len(self.channel_ids)),
            )
        # Adjust slice margins
        self.slice_ids = aux_utils.adjust_slice_margins(
            slice_ids=self.slice_ids,
            depth=max_depth,
        )

    def get_tile_dir(self):
        """
        Return directory containing tiles

        :return str tile_dir: Directory with tiles
        """
        return self.tile_dir

    def _get_dataframe(self):
        """
        Creates an empty dataframe with metadata column names for tiles. It's
        the same names as for frames, but with channel_name removed and with
        the addition of row_start and col_start.
        TODO: Should I also save row_end and col_end while I'm at it?
        Might be useful if we want to recreate tiles from a previous preprocessing
        with mask run... Or just retrieve tile_size from preprocessing_info...
        This is one of the functions that will have to be adapted once tested on
        3D data.

        :return dataframe tiled_metadata
        """
        return pd.DataFrame(columns=[
            "channel_idx",
            "slice_idx",
            "time_idx",
            "file_name",
            "pos_idx",
            "row_start",
            "col_start"])

    def _get_flat_field(self, channel_idx):
        """
        Get flat field image for a given channel index

        :param int channel_idx: Channel index
        :return np.array flat_field_im: flat field image for channel
        """
        # Returns None when no flatfield correction is configured.
        flat_field_im = None
        if self.flat_field_dir is not None:
            flat_field_im = np.load(
                os.path.join(
                    self.flat_field_dir,
                    'flat-field_channel-{}.npy'.format(channel_idx),
                )
            )
        return flat_field_im

    def _get_tile_indices(self, tiled_meta,
                          time_idx,
                          channel_idx,
                          pos_idx,
                          slice_idx):
        """Get the tile indices from saved meta data

        :param pd.DataFrame tiled_meta: DF with image level meta info
        :param int time_idx: time index for current image
        :param int channel_idx: channel index for current image
        :param int pos_idx: position / sample index for current image
        :param int slice_idx: slice index of current image
        :return list tile_indices: list of tile indices
        """
        # Get tile indices from one channel only
        c = tiled_meta['channel_idx'] == channel_idx
        z = tiled_meta['slice_idx'] == slice_idx
        p = tiled_meta['pos_idx'] == pos_idx
        t = tiled_meta['time_idx'] == time_idx
        channel_meta = tiled_meta[c & z & p & t]
        # Get tile_indices
        # End indices are reconstructed from start indices plus tile_size,
        # since only the starts are stored in the metadata.
        if self.tile_3d:
            tile_indices = pd.concat([
                channel_meta['row_start'],
                channel_meta['row_start'].add(self.tile_size[0]),
                channel_meta['col_start'],
                channel_meta['col_start'].add(self.tile_size[1]),
                channel_meta['slice_start'],
                channel_meta['slice_start'].add(self.tile_size[2])
            ], axis=1)
        else:
            tile_indices = pd.concat([
                channel_meta['row_start'],
                channel_meta['row_start'].add(self.tile_size[0]),
                channel_meta['col_start'],
                channel_meta['col_start'].add(self.tile_size[1]),
            ], axis=1)
        # Match list format similar to tile_image
        tile_indices = tile_indices.values.tolist()
        return tile_indices

    def _get_tiled_data(self):
        """
        If tile directory already exists, check which channels have been
        processed and only tile new channels.

        :return dataframe tiled_meta: Metadata with previously tiled channels
        :return list of lists tile_indices: Nbr tiles x 4 indices with row
            start + stop and column start + stop indices
        """
        if self.tiles_exist:
            tiled_meta = aux_utils.read_meta(self.tile_dir)
            # Find untiled channels
            tiled_channels = np.unique(tiled_meta['channel_idx'])
            new_channels = list(set(self.channel_ids) -
                                set(tiled_channels))
            if len(new_channels) == 0:
                print('All channels in config have already been tiled')
                # NOTE(review): bare return yields None here, but callers
                # unpack two values (meta, indices) and would raise
                # TypeError — confirm whether this path is reachable.
                return
            self.channel_ids = new_channels
            tile_indices = self._get_tile_indices(
                tiled_meta=tiled_meta,
                time_idx=self.time_ids[0],
                channel_idx=tiled_channels[0],
                pos_idx=self.pos_ids[0],
                slice_idx=self.slice_ids[0]
            )
        else:
            tiled_meta = self._get_dataframe()
            tile_indices = None
        return tiled_meta, tile_indices

    def _get_input_fnames(self,
                          time_idx,
                          channel_idx,
                          slice_idx,
                          pos_idx,
                          mask_dir=None):
        """Get input_fnames

        :param int time_idx: Time index
        :param int channel_idx: Channel index
        :param int slice_idx: Slice (z) index
        :param int pos_idx: Position (FOV) index
        :param str mask_dir: Directory containing masks
        :return: list of input fnames
        """
        if mask_dir is None:
            depth = self.channel_depth[channel_idx]
        else:
            depth = self.mask_depth
        # For depth > 1, collect the z-neighborhood centered on slice_idx.
        margin = 0 if depth == 1 else depth // 2
        im_fnames = []
        for z in range(slice_idx - margin, slice_idx + margin + 1):
            if mask_dir is not None:
                # NOTE(review): mask metadata is re-read for every z in the
                # loop; could be hoisted, but left as-is here.
                mask_meta = aux_utils.read_meta(mask_dir)
                meta_idx = aux_utils.get_meta_idx(
                    mask_meta,
                    time_idx,
                    channel_idx,
                    z,
                    pos_idx,
                )
                file_path = os.path.join(
                    mask_dir,
                    mask_meta.loc[meta_idx, 'file_name'],
                )
            else:
                meta_idx = aux_utils.get_meta_idx(
                    self.frames_metadata,
                    time_idx,
                    channel_idx,
                    z,
                    pos_idx,
                )
                file_path = os.path.join(
                    self.input_dir,
                    self.frames_metadata.loc[meta_idx, 'file_name'],
                )
            # check if file_path exists
            im_fnames.append(file_path)
        return im_fnames

    def get_crop_tile_args(self,
                           channel_idx,
                           time_idx,
                           slice_idx,
                           pos_idx,
                           task_type,
                           tile_indices=None,
                           mask_dir=None,
                           min_fraction=None,
                           normalize_im=False):
        """Gather arguments for cropping or tiling

        :param int channel_idx: channel index for current image
        :param int time_idx: time index for current image
        :param int slice_idx: slice index for current image
        :param int pos_idx: position / sample index for current image
        :param str task_type: crop or tile
        :param list tile_indices: list of tile indices
        :param str mask_dir: dir containing image level masks
        :param float min_fraction: min foreground volume fraction for use tile
        :param bool normalize_im: indicator to normalize image based on z-score or not
        :return list cur_args: tuple of arguments for tiling
         list tile_indices: tile indices for current image
        """
        input_fnames = self._get_input_fnames(
            time_idx=time_idx,
            channel_idx=channel_idx,
            slice_idx=slice_idx,
            pos_idx=pos_idx,
            mask_dir=mask_dir
        )
        # no flat field correction for mask
        flat_field_fname = None
        hist_clip_limits = None
        is_mask = False
        if mask_dir is None:
            if self.flat_field_dir is not None:
                flat_field_fname = os.path.join(
                    self.flat_field_dir,
                    'flat-field_channel-{}.npy'.format(channel_idx)
                )
            # no hist_clipping for mask as mask is bool
            if self.hist_clip_limits is not None:
                hist_clip_limits = tuple(
                    self.hist_clip_limits
                )
        else:
            # Using masks, need to make sure they're bool
            is_mask = True
        # NOTE(review): cur_args stays unbound if task_type is neither
        # 'crop' nor 'tile'; the return would raise NameError — confirm
        # callers always pass one of the two.
        if task_type == 'crop':
            cur_args = (tuple(input_fnames),
                        flat_field_fname,
                        hist_clip_limits,
                        time_idx,
                        channel_idx,
                        pos_idx,
                        slice_idx,
                        tuple(tile_indices),
                        self.image_format,
                        self.tile_dir,
                        self.int2str_len,
                        is_mask,
                        self.tile_3d,
                        normalize_im)
        elif task_type == 'tile':
            cur_args = (tuple(input_fnames),
                        flat_field_fname,
                        hist_clip_limits,
                        time_idx,
                        channel_idx,
                        pos_idx,
                        slice_idx,
                        self.tile_size,
                        self.step_size,
                        min_fraction,
                        self.image_format,
                        self.tile_dir,
                        self.int2str_len,
                        is_mask,
                        normalize_im)
        return cur_args

    def tile_stack(self):
        """
        Tiles images in the specified channels.

        https://research.wmz.ninja/articles/2018/03/
        on-sharing-large-arrays-when-using-pythons-multiprocessing.html

        Saves a csv with columns
        ['time_idx', 'channel_idx', 'pos_idx','slice_idx', 'file_name']
        for all the tiles
        """
        # Get or create tiled metadata and tile indices
        prev_tiled_metadata, tile_indices = self._get_tiled_data()
        tiled_meta0 = None
        fn_args = []
        for channel_idx in self.channel_ids:
            # Find channel index position in channel_ids list
            list_idx = self.channel_ids.index(channel_idx)
            # Perform flatfield correction if flatfield dir is specified
            flat_field_im = self._get_flat_field(channel_idx=channel_idx)
            for slice_idx in self.slice_ids:
                for time_idx in self.time_ids:
                    for pos_idx in self.pos_ids:
                        if tile_indices is None:
                            # tile and save first image
                            # get meta data and tile_indices
                            # The first image is tiled synchronously so its
                            # tile indices can be reused for all other images.
                            im = image_utils.preprocess_imstack(
                                frames_metadata=self.frames_metadata,
                                input_dir=self.input_dir,
                                depth=self.channel_depth[channel_idx],
                                time_idx=time_idx,
                                channel_idx=channel_idx,
                                slice_idx=slice_idx,
                                pos_idx=pos_idx,
                                flat_field_im=flat_field_im,
                                hist_clip_limits=self.hist_clip_limits,
                                normalize_im=self.normalize_channels[list_idx],
                            )
                            save_dict = {'time_idx': time_idx,
                                         'channel_idx': channel_idx,
                                         'pos_idx': pos_idx,
                                         'slice_idx': slice_idx,
                                         'save_dir': self.tile_dir,
                                         'image_format': self.image_format,
                                         'int2str_len': self.int2str_len}
                            tiled_meta0, tile_indices = \
                                tile_utils.tile_image(
                                    input_image=im,
                                    tile_size=self.tile_size,
                                    step_size=self.step_size,
                                    return_index=True,
                                    save_dict=save_dict,
                                )
                        else:
                            # Remaining images are cropped at the known
                            # indices in a multiprocessing pool below.
                            cur_args = self.get_crop_tile_args(
                                channel_idx,
                                time_idx,
                                slice_idx,
                                pos_idx,
                                task_type='crop',
                                tile_indices=tile_indices,
                                normalize_im=self.normalize_channels[list_idx],
                            )
                            fn_args.append(cur_args)
        tiled_meta_df_list = mp_utils.mp_crop_save(
            fn_args,
            workers=self.num_workers,
        )
        if tiled_meta0 is not None:
            tiled_meta_df_list.append(tiled_meta0)
        tiled_metadata = pd.concat(tiled_meta_df_list, ignore_index=True)
        if self.tiles_exist:
            # Append newly tiled channels to the pre-existing metadata.
            tiled_metadata.reset_index(drop=True, inplace=True)
            prev_tiled_metadata.reset_index(drop=True, inplace=True)
            tiled_metadata = pd.concat(
                [prev_tiled_metadata, tiled_metadata],
                ignore_index=True,
            )
        # Finally, save all the metadata
        tiled_metadata = tiled_metadata.sort_values(by=['file_name'])
        tiled_metadata.to_csv(
            os.path.join(self.tile_dir, "frames_meta.csv"),
            sep=",",
        )

    def tile_mask_stack(self,
                        mask_dir,
                        mask_channel,
                        min_fraction,
                        mask_depth=1):
        """
        Tiles images in the specified channels assuming there are masks
        already created in mask_dir. Only tiles above a certain fraction
        of foreground in mask tile will be saved and added to metadata.

        Saves a csv with columns ['time_idx', 'channel_idx', 'pos_idx',
        'slice_idx', 'file_name'] for all the tiles

        :param str mask_dir: Directory containing masks
        :param int mask_channel: Channel number assigned to mask
        :param float min_fraction: Minimum fraction of foreground in tiled masks
        :param int mask_depth: Depth for mask channel
        """
        # mask depth has to match input or ouput channel depth
        assert mask_depth <= max(self.channel_depth.values())
        self.mask_depth = mask_depth
        # tile and save masks
        # if mask channel is already tiled
        if self.tiles_exist and mask_channel in self.channel_ids:
            mask_meta_df = pd.read_csv(
                os.path.join(self.tile_dir, 'frames_meta.csv')
            )
        else:
            # TODO: different masks across timepoints (but MaskProcessor
            # generates mask for tp=0 only)
            mask_fn_args = []
            for slice_idx in self.slice_ids:
                for time_idx in self.time_ids:
                    for pos_idx in self.pos_ids:
                        # Evaluate mask, then channels.The masks will influence
                        # tiling indices, so it's not allowed to add masks to
                        # existing tiled data sets (indices will be retrieved
                        # from existing meta)
                        cur_args = self.get_crop_tile_args(
                            channel_idx=mask_channel,
                            time_idx=time_idx,
                            slice_idx=slice_idx,
                            pos_idx=pos_idx,
                            task_type='tile',
                            mask_dir=mask_dir,
                            min_fraction=min_fraction,
                            normalize_im=False,
                        )
                        mask_fn_args.append(cur_args)
            # tile_image uses min_fraction assuming input_image is a bool
            mask_meta_df_list = mp_utils.mp_tile_save(
                mask_fn_args,
                workers=self.num_workers,
            )
            mask_meta_df = pd.concat(mask_meta_df_list, ignore_index=True)
            # Finally, save all the metadata
            mask_meta_df = mask_meta_df.sort_values(by=['file_name'])
            mask_meta_df.to_csv(
                os.path.join(self.tile_dir, 'frames_meta.csv'),
                sep=',',
            )
        # remove mask_channel from self.channel_ids if included
        # NOTE(review): these comprehensions mutate the very list being
        # enumerated, and the second one enumerates the already-modified
        # channel_ids — so the matching normalize_channels entry is never
        # removed, leaving the two lists misaligned. Confirm and fix by
        # computing the index first, then popping from both lists.
        _ = [self.channel_ids.pop(idx)
             for idx, val in enumerate(self.channel_ids)
             if val == mask_channel]
        _ = [self.normalize_channels.pop(idx)
             for idx, val in enumerate(self.channel_ids)
             if val == mask_channel]
        fn_args = []
        for slice_idx in self.slice_ids:
            for time_idx in self.time_ids:
                for pos_idx in np.unique(self.frames_metadata["pos_idx"]):
                    # Loop through all channels and tile from indices
                    cur_tile_indices = self._get_tile_indices(
                        tiled_meta=mask_meta_df,
                        time_idx=time_idx,
                        channel_idx=mask_channel,
                        pos_idx=pos_idx,
                        slice_idx=slice_idx
                    )
                    # Only crop channels where at least one mask tile
                    # passed the min_fraction threshold.
                    if np.any(cur_tile_indices):
                        for i, channel_idx in enumerate(self.channel_ids):
                            cur_args = self.get_crop_tile_args(
                                channel_idx,
                                time_idx,
                                slice_idx,
                                pos_idx,
                                task_type='crop',
                                tile_indices=cur_tile_indices,
                                normalize_im=self.normalize_channels[i],
                            )
                            fn_args.append(cur_args)
        tiled_meta_df_list = mp_utils.mp_crop_save(
            fn_args,
            workers=self.num_workers,
        )
        tiled_metadata = pd.concat(tiled_meta_df_list, ignore_index=True)
        # If there's been tiling done already, add to existing metadata
        prev_tiled_metadata = aux_utils.read_meta(self.tile_dir)
        tiled_metadata = pd.concat(
            [prev_tiled_metadata.reset_index(drop=True),
             tiled_metadata.reset_index(drop=True)],
            axis=0,
            ignore_index=True,
        )
        # Finally, save all the metadata
        tiled_metadata = tiled_metadata.sort_values(by=['file_name'])
        tiled_metadata.to_csv(
            os.path.join(self.tile_dir, "frames_meta.csv"),
            sep=',',
        )
| [
"micro_dl.utils.mp_utils.mp_crop_save",
"micro_dl.utils.aux_utils.validate_metadata_indices",
"micro_dl.utils.aux_utils.read_meta",
"numpy.unique",
"os.makedirs",
"micro_dl.utils.aux_utils.get_meta_idx",
"micro_dl.utils.image_utils.preprocess_imstack",
"os.path.join",
"numpy.any",
"micro_dl.utils.... | [((3507, 3551), 'os.path.join', 'os.path.join', (['output_dir', 'self.str_tile_step'], {}), '(output_dir, self.str_tile_step)\n', (3519, 3551), False, 'import os\n'), ((4556, 4591), 'micro_dl.utils.aux_utils.read_meta', 'aux_utils.read_meta', (['self.input_dir'], {}), '(self.input_dir)\n', (4575, 4591), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((4649, 4836), 'micro_dl.utils.aux_utils.validate_metadata_indices', 'aux_utils.validate_metadata_indices', ([], {'frames_metadata': 'self.frames_metadata', 'time_ids': 'time_ids', 'channel_ids': 'channel_ids', 'slice_ids': 'slice_ids', 'pos_ids': 'pos_ids', 'uniform_structure': '(True)'}), '(frames_metadata=self.frames_metadata,\n time_ids=time_ids, channel_ids=channel_ids, slice_ids=slice_ids,\n pos_ids=pos_ids, uniform_structure=True)\n', (4684, 4836), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((6519, 6592), 'micro_dl.utils.aux_utils.adjust_slice_margins', 'aux_utils.adjust_slice_margins', ([], {'slice_ids': 'self.slice_ids', 'depth': 'max_depth'}), '(slice_ids=self.slice_ids, depth=max_depth)\n', (6549, 6592), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((7449, 7565), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['channel_idx', 'slice_idx', 'time_idx', 'file_name', 'pos_idx',\n 'row_start', 'col_start']"}), "(columns=['channel_idx', 'slice_idx', 'time_idx', 'file_name',\n 'pos_idx', 'row_start', 'col_start'])\n", (7461, 7565), True, 'import pandas as pd\n'), ((19693, 19749), 'micro_dl.utils.mp_utils.mp_crop_save', 'mp_utils.mp_crop_save', (['fn_args'], {'workers': 'self.num_workers'}), '(fn_args, workers=self.num_workers)\n', (19714, 19749), True, 'import micro_dl.utils.mp_utils as mp_utils\n'), ((19897, 19945), 'pandas.concat', 'pd.concat', (['tiled_meta_df_list'], {'ignore_index': '(True)'}), '(tiled_meta_df_list, ignore_index=True)\n', (19906, 19945), True, 'import pandas as pd\n'), ((24922, 24978), 'micro_dl.utils.mp_utils.mp_crop_save', 
'mp_utils.mp_crop_save', (['fn_args'], {'workers': 'self.num_workers'}), '(fn_args, workers=self.num_workers)\n', (24943, 24978), True, 'import micro_dl.utils.mp_utils as mp_utils\n'), ((25039, 25087), 'pandas.concat', 'pd.concat', (['tiled_meta_df_list'], {'ignore_index': '(True)'}), '(tiled_meta_df_list, ignore_index=True)\n', (25048, 25087), True, 'import pandas as pd\n'), ((25190, 25224), 'micro_dl.utils.aux_utils.read_meta', 'aux_utils.read_meta', (['self.tile_dir'], {}), '(self.tile_dir)\n', (25209, 25224), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((3867, 3909), 'os.makedirs', 'os.makedirs', (['self.tile_dir'], {'exist_ok': '(False)'}), '(self.tile_dir, exist_ok=False)\n', (3878, 3909), False, 'import os\n'), ((4403, 4442), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""meta_dir"""'], {}), "(self.tile_dir, 'meta_dir')\n", (4415, 4442), False, 'import os\n'), ((10431, 10465), 'micro_dl.utils.aux_utils.read_meta', 'aux_utils.read_meta', (['self.tile_dir'], {}), '(self.tile_dir)\n', (10450, 10465), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((10531, 10567), 'numpy.unique', 'np.unique', (["tiled_meta['channel_idx']"], {}), "(tiled_meta['channel_idx'])\n", (10540, 10567), True, 'import numpy as np\n'), ((20137, 20204), 'pandas.concat', 'pd.concat', (['[prev_tiled_metadata, tiled_metadata]'], {'ignore_index': '(True)'}), '([prev_tiled_metadata, tiled_metadata], ignore_index=True)\n', (20146, 20204), True, 'import pandas as pd\n'), ((20406, 20452), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""frames_meta.csv"""'], {}), "(self.tile_dir, 'frames_meta.csv')\n", (20418, 20452), False, 'import os\n'), ((22930, 22991), 'micro_dl.utils.mp_utils.mp_tile_save', 'mp_utils.mp_tile_save', (['mask_fn_args'], {'workers': 'self.num_workers'}), '(mask_fn_args, workers=self.num_workers)\n', (22951, 22991), True, 'import micro_dl.utils.mp_utils as mp_utils\n'), ((23066, 23113), 'pandas.concat', 'pd.concat', (['mask_meta_df_list'], 
{'ignore_index': '(True)'}), '(mask_meta_df_list, ignore_index=True)\n', (23075, 23113), True, 'import pandas as pd\n'), ((25586, 25632), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""frames_meta.csv"""'], {}), "(self.tile_dir, 'frames_meta.csv')\n", (25598, 25632), False, 'import os\n'), ((4050, 4089), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""meta_dir"""'], {}), "(self.tile_dir, 'meta_dir')\n", (4062, 4089), False, 'import os\n'), ((12141, 12170), 'micro_dl.utils.aux_utils.read_meta', 'aux_utils.read_meta', (['mask_dir'], {}), '(mask_dir)\n', (12160, 12170), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((12198, 12266), 'micro_dl.utils.aux_utils.get_meta_idx', 'aux_utils.get_meta_idx', (['mask_meta', 'time_idx', 'channel_idx', 'z', 'pos_idx'], {}), '(mask_meta, time_idx, channel_idx, z, pos_idx)\n', (12220, 12266), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((12414, 12474), 'os.path.join', 'os.path.join', (['mask_dir', "mask_meta.loc[meta_idx, 'file_name']"], {}), "(mask_dir, mask_meta.loc[meta_idx, 'file_name'])\n", (12426, 12474), False, 'import os\n'), ((12579, 12658), 'micro_dl.utils.aux_utils.get_meta_idx', 'aux_utils.get_meta_idx', (['self.frames_metadata', 'time_idx', 'channel_idx', 'z', 'pos_idx'], {}), '(self.frames_metadata, time_idx, channel_idx, z, pos_idx)\n', (12601, 12658), True, 'import micro_dl.utils.aux_utils as aux_utils\n'), ((12806, 12883), 'os.path.join', 'os.path.join', (['self.input_dir', "self.frames_metadata.loc[meta_idx, 'file_name']"], {}), "(self.input_dir, self.frames_metadata.loc[meta_idx, 'file_name'])\n", (12818, 12883), False, 'import os\n'), ((21647, 21693), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""frames_meta.csv"""'], {}), "(self.tile_dir, 'frames_meta.csv')\n", (21659, 21693), False, 'import os\n'), ((23278, 23324), 'os.path.join', 'os.path.join', (['self.tile_dir', '"""frames_meta.csv"""'], {}), "(self.tile_dir, 'frames_meta.csv')\n", (23290, 23324), False, 'import 
os\n'), ((23839, 23881), 'numpy.unique', 'np.unique', (["self.frames_metadata['pos_idx']"], {}), "(self.frames_metadata['pos_idx'])\n", (23848, 23881), True, 'import numpy as np\n'), ((24288, 24312), 'numpy.any', 'np.any', (['cur_tile_indices'], {}), '(cur_tile_indices)\n', (24294, 24312), True, 'import numpy as np\n'), ((17532, 17881), 'micro_dl.utils.image_utils.preprocess_imstack', 'image_utils.preprocess_imstack', ([], {'frames_metadata': 'self.frames_metadata', 'input_dir': 'self.input_dir', 'depth': 'self.channel_depth[channel_idx]', 'time_idx': 'time_idx', 'channel_idx': 'channel_idx', 'slice_idx': 'slice_idx', 'pos_idx': 'pos_idx', 'flat_field_im': 'flat_field_im', 'hist_clip_limits': 'self.hist_clip_limits', 'normalize_im': 'self.normalize_channels[list_idx]'}), '(frames_metadata=self.frames_metadata,\n input_dir=self.input_dir, depth=self.channel_depth[channel_idx],\n time_idx=time_idx, channel_idx=channel_idx, slice_idx=slice_idx,\n pos_idx=pos_idx, flat_field_im=flat_field_im, hist_clip_limits=self.\n hist_clip_limits, normalize_im=self.normalize_channels[list_idx])\n', (17562, 17881), True, 'import micro_dl.utils.image_utils as image_utils\n'), ((18782, 18916), 'micro_dl.utils.tile_utils.tile_image', 'tile_utils.tile_image', ([], {'input_image': 'im', 'tile_size': 'self.tile_size', 'step_size': 'self.step_size', 'return_index': '(True)', 'save_dict': 'save_dict'}), '(input_image=im, tile_size=self.tile_size, step_size=\n self.step_size, return_index=True, save_dict=save_dict)\n', (18803, 18916), True, 'import micro_dl.utils.tile_utils as tile_utils\n')] |
import musket_core.generic_config as generic
import musket_core.datasets as datasets
import musket_core.configloader as configloader
import musket_core.utils as utils
import musket_core.context as context
import numpy as np
import keras
import musket_core.net_declaration as net
import musket_core.quasymodels as qm
import os
import tqdm
import sys
def model_function(func):
    """Decorator that tags *func* as a model-producing function.

    Sets the attribute ``model = True`` on the function object and
    returns the function unchanged, so it can be used as ``@model_function``.
    """
    setattr(func, "model", True)
    return func
def _shape(x):
if isinstance(x,tuple):
return [i.shape for i in x]
if isinstance(x,list):
return [i.shape for i in x]
return x.shape
class GenericPipeline(generic.GenericTaskConfig):
    """Generic training/inference pipeline configuration.

    Extends GenericTaskConfig with network construction from declarative
    config, multi-output-aware prediction buffers, and shape bookkeeping
    persisted next to the config (``<path>.shapes``, ``<path>.contribution``).
    """

    def __init__(self,**atrs):
        super().__init__(**atrs)
        # Default K-fold dataset wrapper used by the base class.
        self.dataset_clazz = datasets.DefaultKFoldedDataSet
        # Lazily computed cache for isMultiOutput().
        self._multiOutput=None
        pass

    def createNet(self):
        """Build the keras model from the declared architecture.

        Input shapes are read from the persisted ``<path>.shapes`` file, and
        per-input ``contribution`` attributes are restored from
        ``<path>.contribution`` when present.
        """
        inp,output=utils.load_yaml(self.path + ".shapes")
        if not hasattr(context.context,"net_cx"):
            context.context.net_cx=[]
        contributions=None
        if os.path.exists(self.path+".contribution"):
            contributions=utils.load(self.path+".contribution")
        else:
            # NOTE(review): redundant — contributions is already None here.
            contributions=None
        if isinstance(inp,list):
            # Multiple model inputs: one keras Input per recorded shape.
            inputs=[keras.Input(x) for x in inp]
            if contributions is not None:
                if isinstance(contributions, list):
                    # Per-input contributions.
                    for i in range(len(inputs)):
                        inputs[i].contribution=contributions[i]
                else:
                    # Single contribution shared by all inputs.
                    for i in range(len(inputs)):
                        inputs[i].contribution=contributions
        else:
            i=keras.Input(inp);
            i.contribution=contributions
            inputs=[i]
        m=net.create_model_from_config(self.declarations,inputs,self.architecture,self.imports)
        if context.isTrainMode():
            # Persist and reset the net construction context for training.
            if hasattr(context.context, "net_cx"):
                utils.save(self.path+".ncx", context.context.net_cx)
                context.context.net_cx=[]
        return m

    def load_writeable_dataset(self, ds, path):
        """Load previously saved predictions for *ds* from *path*.

        Multi-output predictions are pickled (utils.load), single-output
        ones are stored as a numpy array (np.load).
        """
        if self.isMultiOutput():
            rr = utils.load(path)
            resName = (ds.name if hasattr(ds, "name") else "") + "_predictions"
            result = datasets.BufferedWriteableDS(ds, resName, path, rr)
        else:
            rr = np.load(path)
            resName = (ds.name if hasattr(ds, "name") else "") + "_predictions"
            result = datasets.BufferedWriteableDS(ds, resName, path, rr)
        return result

    def create_writeable_dataset(self, dataset:datasets.DataSet, dsPath:str)->datasets.WriteableDataSet:
        """Create an empty prediction buffer for *dataset* backed by *dsPath*.

        Uses pickle serialization when the model has multiple outputs.
        """
        inp,output=utils.load_yaml(self.path + ".shapes")
        resName = (dataset.name if hasattr(dataset, "name") else "") + "_predictions"
        result = datasets.BufferedWriteableDS(dataset, resName, dsPath,pickle=self.isMultiOutput())
        return result

    def isMultiOutput(self):
        """Return True if the model has more than one output (cached)."""
        if self._multiOutput is not None:
            return self._multiOutput
        inp,output=utils.load_yaml(self.path + ".shapes")
        self._multiOutput= len(output)>1 and isinstance(output, list)
        return self._multiOutput

    def predict_on_batch(self, mdl, ttflips, batch):
        """Predict on one batch.

        For multi-output models the per-output predictions are transposed
        into a per-sample list of outputs.
        NOTE(review): the ttflips argument is accepted but unused here —
        test-time augmentation appears to be handled by wrapping the model
        instead; confirm.
        """
        res = mdl.predict(batch.images)
        if self.isMultiOutput():
            result=[]
            for i in range(len(res[0])):
                elementOutputs=[]
                for x in res:
                    elementOutputs.append(x[i])
                result.append(elementOutputs)
            return result
        return res

    def evaluateAll(self,ds, fold:int,stage=-1,negatives="real",ttflips=None,batchSize=32):
        """Generator yielding validation batches with predictions attached.

        Each yielded batch carries ``results`` (predictions) and
        ``ground_truth``. Generators are terminated in the finally block.
        """
        folds = self.kfold(ds, range(0, len(ds)),batch=batchSize)
        vl, vg, test_g = folds.generator(fold, False,negatives=negatives,returnBatch=True)
        indexes = folds.sampledIndexes(fold, False, negatives)
        m = self.load_model(fold, stage)
        num=0
        with tqdm.tqdm(total=len(indexes), unit="files", desc="segmentation of validation set from " + str(fold)) as pbar:
            try:
                for f in test_g():
                    if num>=len(indexes): break
                    x, y, b = f
                    z = self.predict_on_batch(m,ttflips,b)
                    ids=b.data[0]
                    b.results=z
                    b.ground_truth=b.data[1]
                    yield b
                    num=num+len(z)
                    pbar.update(len(ids))
            finally:
                # Stop the background generators even on early exit.
                vl.terminate()
                vg.terminate()
        pass

    def evaluate_all_to_arrays(self,ds, fold:int,stage=-1,negatives="real",ttflips=None,batchSize=32):
        """Accumulate all validation predictions/labels into two arrays."""
        lastFullValPred = None
        lastFullValLabels = None
        for v in self.evaluateAll(ds, fold, stage,negatives,ttflips,batchSize):
            if lastFullValPred is None:
                lastFullValPred = v.results
                lastFullValLabels = v.ground_truth
            else:
                lastFullValPred = np.append(lastFullValPred, v.results, axis=0)
                lastFullValLabels = np.append(lastFullValLabels, v.ground_truth, axis=0)
        return lastFullValPred,lastFullValLabels

    def predict_on_dataset(self, dataset, fold=0, stage=0, limit=-1, batch_size=32, ttflips=False, cacheModel=False):
        """Generator yielding batches of *dataset* with predictions attached.

        :param cacheModel: reuse a previously created inference model
            stored on this instance (avoids reloading weights per call)
        """
        if cacheModel:
            if hasattr(self, "_mdl"):
                mdl=self._mdl
            else:
                mdl = self.createNetForInference(fold, stage)
                self._mdl=mdl
        else:
            mdl = self.createNetForInference(fold, stage)
        if self.testTimeAugmentation is not None:
            # Wrap the model so predictions are averaged over augmentations.
            mdl=qm.TestTimeAugModel(mdl,net.create_test_time_aug(self.testTimeAugmentation,self.imports))
        if self.preprocessing is not None:
            dataset = net.create_preprocessor_from_config(self.declarations, dataset, self.preprocessing, self.imports)
        for original_batch in datasets.generic_batch_generator(dataset, batch_size, limit):
            res = self.predict_on_batch(mdl, ttflips, original_batch)
            original_batch.results=res
            yield original_batch

    def predict_in_dataset(self, dataset, fold, stage, cb, data, limit=-1, batch_size=32, ttflips=False):
        """Run prediction and invoke ``cb(id, prediction, data)`` per item."""
        with tqdm.tqdm(total=len(dataset), unit="files", desc="prediction from " + str(dataset)) as pbar:
            for v in self.predict_on_dataset(dataset, fold=fold, stage=stage, limit=limit, batch_size=batch_size, ttflips=ttflips):
                b=v
                for i in range(len(b.data)):
                    id=b.data[i]
                    cb(id,b.results[i],data)
                pbar.update(batch_size)

    def predict_all_to_array_with_ids(self, dataset, fold, stage, limit=-1, batch_size=32, ttflips=False):
        """Run prediction and return (predictions array, list of ids)."""
        res=[]
        ids=[]
        with tqdm.tqdm(total=len(dataset), unit="files", desc="prediction from " + str(dataset)) as pbar:
            for v in self.predict_on_dataset(dataset, fold=fold, stage=stage, limit=limit, batch_size=batch_size, ttflips=ttflips):
                b=v
                for i in range(len(b.data)):
                    id=b.data[i]
                    ids.append(id)
                    res.append(b.results[i])
                pbar.update(batch_size)
        return np.array(res),ids

    def fit(self, dataset=None, subsample=1.0, foldsToExecute=None, start_from_stage=0, drawingFunction=None,parallel = False):
        """Train the pipeline, persisting input/output shapes first."""
        dataset = self.init_shapes(dataset)
        return super().fit(dataset,subsample,foldsToExecute,start_from_stage,drawingFunction,parallel=parallel)

    def validate(self):
        """Validate the configuration after persisting shapes."""
        self.init_shapes(None)
        super().validate()

    def init_shapes(self, dataset):
        """Persist shapes (and contributions, if any) of the first dataset item.

        :param dataset: dataset to inspect, or None to use the configured one
        :return: the (possibly preprocessed) dataset
        """
        if dataset is None:
            dataset = self.get_dataset()
        self._dataset = dataset
        if self.preprocessing is not None:
            dataset = net.create_preprocessor_from_config(self.declarations, dataset, self.preprocessing, self.imports)
        predItem = dataset[0]
        # Both attribute spellings are persisted to the same file.
        if hasattr(dataset, "contribution"):
            utils.save(self.path+ ".contribution",getattr(dataset, "contribution"))
        elif hasattr(dataset, "contributions"):
            utils.save(self.path+ ".contribution",getattr(dataset, "contributions"))
        utils.save_yaml(self.path + ".shapes", (_shape(predItem.x), _shape(predItem.y)))
        return dataset
def parse(path,extra=None) -> GenericPipeline:
    """Parse a pipeline configuration.

    *path* may be a concrete config file or an experiment name; in the latter
    case the config is resolved relative to the current project as
    ``<project>/experiments/<name>/config.yaml``.  A project-level
    ``common.yaml`` is used as *extra* when none was given, and every ``*.py``
    file under ``<project>/modules`` is registered as an extra import.
    """
    extraImports = []
    if isinstance(path, str):
        if not os.path.exists(path) or os.path.isdir(path):
            pth = context.get_current_project_path()
            if os.path.exists(pth + "/experiments/" + path + "/config.yaml"):
                path = pth + "/experiments/" + path + "/config.yaml"
            if os.path.exists(pth + "/common.yaml") and extra is None:
                extra = pth + "/common.yaml"
            if os.path.exists(pth + "/modules"):
                # Register the modules folder once; the original re-inserted
                # it into sys.path for every file in the directory.
                sys.path.insert(0, pth + "/modules")
                for m in os.listdir(pth + "/modules"):
                    # Only genuine .py files; the original '".py" in m' test
                    # also matched e.g. compiled .pyc files.
                    if m.endswith(".py"):
                        extraImports.append(m[:-3])
    cfg = configloader.parse("generic", path, extra)
    cfg.path = path
    for e in extraImports:
        cfg.imports.append(e)
    return cfg
"sys.path.insert",
"musket_core.datasets.BufferedWriteableDS",
"musket_core.utils.save",
"musket_core.datasets.generic_batch_generator",
"numpy.array",
"musket_core.context.isTrainMode",
"numpy.load",
"os.path.exists",
"os.listdir",
"musket_core.configloader.parse",
"os.path.isdir",
"musket_co... | [((9086, 9128), 'musket_core.configloader.parse', 'configloader.parse', (['"""generic"""', 'path', 'extra'], {}), "('generic', path, extra)\n", (9104, 9128), True, 'import musket_core.configloader as configloader\n'), ((841, 879), 'musket_core.utils.load_yaml', 'utils.load_yaml', (["(self.path + '.shapes')"], {}), "(self.path + '.shapes')\n", (856, 879), True, 'import musket_core.utils as utils\n'), ((1006, 1049), 'os.path.exists', 'os.path.exists', (["(self.path + '.contribution')"], {}), "(self.path + '.contribution')\n", (1020, 1049), False, 'import os\n'), ((1735, 1827), 'musket_core.net_declaration.create_model_from_config', 'net.create_model_from_config', (['self.declarations', 'inputs', 'self.architecture', 'self.imports'], {}), '(self.declarations, inputs, self.architecture,\n self.imports)\n', (1763, 1827), True, 'import musket_core.net_declaration as net\n'), ((1832, 1853), 'musket_core.context.isTrainMode', 'context.isTrainMode', ([], {}), '()\n', (1851, 1853), True, 'import musket_core.context as context\n'), ((2651, 2689), 'musket_core.utils.load_yaml', 'utils.load_yaml', (["(self.path + '.shapes')"], {}), "(self.path + '.shapes')\n", (2666, 2689), True, 'import musket_core.utils as utils\n'), ((3026, 3064), 'musket_core.utils.load_yaml', 'utils.load_yaml', (["(self.path + '.shapes')"], {}), "(self.path + '.shapes')\n", (3041, 3064), True, 'import musket_core.utils as utils\n'), ((5929, 5989), 'musket_core.datasets.generic_batch_generator', 'datasets.generic_batch_generator', (['dataset', 'batch_size', 'limit'], {}), '(dataset, batch_size, limit)\n', (5961, 5989), True, 'import musket_core.datasets as datasets\n'), ((1075, 1114), 'musket_core.utils.load', 'utils.load', (["(self.path + '.contribution')"], {}), "(self.path + '.contribution')\n", (1085, 1114), True, 'import musket_core.utils as utils\n'), ((1643, 1659), 'keras.Input', 'keras.Input', (['inp'], {}), '(inp)\n', (1654, 1659), False, 'import keras\n'), ((2134, 2150), 
'musket_core.utils.load', 'utils.load', (['path'], {}), '(path)\n', (2144, 2150), True, 'import musket_core.utils as utils\n'), ((2252, 2303), 'musket_core.datasets.BufferedWriteableDS', 'datasets.BufferedWriteableDS', (['ds', 'resName', 'path', 'rr'], {}), '(ds, resName, path, rr)\n', (2280, 2303), True, 'import musket_core.datasets as datasets\n'), ((2335, 2348), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (2342, 2348), True, 'import numpy as np\n'), ((2450, 2501), 'musket_core.datasets.BufferedWriteableDS', 'datasets.BufferedWriteableDS', (['ds', 'resName', 'path', 'rr'], {}), '(ds, resName, path, rr)\n', (2478, 2501), True, 'import musket_core.datasets as datasets\n'), ((5801, 5903), 'musket_core.net_declaration.create_preprocessor_from_config', 'net.create_preprocessor_from_config', (['self.declarations', 'dataset', 'self.preprocessing', 'self.imports'], {}), '(self.declarations, dataset, self.\n preprocessing, self.imports)\n', (5836, 5903), True, 'import musket_core.net_declaration as net\n'), ((7274, 7287), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (7282, 7287), True, 'import numpy as np\n'), ((7863, 7965), 'musket_core.net_declaration.create_preprocessor_from_config', 'net.create_preprocessor_from_config', (['self.declarations', 'dataset', 'self.preprocessing', 'self.imports'], {}), '(self.declarations, dataset, self.\n preprocessing, self.imports)\n', (7898, 7965), True, 'import musket_core.net_declaration as net\n'), ((8503, 8522), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8516, 8522), False, 'import os\n'), ((8540, 8574), 'musket_core.context.get_current_project_path', 'context.get_current_project_path', ([], {}), '()\n', (8572, 8574), True, 'import musket_core.context as context\n'), ((8590, 8651), 'os.path.exists', 'os.path.exists', (["(pth + '/experiments/' + path + '/config.yaml')"], {}), "(pth + '/experiments/' + path + '/config.yaml')\n", (8604, 8651), False, 'import os\n'), ((8833, 8865), 'os.path.exists', 
'os.path.exists', (["(pth + '/modules')"], {}), "(pth + '/modules')\n", (8847, 8865), False, 'import os\n'), ((1228, 1242), 'keras.Input', 'keras.Input', (['x'], {}), '(x)\n', (1239, 1242), False, 'import keras\n'), ((1922, 1976), 'musket_core.utils.save', 'utils.save', (["(self.path + '.ncx')", 'context.context.net_cx'], {}), "(self.path + '.ncx', context.context.net_cx)\n", (1932, 1976), True, 'import musket_core.utils as utils\n'), ((4993, 5038), 'numpy.append', 'np.append', (['lastFullValPred', 'v.results'], {'axis': '(0)'}), '(lastFullValPred, v.results, axis=0)\n', (5002, 5038), True, 'import numpy as np\n'), ((5075, 5127), 'numpy.append', 'np.append', (['lastFullValLabels', 'v.ground_truth'], {'axis': '(0)'}), '(lastFullValLabels, v.ground_truth, axis=0)\n', (5084, 5127), True, 'import numpy as np\n'), ((5670, 5735), 'musket_core.net_declaration.create_test_time_aug', 'net.create_test_time_aug', (['self.testTimeAugmentation', 'self.imports'], {}), '(self.testTimeAugmentation, self.imports)\n', (5694, 5735), True, 'import musket_core.net_declaration as net\n'), ((8479, 8499), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8493, 8499), False, 'import os\n'), ((8723, 8759), 'os.path.exists', 'os.path.exists', (["(pth + '/common.yaml')"], {}), "(pth + '/common.yaml')\n", (8737, 8759), False, 'import os\n'), ((8890, 8918), 'os.listdir', 'os.listdir', (["(pth + '/modules')"], {}), "(pth + '/modules')\n", (8900, 8918), False, 'import os\n'), ((8938, 8974), 'sys.path.insert', 'sys.path.insert', (['(0)', "(pth + '/modules')"], {}), "(0, pth + '/modules')\n", (8953, 8974), False, 'import sys\n')] |
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import seaborn as sns
def plot_3d_with_hue(df, cols=None, hue_col='hue', title='',
                     xlabel='X', ylabel='Y', zlabel='Z', figsize=(8, 8),
                     hue_color_dict=None, fig_filepath=None):
    '''
    Generalized function to plot pandas dataframe values in 3d.

    df: DataFrame containing the datapoints to plot and a column which corresponds to hue
    cols: list of column names. default=['x','y','z']
    hue_col: column whose values define the subsets drawn with separate colors/labels
    title, xlabel, ylabel, zlabel, figsize: args for plot
    hue_color_dict: optional, dictionary of colors which correspond to each hue value.
        keys are values in hue_col, values are colors
    fig_filepath: optional, filepath to save fig
    '''
    # Avoid mutable default arguments; resolve the defaults per call instead.
    if cols is None:
        cols = ['x', 'y', 'z']
    if hue_color_dict is None:
        hue_color_dict = {}
    fig = plt.figure(figsize=figsize)
    ax = Axes3D(fig)
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
    for val in df[hue_col].unique():
        # Select the subset for this hue value; no copy needed for read-only use.
        df_val = df[df[hue_col] == val]
        if val in hue_color_dict:
            ax.scatter(df_val[cols[0]], df_val[cols[1]], df_val[cols[2]],
                       label=val, color=hue_color_dict[val])
        else:
            ax.scatter(df_val[cols[0]], df_val[cols[1]], df_val[cols[2]], label=val)
    plt.legend()
    if fig_filepath is not None:
        plt.savefig(fig_filepath)
    plt.show()
def plot_correlation_heatmap(data, title='', xlabel='X', ylabel='Y',
                             figsize=(8, 8), fig_filepath=None):
    '''
    Generalized function to plot correlation between pandas dataframe columns.

    data: dataframe with columns to calculate correlation
    title, xlabel, ylabel, figsize: args for plot
    fig_filepath: optional, filepath to save fig
    '''
    sns.set(style="white")
    corr = data.corr()
    # Generate a mask for the upper triangle.
    # np.bool was removed in NumPy 1.24; the builtin bool is the documented
    # replacement and behaves identically here.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    fig, ax = plt.subplots(figsize=figsize)
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio.
    # (The original also set a hard-coded title here, which was immediately
    # overridden by ax.set_title(title) below.)
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if fig_filepath is not None:
        plt.savefig(fig_filepath)
    plt.show()
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.diverging_palette",
"numpy.triu_indices_from",
"seaborn.heatmap",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((835, 862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (845, 862), True, 'from matplotlib import pyplot as plt\n'), ((872, 883), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (878, 883), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1326, 1338), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1336, 1338), True, 'from matplotlib import pyplot as plt\n'), ((1406, 1416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1414, 1416), True, 'from matplotlib import pyplot as plt\n'), ((1788, 1810), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (1795, 1810), True, 'import seaborn as sns\n'), ((1888, 1922), 'numpy.zeros_like', 'np.zeros_like', (['corr'], {'dtype': 'np.bool'}), '(corr, dtype=np.bool)\n', (1901, 1922), True, 'import numpy as np\n'), ((1981, 2010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1993, 2010), True, 'from matplotlib import pyplot as plt\n'), ((2065, 2109), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(220)', '(10)'], {'as_cmap': '(True)'}), '(220, 10, as_cmap=True)\n', (2086, 2109), True, 'import seaborn as sns\n'), ((2255, 2373), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'mask': 'mask', 'cmap': 'cmap', 'vmax': '(0.3)', 'center': '(0)', 'square': '(True)', 'linewidths': '(0.5)', 'cbar_kws': "{'shrink': 0.5}"}), "(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True,\n linewidths=0.5, cbar_kws={'shrink': 0.5})\n", (2266, 2373), True, 'import seaborn as sns\n'), ((2525, 2535), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2533, 2535), True, 'from matplotlib import pyplot as plt\n'), ((1375, 1400), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_filepath'], {}), '(fig_filepath)\n', (1386, 1400), True, 'from matplotlib import pyplot as plt\n'), ((1932, 1958), 'numpy.triu_indices_from', 'np.triu_indices_from', (['mask'], 
{}), '(mask)\n', (1952, 1958), True, 'import numpy as np\n'), ((2495, 2520), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_filepath'], {}), '(fig_filepath)\n', (2506, 2520), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
# vids = np.load('data/mnist_training_fast_videos.npy')
# bbox = np.load('data/mnist_training_fast_trajectories.npy')
# bbox[:, :, :, 3] = vids.shape[2] - bbox[:, :, :, 3]
# bbox[:, :, :, 1] = vids.shape[2] - bbox[:, :, :, 1]
# bbox = bbox.swapaxes(1, 2)
# length = 20000
# X_train = np.zeros((length, 5, 2, 28, 28))
# y_train = np.zeros((length, 5, 5))
# i = 0
# count = 0
# while count < length:
# print(f'{i/length}: There are {count} examples so far')
# num_t = vids.shape[1]
# indexes = np.triu(np.ones((num_t, num_t)) - np.eye(num_t))
# indexes = np.array([np.where(indexes)[0], np.where(indexes)[1]]).T
# inds = np.random.choice(indexes.shape[0], 250)
# indexes = indexes[inds]
# for idx in indexes:
# seq1 = vids[i, idx[0], :, :]
# seq2 = vids[i, idx[1], :, :]
# bbox1 = bbox[i, idx[0], :, :].astype(int)
# bbox2 = bbox[i, idx[1], :, :].astype(int)
# for j in range(5):
# # print(bbox1[j])
# y1 = min(127, max(0, bbox1[j, 1]))
# y2 = min(127, max(0, bbox1[j, 3]))
# x1 = min(127, max(0, bbox1[j, 0]))
# x2 = min(127, max(0, bbox1[j, 2]))
# img = seq1[y1:y2, x1:x2]
# # print(x1, x2, y1, y2)
# img = cv2.resize(img, (28, 28))
# X_train[count, j, 0, :, :] = img / 255.
# permidx = np.random.permutation(5)
# for j, index in enumerate(permidx):
# # print(bbox2[j])
# y1 = min(127, max(0, bbox2[j, 1]))
# y2 = min(127, max(0, bbox2[j, 3]))
# x1 = min(127, max(0, bbox2[j, 0]))
# x2 = min(127, max(0, bbox2[j, 2]))
# # print(x1, x2, y1, y2)
# img = seq2[y1:y2, x1:x2]
# img = cv2.resize(img, (28, 28))
# X_train[count, index, 1, :, :] = img / 255.
# y_train[count, j, index] = 1
# # assert 2 == 1
# count += 1
# i += 1
# permidx = np.random.permutation(X_train.shape[0])
# X_train = X_train[permidx]
# y_train = y_train[permidx]
# np.save('datasets/X_train.npy', X_train)
# np.save('datasets/y_train.npy', y_train)
# Build pairwise frame-matching test examples from the icons8 videos: for each
# sampled pair of time steps, crop the 5 tracked objects out of both frames
# (resized to 28x28, scaled to [0, 1]) and record the permutation matching
# frame-1 objects to frame-2 objects as a 5x5 one-hot matrix.
vids = np.load('data/icons8_testing_fast_videos.npy')
bbox = np.load('data/icons8_testing_fast_trajectories.npy')
# Flip the y coordinates of the boxes — presumably the trajectories use a
# bottom-left origin while image rows index from the top (TODO confirm).
bbox[:, :, :, 3] = vids.shape[2] - bbox[:, :, :, 3]
bbox[:, :, :, 1] = vids.shape[2] - bbox[:, :, :, 1]
bbox = bbox.swapaxes(1, 2)
length = 20000
X_test = np.zeros((length, 5, 2, 28, 28))
y_test = np.zeros((length, 5, 5))
i = 0
count = 0
# NOTE(review): the inner loop adds 250 examples per video without re-checking
# count < length; this only avoids an IndexError because 250 divides 20000.
while count < length:
    print(f'{i/length}: There are {count} examples so far')
    num_t = vids.shape[1]
    # All unordered pairs of distinct time steps (upper triangle, no diagonal).
    indexes = np.triu(np.ones((num_t, num_t)) - np.eye(num_t))
    indexes = np.array([np.where(indexes)[0], np.where(indexes)[1]]).T
    # Sample 250 frame pairs from the current video.
    inds = np.random.choice(indexes.shape[0], 250)
    indexes = indexes[inds]
    for idx in indexes:
        seq1 = vids[i, idx[0], :, :]
        seq2 = vids[i, idx[1], :, :]
        bbox1 = bbox[i, idx[0], :, :].astype(int)
        bbox2 = bbox[i, idx[1], :, :].astype(int)
        for j in range(5):
            # Clamp the box corners into the frame before cropping.
            y1 = min(127, max(0, bbox1[j, 1]))
            y2 = min(127, max(0, bbox1[j, 3]))
            x1 = min(127, max(0, bbox1[j, 0]))
            x2 = min(127, max(0, bbox1[j, 2]))
            img = seq1[y1:y2, x1:x2]
            img = cv2.resize(img, (28, 28))
            X_test[count, j, 0, :, :] = img / 255.
        # Store the second frame's objects in shuffled order and record the
        # ground-truth assignment j -> index as a one-hot matrix.
        permidx = np.random.permutation(5)
        for j, index in enumerate(permidx):
            y1 = min(127, max(0, bbox2[j, 1]))
            y2 = min(127, max(0, bbox2[j, 3]))
            x1 = min(127, max(0, bbox2[j, 0]))
            x2 = min(127, max(0, bbox2[j, 2]))
            img = seq2[y1:y2, x1:x2]
            img = cv2.resize(img, (28, 28))
            X_test[count, index, 1, :, :] = img / 255.
            y_test[count, j, index] = 1
        count += 1
    i += 1
# Shuffle the assembled examples before saving.
permidx = np.random.permutation(X_test.shape[0])
X_test = X_test[permidx]
y_test = y_test[permidx]
np.save('datasets/X_test.npy', X_test)
np.save('datasets/y_test.npy', y_test)
| [
"numpy.eye",
"numpy.ones",
"numpy.random.choice",
"numpy.where",
"numpy.zeros",
"cv2.resize",
"numpy.load",
"numpy.save",
"numpy.random.permutation"
] | [((2227, 2273), 'numpy.load', 'np.load', (['"""data/icons8_testing_fast_videos.npy"""'], {}), "('data/icons8_testing_fast_videos.npy')\n", (2234, 2273), True, 'import numpy as np\n'), ((2281, 2333), 'numpy.load', 'np.load', (['"""data/icons8_testing_fast_trajectories.npy"""'], {}), "('data/icons8_testing_fast_trajectories.npy')\n", (2288, 2333), True, 'import numpy as np\n'), ((2492, 2524), 'numpy.zeros', 'np.zeros', (['(length, 5, 2, 28, 28)'], {}), '((length, 5, 2, 28, 28))\n', (2500, 2524), True, 'import numpy as np\n'), ((2534, 2558), 'numpy.zeros', 'np.zeros', (['(length, 5, 5)'], {}), '((length, 5, 5))\n', (2542, 2558), True, 'import numpy as np\n'), ((4067, 4105), 'numpy.random.permutation', 'np.random.permutation', (['X_test.shape[0]'], {}), '(X_test.shape[0])\n', (4088, 4105), True, 'import numpy as np\n'), ((4156, 4194), 'numpy.save', 'np.save', (['"""datasets/X_test.npy"""', 'X_test'], {}), "('datasets/X_test.npy', X_test)\n", (4163, 4194), True, 'import numpy as np\n'), ((4195, 4233), 'numpy.save', 'np.save', (['"""datasets/y_test.npy"""', 'y_test'], {}), "('datasets/y_test.npy', y_test)\n", (4202, 4233), True, 'import numpy as np\n'), ((2830, 2869), 'numpy.random.choice', 'np.random.choice', (['indexes.shape[0]', '(250)'], {}), '(indexes.shape[0], 250)\n', (2846, 2869), True, 'import numpy as np\n'), ((3527, 3551), 'numpy.random.permutation', 'np.random.permutation', (['(5)'], {}), '(5)\n', (3548, 3551), True, 'import numpy as np\n'), ((2707, 2730), 'numpy.ones', 'np.ones', (['(num_t, num_t)'], {}), '((num_t, num_t))\n', (2714, 2730), True, 'import numpy as np\n'), ((2733, 2746), 'numpy.eye', 'np.eye', (['num_t'], {}), '(num_t)\n', (2739, 2746), True, 'import numpy as np\n'), ((3432, 3457), 'cv2.resize', 'cv2.resize', (['img', '(28, 28)'], {}), '(img, (28, 28))\n', (3442, 3457), False, 'import cv2\n'), ((3905, 3930), 'cv2.resize', 'cv2.resize', (['img', '(28, 28)'], {}), '(img, (28, 28))\n', (3915, 3930), False, 'import cv2\n'), ((2772, 2789), 
'numpy.where', 'np.where', (['indexes'], {}), '(indexes)\n', (2780, 2789), True, 'import numpy as np\n'), ((2794, 2811), 'numpy.where', 'np.where', (['indexes'], {}), '(indexes)\n', (2802, 2811), True, 'import numpy as np\n')] |
from minisom import MiniSom
from numpy import genfromtxt,array,linalg,zeros,mean,std,apply_along_axis
"""
This script shows how to use MiniSom on the Iris dataset.
In partucular it shows how to train MiniSom and how to visualize the result.
ATTENTION: pylab is required for the visualization.
"""
# reading the iris dataset in the csv format
# (downloaded from http://aima.cs.berkeley.edu/data/iris.csv)
# Load the 4 numeric iris features and normalize each sample to unit length.
data = genfromtxt('iris.csv', delimiter=',',usecols=(0,1,2,3))
data = apply_along_axis(lambda x: x/linalg.norm(x),1,data) # data normalization
### Initialization and training ###
# 7x7 map over 4-dimensional inputs.
som = MiniSom(7,7,4,sigma=1.0,learning_rate=0.5)
#som.random_weights_init(data)
print("Training...")
som.train_random(data,100) # random training
print("\n...ready!")
### Plotting the response for each pattern in the iris dataset ###
from pylab import plot,axis,show,pcolor,colorbar,bone
bone()
pcolor(som.distance_map().T) # plotting the distance map as background
colorbar()
# Map the species labels to integer classes 0..2.
target = genfromtxt('iris.csv',delimiter=',',usecols=(4),dtype=str) # loading the labels
t = zeros(len(target),dtype=int)
t[target == 'setosa'] = 0
t[target == 'versicolor'] = 1
t[target == 'virginica'] = 2
# use different colors and markers for each label
markers = ['o','s','D']
colors = ['r','g','b']
for cnt,xx in enumerate(data):
    w = som.winner(xx) # getting the winner
    # place a marker on the winning position for the sample xx
    plot(w[0]+.5,w[1]+.5,markers[t[cnt]],markerfacecolor='None',
         markeredgecolor=colors[t[cnt]],markersize=12,markeredgewidth=2)
axis([0,som.weights.shape[0],0,som.weights.shape[1]])
show() # show the figure | [
"pylab.axis",
"pylab.bone",
"pylab.plot",
"minisom.MiniSom",
"pylab.colorbar",
"numpy.linalg.norm",
"numpy.genfromtxt",
"pylab.show"
] | [((437, 496), 'numpy.genfromtxt', 'genfromtxt', (['"""iris.csv"""'], {'delimiter': '""","""', 'usecols': '(0, 1, 2, 3)'}), "('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))\n", (447, 496), False, 'from numpy import genfromtxt, array, linalg, zeros, mean, std, apply_along_axis\n'), ((616, 662), 'minisom.MiniSom', 'MiniSom', (['(7)', '(7)', '(4)'], {'sigma': '(1.0)', 'learning_rate': '(0.5)'}), '(7, 7, 4, sigma=1.0, learning_rate=0.5)\n', (623, 662), False, 'from minisom import MiniSom\n'), ((899, 905), 'pylab.bone', 'bone', ([], {}), '()\n', (903, 905), False, 'from pylab import plot, axis, show, pcolor, colorbar, bone\n'), ((977, 987), 'pylab.colorbar', 'colorbar', ([], {}), '()\n', (985, 987), False, 'from pylab import plot, axis, show, pcolor, colorbar, bone\n'), ((997, 1056), 'numpy.genfromtxt', 'genfromtxt', (['"""iris.csv"""'], {'delimiter': '""","""', 'usecols': '(4)', 'dtype': 'str'}), "('iris.csv', delimiter=',', usecols=4, dtype=str)\n", (1007, 1056), False, 'from numpy import genfromtxt, array, linalg, zeros, mean, std, apply_along_axis\n'), ((1554, 1610), 'pylab.axis', 'axis', (['[0, som.weights.shape[0], 0, som.weights.shape[1]]'], {}), '([0, som.weights.shape[0], 0, som.weights.shape[1]])\n', (1558, 1610), False, 'from pylab import plot, axis, show, pcolor, colorbar, bone\n'), ((1608, 1614), 'pylab.show', 'show', ([], {}), '()\n', (1612, 1614), False, 'from pylab import plot, axis, show, pcolor, colorbar, bone\n'), ((1425, 1564), 'pylab.plot', 'plot', (['(w[0] + 0.5)', '(w[1] + 0.5)', 'markers[t[cnt]]'], {'markerfacecolor': '"""None"""', 'markeredgecolor': 'colors[t[cnt]]', 'markersize': '(12)', 'markeredgewidth': '(2)'}), "(w[0] + 0.5, w[1] + 0.5, markers[t[cnt]], markerfacecolor='None',\n markeredgecolor=colors[t[cnt]], markersize=12, markeredgewidth=2)\n", (1429, 1564), False, 'from pylab import plot, axis, show, pcolor, colorbar, bone\n'), ((529, 543), 'numpy.linalg.norm', 'linalg.norm', (['x'], {}), '(x)\n', (540, 543), False, 'from numpy 
import genfromtxt, array, linalg, zeros, mean, std, apply_along_axis\n')] |
# -*- coding: utf-8 -*-
"""Provides functions for handling images."""
import pygame
try:
import numpy
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from thorpy import miscgui
def detect_frame(surf, vacuum=(255, 255, 255)):
    """Returns a Rect of the minimum size to contain all that is not <vacuum>.

    Falls back to returning the surface's size when numpy is unavailable.
    Raises ValueError if every pixel equals <vacuum>.
    """
    if not HAS_NUMPY:
        miscgui.functions.debug_msg("Numpy was not found on this machine.\
        Cannot call detect_frame. Returns surface's size instead.")
        return surf.get_size()
    print("detecting frame...")
    array = pygame.surfarray.array3d(surf)
    # Vectorized scan: True where a pixel differs from <vacuum> in any channel.
    # Replaces the original per-pixel Python double loop with one numpy pass.
    mask = (array != numpy.array(vacuum)).any(axis=2)
    xs, ys = numpy.nonzero(mask)
    if xs.size == 0:
        # Nothing but vacuum: the original crashed here too (unbound first_x);
        # make the failure explicit instead.
        raise ValueError("detect_frame: surface contains only vacuum pixels")
    first_x, last_x = int(xs.min()), int(xs.max())
    miny, maxy = int(ys.min()), int(ys.max())
    return pygame.Rect(first_x, miny, last_x - first_x, maxy - miny)
def extract_frames(inGif, outFolder):
    """Extract every frame of the GIF <inGif> into <outFolder>.

    Needs PIL. Frames are saved as GIF files named with a six-digit,
    zero-padded index so that filenames sort in frame order. Returns True.
    """
    from PIL import Image
    frame = Image.open(inGif)
    nframes = 0
    while frame:
        # Consistent zero-padding for every frame. The original special-cased
        # nframes < 10 with two-digit names ("0N") while later frames got six
        # digits, which broke lexicographic ordering of the output files.
        snframe = str(nframes).zfill(6)
        frame.save(outFolder + snframe, 'GIF')
        nframes += 1
        try:
            frame.seek(nframes)
        except EOFError:
            # Past the last frame of the GIF.
            break
    return True
def get_resized_image(image, dims):
    """Scale <image> to fit inside <dims> without deformation.

    If the aspect ratios differ, the image's larger side is made to fit the
    requested dimensions. Returns the scaled surface.
    """
    width, height = image.get_size()
    scale = min(float(dims[0]) / width, float(dims[1]) / height)
    new_size = (int(width * scale), int(height * scale))
    return pygame.transform.scale(image, new_size)
def get_centered_image(img, dims, bckgr):
    """Return a <dims>-sized surface filled with <bckgr>, with <img> blitted
    centered on it.

    NOTE(review): when <img> is larger than <dims>, the offset is made
    positive (abs) instead of negative, shifting rather than centering the
    oversized image -- kept as in the original.
    """
    s = pygame.Surface(dims)
    s.fill(bckgr)
    img_w, img_h = img.get_size()
    # Integer offsets: Python 3 true division produced float blit coordinates.
    dx = abs((dims[0] - img_w) // 2)
    dy = abs((dims[1] - img_h) // 2)
    s.blit(img, (dx, dy))
    return s
def get_colorkey(colorkey, surf):
    """Resolve a colorkey value.

    Returns <colorkey> unchanged, except that the string "auto" is replaced by
    the color of the surface's top-left pixel. None is passed through as None.
    """
    if colorkey is not None:
        # Use == rather than 'is': identity comparison with a string literal
        # is implementation-dependent (CPython emits a SyntaxWarning for it).
        if colorkey == "auto":
            colorkey = surf.get_at((0,0))
        return colorkey
##def load_image(filename, colorkey=None):
## miscgui.functions.debug_msg("Loading " + filename)
## image = pygame.image.load(filename).convert()
## if colorkey:
## image.set_colorkey(colorkey, pygame.RLEACCEL)
#### image.convert_alpha()
#### image.convert()
## return image
def load_image(filename, colorkey=None, use_img_dict=None):
    """Load an image surface, optionally serving it from the application cache.

    filename: path of the image file.
    colorkey: optional colorkey set on the converted surface (with RLEACCEL).
    use_img_dict: whether to serve the image from the cache when present;
        defaults to miscgui.application.USE_IMG_DICT.
    """
    use_img_dict=miscgui.application.USE_IMG_DICT if use_img_dict is None else use_img_dict
    loaded = miscgui.application._loaded.get(filename)
    if loaded and use_img_dict:
        miscgui.functions.debug_msg(filename + " found in loaded files.")
        return loaded
    else:
        miscgui.functions.debug_msg("Loading " + filename)
        image = pygame.image.load(filename).convert()
        if colorkey:
            image.set_colorkey(colorkey, pygame.RLEACCEL)
        # NOTE(review): storing is gated on the global USE_IMG_DICT while the
        # lookup above honours the resolved use_img_dict argument -- confirm
        # whether this asymmetry is intended.
        if miscgui.application.USE_IMG_DICT:
            miscgui.application._loaded[filename] = image
        return image
##def load_image(name, path="./", colorkey=None):
## fullname = os.path.join(path, name)
## loaded = miscgui.constants.loaded.get(fullname)
## if loaded:
## miscgui.functions.debug_msg(fullname + " found in loaded files.")
## return loaded
## else:
## miscgui.functions.debug_msg("Loading " + fullname)
## try:
## image = pygame.image.load(fullname)
## except:
## dirpath = os.path.dirname(os.path.dirname(__file__))
## path = dirpath + "/" + fullname
## path=os.path.normpath(path)
## return load_image(name=path, colorkey=colorkey)
## image.convert()
## miscgui.constants.loaded[fullname] = image
## return image
def fusion_images(surf1, surf2, rel_pos=(0,0), colorkey=(255, 255, 255)):
    """Blit surf2 onto a copy of surf1 at <rel_pos> from surf1's topleft corner.

    colorkey: transparency color of the result; -1 means "use surf1's top-left
    pixel"; None disables the colorkey.
    """
    surface = pygame.Surface(surf1.get_rect().size)
    if colorkey is not None:
        # == instead of 'is': identity checks against int literals only work
        # by accident (CPython small-integer caching).
        if colorkey == -1:
            colorkey = surf1.get_at((0,0))
        surface.fill(colorkey)
        surface.set_colorkey(colorkey, pygame.RLEACCEL)
    surface.blit(surf1,(0,0))
    surface.blit(surf2,rel_pos)
    return surface.convert()
def fusion_images_fine(size, surf1, pos1, surf2, pos2, colorkey=(255, 255, 255)):
    """Blit surf1 at <pos1> and surf2 at <pos2> onto a new surface of <size>.

    colorkey: transparency color of the result; -1 means "use surf1's top-left
    pixel"; None disables the colorkey.
    """
    surface = pygame.Surface(size)
    if colorkey is not None:
        # == instead of 'is' (identity against a literal is unreliable).
        if colorkey == -1:
            colorkey = surf1.get_at((0,0))
        surface.fill(colorkey)
        surface.set_colorkey(colorkey, pygame.RLEACCEL)
    surface.blit(surf1, pos1)
    surface.blit(surf2, pos2)
    return surface.convert()
def capture_screen(surface, rect=None):
    """Return a converted copy of <surface>, restricted to <rect>
    (None means the whole surface)."""
    area = rect if rect else surface.get_rect()
    return surface.copy().subsurface(area).convert()
def change_color_on_img(img, color_source, color_target, colorkey=None):
    """Return a converted copy of <img> in which every <color_source> pixel
    has been replaced by <color_target>; optionally sets <colorkey> on it."""
    pixels = pygame.PixelArray(img.copy())
    pixels.replace(color_source, color_target)
    result = pixels.make_surface()
    if colorkey is not None:
        result.set_colorkey(colorkey, pygame.RLEACCEL)
    return result.convert()
def change_color_on_img_ip(img, color_source, color_target, colorkey=None):
px = pygame.PixelArray(img)
px.replace(color_source, color_target)
img2 = px.make_surface()
if colorkey is not None:
img2.set_colorkey(colorkey, pygame.RLEACCEL)
return img2.convert() | [
"PIL.Image.open",
"pygame.surfarray.array3d",
"pygame.Surface",
"numpy.array",
"pygame.PixelArray",
"thorpy.miscgui.application._loaded.get",
"thorpy.miscgui.functions.debug_msg",
"pygame.image.load",
"pygame.Rect",
"pygame.transform.scale"
] | [((575, 594), 'numpy.array', 'numpy.array', (['vacuum'], {}), '(vacuum)\n', (586, 594), False, 'import numpy\n'), ((607, 637), 'pygame.surfarray.array3d', 'pygame.surfarray.array3d', (['surf'], {}), '(surf)\n', (631, 637), False, 'import pygame\n'), ((1217, 1274), 'pygame.Rect', 'pygame.Rect', (['first_x', 'miny', '(last_x - first_x)', '(maxy - miny)'], {}), '(first_x, miny, last_x - first_x, maxy - miny)\n', (1228, 1274), False, 'import pygame\n'), ((1397, 1414), 'PIL.Image.open', 'Image.open', (['inGif'], {}), '(inGif)\n', (1407, 1414), False, 'from PIL import Image\n'), ((2143, 2190), 'pygame.transform.scale', 'pygame.transform.scale', (['image', '(size_x, size_y)'], {}), '(image, (size_x, size_y))\n', (2165, 2190), False, 'import pygame\n'), ((2242, 2262), 'pygame.Surface', 'pygame.Surface', (['dims'], {}), '(dims)\n', (2256, 2262), False, 'import pygame\n'), ((3108, 3149), 'thorpy.miscgui.application._loaded.get', 'miscgui.application._loaded.get', (['filename'], {}), '(filename)\n', (3139, 3149), False, 'from thorpy import miscgui\n'), ((4906, 4926), 'pygame.Surface', 'pygame.Surface', (['size'], {}), '(size)\n', (4920, 4926), False, 'import pygame\n'), ((5841, 5863), 'pygame.PixelArray', 'pygame.PixelArray', (['img'], {}), '(img)\n', (5858, 5863), False, 'import pygame\n'), ((360, 506), 'thorpy.miscgui.functions.debug_msg', 'miscgui.functions.debug_msg', (['"""Numpy was not found on this machine. Cannot call detect_frame. Returns surface\'s size instead."""'], {}), '(\n "Numpy was not found on this machine. Cannot call detect_frame. 
Returns surface\'s size instead."\n )\n', (387, 506), False, 'from thorpy import miscgui\n'), ((3190, 3255), 'thorpy.miscgui.functions.debug_msg', 'miscgui.functions.debug_msg', (["(filename + ' found in loaded files.')"], {}), "(filename + ' found in loaded files.')\n", (3217, 3255), False, 'from thorpy import miscgui\n'), ((3296, 3346), 'thorpy.miscgui.functions.debug_msg', 'miscgui.functions.debug_msg', (["('Loading ' + filename)"], {}), "('Loading ' + filename)\n", (3323, 3346), False, 'from thorpy import miscgui\n'), ((3363, 3390), 'pygame.image.load', 'pygame.image.load', (['filename'], {}), '(filename)\n', (3380, 3390), False, 'import pygame\n')] |
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch import nn
from torchvision import transforms
import matplotlib.pyplot as plt
import torch
import random
import torch.nn.functional as F
from torch.utils.data.sampler import SubsetRandomSampler
# Fix all RNG seeds (PyTorch CPU/GPU, NumPy, Python) for reproducible runs.
random_seed = 1234
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
#torch.backends.cudnn.deterministic = True
#torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
'사용자 데이터셋 인스턴스 생성 클래스'
class train_Dataset(Dataset):  # inherits torch's Dataset
    """Dataset over rows of the Fashion-MNIST training dataframe.

    Each row holds the class label in column 0 followed by 784 pixel values;
    images are reshaped to (IMAGE_SIZE, IMAGE_SIZE, CHANNEL) float32 arrays.
    """
    def __init__(self, data, transform = None):
        rows = list(data.values)
        self.fashion_mnist = rows
        self.transform = transform
        labels = [row[0] for row in rows]
        pixels = [row[1:] for row in rows]  # 784 pixels per image
        self.label = np.asarray(labels)
        self.img = np.asarray(pixels).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, CHANNEL).astype('float32')

    def __len__(self):
        # Number of training samples.
        return len(self.label)

    def __getitem__(self, idx):
        # Return (label, image), applying the optional transform to the image.
        label, img = self.label[idx], self.img[idx]
        if self.transform:
            img = self.transform(img)
        return label, img
class test_Dataset(Dataset):
    """Dataset over rows of the Fashion-MNIST test dataframe (no labels).

    Each row holds 784 pixel values; images are reshaped to
    (IMAGE_SIZE, IMAGE_SIZE, CHANNEL) float32 arrays.
    """
    def __init__(self, data, transform = None):
        rows = list(data.values)
        self.fashion_mnist = rows
        self.transform = transform
        pixels = [row[:] for row in rows]  # 784 pixels per image
        self.img = np.asarray(pixels).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, CHANNEL).astype('float32')

    def __len__(self):
        # Number of test samples.
        return len(self.fashion_mnist)

    def __getitem__(self, idx):
        # Return the image only (no label), applying the optional transform.
        img = self.img[idx]
        if self.transform:
            img = self.transform(img)
        return img
# Hyperparameters.
'하이퍼 파라미터'
BATCH_SIZE = 25
LR = 5e-3
NUM_CLASS = 10
IMAGE_SIZE = 28
CHANNEL = 1
Train_epoch = 10
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Custom transform: only ToTensor() here (no augmentation).
'사용자 transform: 여기서는 augmentation 같은거 안 하고 ToTensor() 하나만 적용'
My_transform = transforms.Compose([
    transforms.ToTensor(), # default : range [0, 255] -> [0.0, 1.0] 스케일링
])
# Data: Fashion-MNIST as CSV (label + 784 pixel columns per row).
'데이터'
train = pd.read_csv('./data_fashion_mnist/train.csv', index_col='index') # len 60000
test = pd.read_csv('./data_fashion_mnist/test.csv', index_col='index') # len 10000
# Split shuffled indices into train/validation subsets.
'valid set'
valid_size = 10000
indices = torch.randperm(len(train)) # shuffled indices from 0 to 59999
train_indices = indices[:len(indices) - valid_size]
valid_indices = indices[len(indices) - valid_size:] if valid_size else None
Train_data = train_Dataset(train, transform=My_transform)
Valid_data = train_Dataset(train, transform=My_transform)
Test_data = test_Dataset(test, transform=My_transform)
# DataLoaders: mini-batching; the SubsetRandomSampler draws only the indices
# of each split (shuffle must stay False when a sampler is given).
'torch DataLoader 함수: 데이터를 mini-batch로 처리해 주고, GPU를 통한 병렬처리, 학습효율의 향상'
Train_dataloader = DataLoader(dataset=Train_data,
                         batch_size = BATCH_SIZE,
                         shuffle=False,
                         sampler=SubsetRandomSampler(train_indices))
Valid_dataloader = DataLoader(dataset=Valid_data,
                         batch_size = BATCH_SIZE,
                         shuffle=False,
                         sampler=SubsetRandomSampler(valid_indices))
Test_dataloader = DataLoader(dataset=Test_data,
                         batch_size = 1,
                         shuffle=False)
'사용자 모델'
class My_model(nn.Module):  # inherits torch's nn.Module
    """Small CNN classifier for 28x28 single-channel images.

    Feature maps: (1, 28, 28) -> layer1 -> (16, 14, 14) -> layer2 ->
    (32, 14, 14) -> layer3 -> (64, 7, 7) -> flatten -> linear ->
    num_of_class logits.
    """
    def __init__(self, num_of_class):
        super(My_model, self).__init__()

        def conv_block(in_ch, out_ch, pool):
            # Conv -> BatchNorm -> ReLU, optionally followed by 2x2 max-pooling.
            modules = [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
            ]
            if pool:
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            return nn.Sequential(*modules)

        self.layer1 = conv_block(1, 16, pool=True)    # 28x28 -> 14x14
        self.layer2 = conv_block(16, 32, pool=False)  # 14x14 -> 14x14
        self.layer3 = conv_block(32, 64, pool=True)   # 14x14 -> 7x7
        self.fc = nn.Linear(7 * 7 * 64, num_of_class)

    def forward(self, x):
        features = self.layer3(self.layer2(self.layer1(x)))
        flat = features.reshape(features.size(0), -1)  # (N, 7*7*64)
        return self.fc(flat)
'모델 학습'
def train():
model = My_model(NUM_CLASS).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = LR)
criterion = nn.CrossEntropyLoss()
valid_loss_min = np.inf # 초기화 (나중에 업데이트 함)
for epoch in range(1, Train_epoch + 1): # epoch: 모든 데이터
train_loss = 0.0
valid_loss = 0.0
for batch_id, (label, image) in enumerate(Train_dataloader): # iter: batch 데이터 (25개)
label, image = label.to(device), image.to(device) # shape: (25,)
output = model(image) # 1. 모델에 데이터 입력해 출력 얻기 # 10개 클래스에 대한 로짓 # shape: (25, 10)
loss = criterion(output, label) # 2. loss 계산 # NLL loss 2.3078 # shape: ()
optimizer.zero_grad() # 3. 기울기 초기화 (iter 끝날때마다 초기화)
loss.backward() # 4. 역전파
optimizer.step() # 5. 최적화
for batch_id, (label, image) in enumerate(Valid_dataloader):
label, image = label.to(device), image.to(device)
output = model(image)
loss = criterion(output, label)
valid_loss += loss.item()
# calculate avg losses
train_loss = train_loss/len(Train_dataloader.dataset)
valid_loss = valid_loss/len(Valid_dataloader.dataset)
# print training/validation statistics
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss))
# save model if validation loss has decreased
if valid_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min, valid_loss))
torch.save(model, './data_fashion_mnist/best_model.pt')
torch.save(model.state_dict(), './data_fashion_mnist/best_model.pth')
torch.save(valid_indices, './data_fashion_mnist/valid_indices.pth')
valid_loss_min = valid_loss
return model
'학습된 모델로 테스트'
def test(model):
model = torch.load('./data_fashion_mnist/best_model.pt') # 모델 불러오기
print('success load best_model')
pred = []
with torch.no_grad(): # 파라미터 업데이트 안 함
correct = 0
total = 0
for image in Test_dataloader:
image = image.to(device)
outputs = model(image)
predicted = np.asarray(torch.argmax(outputs, dim=1).cpu()) # 예측 클래스
pred.append(predicted)
return np.array(pred).flatten() # flatten: 1차원으로 펴 줌 (submission 파일에 값을 대입하기 위해) # (10000,)
'메인문'
if __name__ == '__main__':
model = train()
pred = test(model)
'예측 라벨 저장'
submission = pd.read_csv('./data_fashion_mnist/sample_submission.csv')
submission['label'] = pred
submission.to_csv('./data_fashion_mnist/submission.csv', index=False)
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"numpy.asarray",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"torch.argmax",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.save",
"torch.cud... | [((312, 342), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (329, 342), False, 'import torch\n'), ((343, 378), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (365, 378), False, 'import torch\n'), ((379, 418), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['random_seed'], {}), '(random_seed)\n', (405, 418), False, 'import torch\n'), ((521, 548), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (535, 548), True, 'import numpy as np\n'), ((549, 573), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (560, 573), False, 'import random\n'), ((2328, 2392), 'pandas.read_csv', 'pd.read_csv', (['"""./data_fashion_mnist/train.csv"""'], {'index_col': '"""index"""'}), "('./data_fashion_mnist/train.csv', index_col='index')\n", (2339, 2392), True, 'import pandas as pd\n'), ((2412, 2475), 'pandas.read_csv', 'pd.read_csv', (['"""./data_fashion_mnist/test.csv"""'], {'index_col': '"""index"""'}), "('./data_fashion_mnist/test.csv', index_col='index')\n", (2423, 2475), True, 'import pandas as pd\n'), ((3430, 3488), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'Test_data', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=Test_data, batch_size=1, shuffle=False)\n', (3440, 3488), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7444, 7501), 'pandas.read_csv', 'pd.read_csv', (['"""./data_fashion_mnist/sample_submission.csv"""'], {}), "('./data_fashion_mnist/sample_submission.csv')\n", (7455, 7501), True, 'import pandas as pd\n'), ((4993, 5014), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5012, 5014), False, 'from torch import nn\n'), ((6823, 6871), 'torch.load', 'torch.load', (['"""./data_fashion_mnist/best_model.pt"""'], {}), "('./data_fashion_mnist/best_model.pt')\n", (6833, 6871), False, 'import torch\n'), ((971, 988), 'numpy.asarray', 'np.asarray', 
(['label'], {}), '(label)\n', (981, 988), True, 'import numpy as np\n'), ((2100, 2125), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2123, 2125), False, 'import torch\n'), ((2241, 2262), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2260, 2262), False, 'from torchvision import transforms\n'), ((3152, 3186), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (3171, 3186), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3376, 3410), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_indices'], {}), '(valid_indices)\n', (3395, 3410), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((4458, 4493), 'torch.nn.Linear', 'nn.Linear', (['(7 * 7 * 64)', 'num_of_class'], {}), '(7 * 7 * 64, num_of_class)\n', (4467, 4493), False, 'from torch import nn\n'), ((6942, 6957), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6955, 6957), False, 'import torch\n'), ((3736, 3788), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(16)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(1, 16, kernel_size=3, stride=1, padding=1)\n', (3745, 3788), False, 'from torch import nn\n'), ((3817, 3835), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (3831, 3835), False, 'from torch import nn\n'), ((3849, 3858), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3856, 3858), False, 'from torch import nn\n'), ((3872, 3909), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (3884, 3909), False, 'from torch import nn\n'), ((3984, 4037), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(16, 32, kernel_size=3, stride=1, padding=1)\n', (3993, 4037), False, 'from torch import nn\n'), ((4066, 4084), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), 
'(32)\n', (4080, 4084), False, 'from torch import nn\n'), ((4098, 4107), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4105, 4107), False, 'from torch import nn\n'), ((4232, 4285), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, 64, kernel_size=3, stride=1, padding=1)\n', (4241, 4285), False, 'from torch import nn\n'), ((4314, 4332), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4328, 4332), False, 'from torch import nn\n'), ((4346, 4355), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4353, 4355), False, 'from torch import nn\n'), ((4369, 4406), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (4381, 4406), False, 'from torch import nn\n'), ((6494, 6549), 'torch.save', 'torch.save', (['model', '"""./data_fashion_mnist/best_model.pt"""'], {}), "(model, './data_fashion_mnist/best_model.pt')\n", (6504, 6549), False, 'import torch\n'), ((6644, 6711), 'torch.save', 'torch.save', (['valid_indices', '"""./data_fashion_mnist/valid_indices.pth"""'], {}), "(valid_indices, './data_fashion_mnist/valid_indices.pth')\n", (6654, 6711), False, 'import torch\n'), ((7257, 7271), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (7265, 7271), True, 'import numpy as np\n'), ((1008, 1023), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1018, 1023), True, 'import numpy as np\n'), ((1682, 1697), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1692, 1697), True, 'import numpy as np\n'), ((7161, 7189), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (7173, 7189), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 23:53:36 2020
@author: <NAME>
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from numpy import trapz
#https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
os.getcwd()
#q = variable de posición, dq0 = \dot{q}(0) = valor inicial de la derivada
#d = granularidad del parámetro temporal
def deriv(q,dq0,d):
#dq = np.empty([len(q)])
dq = (q[1:len(q)]-q[0:(len(q)-1)])/d
dq = np.insert(dq,0,dq0) #dq = np.concatenate(([dq0],dq))
return dq
#Ecuación de un sistema dinámico continuo
#Ejemplo de oscilador simple
def F(q):
ddq = - 2*q*(q**2 - 1)
return ddq
#Resolución de la ecuación dinámica \ddot{q} = F(q), obteniendo la órbita q(t)
#Los valores iniciales son la posición q0 := q(0) y la derivada dq0 := \dot{q}(0)
def orb(n,q0,dq0,F, args=None, d=0.001):
#q = [0.0]*(n+1)
q = np.empty([n+1])
q[0] = q0
q[1] = q0 + dq0*d
for i in np.arange(2,n+1):
args = q[i-2]
q[i] = - q[i-2] + d**2*F(args) + 2*q[i-1]
return q #np.array(q),
def periodos(q,d,max=True):
#Si max = True, tomamos las ondas a partir de los máximos/picos
#Si max == False, tomamos los las ondas a partir de los mínimos/valles
epsilon = 5*d
dq = deriv(q,dq0=None,d=d) #La primera derivada es irrelevante
if max == True:
waves = np.where((np.round(dq,int(-np.log10(epsilon))) == 0) & (q >0))
if max != True:
waves = np.where((np.round(dq,int(-np.log10(epsilon))) == 0) & (q <0))
diff_waves = np.diff(waves)
waves = waves[0][1:][diff_waves[0]>1]
pers = diff_waves[diff_waves>1]*d
return pers, waves
#################################################################
# CÁLUCLO DE ÓRBITAS
#################################################################
#Ejemplo gráfico del oscilador simple
q0 = 0.
dq0 = 1.
fig, ax = plt.subplots(figsize=(12,5))
plt.ylim(-1.5, 1.5)
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("t = n $\delta$", fontsize=12)
ax.set_ylabel("q(t)", fontsize=12)
iseq = np.array([1,1.1,1.5,1.8,3])
for i in iseq:
d = 10**(-i)
n = int(32/d)
t = np.arange(n+1)*d
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
plt.plot(t, q, 'ro', markersize=0.5/i,label='$\delta$ ='+str(np.around(d,3)), \
c=plt.get_cmap("winter")(i/np.max(iseq)))
ax.legend(loc=3, frameon=False, fontsize=12)
#plt.savefig('Time_granularity.png', dpi=250)
#Ejemplo de coordenadas canónicas (q, p)
#Nos quedamos con el más fino y calculamos la coordenada canónica 'p'
q0 = 0.
dq0 = 1.
d = 10**(-3)
n = int(32/d)
t = np.arange(n+1)*d
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
#Ejemplo gráfico de la derivada de q(t)
fig, ax = plt.subplots(figsize=(12,5))
plt.ylim(-1.5, 1.5)
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("t = n $\delta$", fontsize=12)
ax.set_ylabel("dq(t)", fontsize=12)
plt.plot(t, dq, '-')
#Ejemplo de diagrama de fases (q, p)
fig, ax = plt.subplots(figsize=(5,5))
plt.xlim(-1.1, 1.1)
plt.ylim(-1, 1)
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
plt.plot(q, p, '-')
plt.show()
#################################################################
# ESPACIO FÁSICO
#################################################################
## Pintamos el espacio de fases
def simplectica(q0,dq0,F,col=0,d = 10**(-4),n = int(16/d),marker='-'):
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
plt.plot(q, p, marker,c=plt.get_cmap("winter")(col))
#Dibuja el espacio de fases para las condiciones iniciales en los rangos por parámetro
def dibujarEspacioFases(q0Min, q0Max, dq0Min, dq0Max):
fig = plt.figure(figsize=(8,5))
fig.subplots_adjust(hspace=0.4, wspace=0.2)
seq_q0 = np.linspace(q0Min, q0Max, num=12)
seq_dq0 = np.linspace(dq0Min, dq0Max, num=12)
for i in range(len(seq_q0)):
for j in range(len(seq_dq0)):
q0 = seq_q0[i]
dq0 = seq_dq0[j]
ax = fig.add_subplot(1,1, 1)
col = (1+i+j*(len(seq_q0)))/(len(seq_q0)*len(seq_dq0))
#ax = fig.add_subplot(len(seq_q0), len(seq_dq0), 1+i+j*(len(seq_q0)))
simplectica(q0=q0,dq0=dq0,F=F,col=col,marker='ro',d= 10**(-3),n = int(16/d))
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
#fig.savefig('Simplectic.png', dpi=250)
plt.show()
##Espacio fásico con condiciones iniciales [0,1]x[0,1]
dibujarEspacioFases(0., 1., 0., 2.)
#################################################################
# CÁLCULO DEL ÁREA DEL ESPACIO FÁSICO
#################################################################
#Tomamos un par (q(0), p(0)) y nos quedamos sólo en un trozo/onda de la órbita, sin repeticiones
#Para eso, tomaremos los periodos de la órbita, que definen las ondas
def calculaArea(q0, dq0, delta):
d = delta
n = int(32/d)
t = np.arange(n+1)*d
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
fig, ax = plt.subplots(figsize=(12,5))
plt.plot(t, q, '-')
#Nos aseguramos que tiene buen aspecto
fig, ax = plt.subplots(figsize=(5,5))
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
plt.plot(q, p, '-')
plt.show()
#Tomaremos los periodos de la órbita, que definen las ondas
T, W = periodos(q,d,max=False)
#Nos quedamos con el primer trozo
plt.plot(q[W[0]:W[1]])
plt.show()
plt.plot(p[W[0]:W[1]])
plt.show()
plt.plot(q[W[0]:W[1]],p[W[0]:W[1]])
plt.show()
#Tomamos la mitad de la "curva cerrada" para integrar más fácilmente
mitad = np.arange(W[0],W[0]+np.int((W[1]-W[0])/2),1)
plt.plot(q[mitad],p[mitad])
plt.show()
# Regla del trapezoide
return 2*trapz(p[mitad],q[mitad])
#Calcula el área sin hacer los dibujos de comprobación
def calculaAreaSinDibujar(q0, dq0, delta):
d = delta
n = int(32/d)
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
#Tomaremos los periodos de la órbita, que definen las ondas
T, W = periodos(q,d,max=False)
if W.size < 2:
return - W.size
else:
#Tomamos la mitad de la "curva cerrada" para integrar más fácilmente
mitad = np.arange(W[0],W[0]+np.int((W[1]-W[0])/2),1)
# Regla del trapezoide
return 2*trapz(p[mitad],q[mitad])
#Calcula las condiciones iniciales que maximizan y minimizan el área.
#Además, devuelve la correspondiente área máxima y mínima
def calculaExtremos(q0Min, q0Max, dq0Min, dq0Max, delta):
areaMax = 0.
qpMax = [0.,0.]
areaMin = 30.
qpMin = [0.,0.]
paso = 0.1
for q0 in np.arange(q0Min, q0Max + paso, paso):
for dq0 in np.arange(dq0Min, dq0Max + paso, paso):
area = calculaAreaSinDibujar(q0, dq0, delta)
if area > 0:
if area > areaMax:
areaMax = area
qpMax = [q0,dq0]
if area < areaMin:
areaMin = area
qpMin = [q0,dq0]
return areaMax, qpMax, areaMin, qpMin
#Vamos a calcular el error variando delta
def calculaErrores(deltaMin, deltaMax, areaRef, qpMin, qpMax):
errores = np.array([])
for deltaAux in np.arange(deltaMin,deltaMax,0.05):
areaAuxMax = calculaAreaSinDibujar(qpMax[0], qpMax[1], 10**(-deltaAux))
areaAuxMin = calculaAreaSinDibujar(qpMin[0], qpMin[1], 10**(-deltaAux))
if areaAuxMin > 0 and areaAuxMax > 0:
areaAux = areaAuxMax - areaAuxMin
errores = np.append(errores, areaRef - areaAux)
errores = np.abs(errores)
return errores
delta = 10**(-3.5)
#Calculamos el área para D0 por tanto q esta en [0,1] y dq en [0,2]
areaMax, qpMax, areaMin, qpMin = calculaExtremos(0. ,1. , 0. ,2. , delta)
area = areaMax - areaMin
error = np.amax(calculaErrores(3, 3.5, area, qpMin, qpMax))
print("El área total del espacio de fases con condiciones iniciales D0 es: " + str(area))
print("Con la órbita de área máxima en condiciones iniciales " + str(qpMax) \
+ " y la mínima en " + str(qpMin))
print("Con un error de: " + str(error))
| [
"numpy.log10",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"matplotlib.pyplot.ylim",
"numpy.abs",
"numpy.trapz",
"numpy.around",
"matplotlib.pyplot.xlim",
"numpy.int",
"matplotlib.pyplot.get_cmap",
"numpy.insert",... | [((233, 244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (242, 244), False, 'import os\n'), ((1878, 1907), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (1890, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1907, 1926), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1915, 1926), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2086), 'numpy.array', 'np.array', (['[1, 1.1, 1.5, 1.8, 3]'], {}), '([1, 1.1, 1.5, 1.8, 3])\n', (2063, 2086), True, 'import numpy as np\n'), ((2729, 2758), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (2741, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2777), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (2766, 2777), True, 'import matplotlib.pyplot as plt\n'), ((2900, 2920), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'dq', '"""-"""'], {}), "(t, dq, '-')\n", (2908, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2997), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2981, 2997), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3016), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (3005, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3034), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3027, 3034), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3164), 'matplotlib.pyplot.plot', 'plt.plot', (['q', 'p', '"""-"""'], {}), "(q, p, '-')\n", (3153, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3173, 3175), True, 'import matplotlib.pyplot as plt\n'), ((458, 479), 'numpy.insert', 'np.insert', (['dq', '(0)', 'dq0'], {}), '(dq, 0, dq0)\n', (467, 479), True, 'import numpy as np\n'), ((880, 897), 'numpy.empty', 
'np.empty', (['[n + 1]'], {}), '([n + 1])\n', (888, 897), True, 'import numpy as np\n'), ((945, 964), 'numpy.arange', 'np.arange', (['(2)', '(n + 1)'], {}), '(2, n + 1)\n', (954, 964), True, 'import numpy as np\n'), ((1534, 1548), 'numpy.diff', 'np.diff', (['waves'], {}), '(waves)\n', (1541, 1548), True, 'import numpy as np\n'), ((2593, 2609), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (2602, 2609), True, 'import numpy as np\n'), ((3727, 3753), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3737, 3753), True, 'import matplotlib.pyplot as plt\n'), ((3814, 3847), 'numpy.linspace', 'np.linspace', (['q0Min', 'q0Max'], {'num': '(12)'}), '(q0Min, q0Max, num=12)\n', (3825, 3847), True, 'import numpy as np\n'), ((3862, 3897), 'numpy.linspace', 'np.linspace', (['dq0Min', 'dq0Max'], {'num': '(12)'}), '(dq0Min, dq0Max, num=12)\n', (3873, 3897), True, 'import numpy as np\n'), ((4430, 4440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4438, 4440), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5092), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (5075, 5092), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5115), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'q', '"""-"""'], {}), "(t, q, '-')\n", (5104, 5115), True, 'import matplotlib.pyplot as plt\n'), ((5178, 5206), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5190, 5206), True, 'import matplotlib.pyplot as plt\n'), ((5332, 5351), 'matplotlib.pyplot.plot', 'plt.plot', (['q', 'p', '"""-"""'], {}), "(q, p, '-')\n", (5340, 5351), True, 'import matplotlib.pyplot as plt\n'), ((5356, 5366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5364, 5366), True, 'import matplotlib.pyplot as plt\n'), ((5513, 5535), 'matplotlib.pyplot.plot', 'plt.plot', (['q[W[0]:W[1]]'], {}), '(q[W[0]:W[1]])\n', (5521, 5535), True, 'import matplotlib.pyplot 
as plt\n'), ((5540, 5550), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5548, 5550), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5577), 'matplotlib.pyplot.plot', 'plt.plot', (['p[W[0]:W[1]]'], {}), '(p[W[0]:W[1]])\n', (5563, 5577), True, 'import matplotlib.pyplot as plt\n'), ((5582, 5592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5590, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5633), 'matplotlib.pyplot.plot', 'plt.plot', (['q[W[0]:W[1]]', 'p[W[0]:W[1]]'], {}), '(q[W[0]:W[1]], p[W[0]:W[1]])\n', (5605, 5633), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5645, 5647), True, 'import matplotlib.pyplot as plt\n'), ((5792, 5820), 'matplotlib.pyplot.plot', 'plt.plot', (['q[mitad]', 'p[mitad]'], {}), '(q[mitad], p[mitad])\n', (5800, 5820), True, 'import matplotlib.pyplot as plt\n'), ((5824, 5834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5832, 5834), True, 'import matplotlib.pyplot as plt\n'), ((6780, 6816), 'numpy.arange', 'np.arange', (['q0Min', '(q0Max + paso)', 'paso'], {}), '(q0Min, q0Max + paso, paso)\n', (6789, 6816), True, 'import numpy as np\n'), ((7360, 7372), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7368, 7372), True, 'import numpy as np\n'), ((7393, 7428), 'numpy.arange', 'np.arange', (['deltaMin', 'deltaMax', '(0.05)'], {}), '(deltaMin, deltaMax, 0.05)\n', (7402, 7428), True, 'import numpy as np\n'), ((7759, 7774), 'numpy.abs', 'np.abs', (['errores'], {}), '(errores)\n', (7765, 7774), True, 'import numpy as np\n'), ((2141, 2157), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (2150, 2157), True, 'import numpy as np\n'), ((4952, 4968), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (4961, 4968), True, 'import numpy as np\n'), ((5880, 5905), 'numpy.trapz', 'trapz', (['p[mitad]', 'q[mitad]'], {}), '(p[mitad], q[mitad])\n', (5885, 5905), False, 'from numpy import trapz\n'), ((6837, 6875), 
'numpy.arange', 'np.arange', (['dq0Min', '(dq0Max + paso)', 'paso'], {}), '(dq0Min, dq0Max + paso, paso)\n', (6846, 6875), True, 'import numpy as np\n'), ((5763, 5788), 'numpy.int', 'np.int', (['((W[1] - W[0]) / 2)'], {}), '((W[1] - W[0]) / 2)\n', (5769, 5788), True, 'import numpy as np\n'), ((6464, 6489), 'numpy.trapz', 'trapz', (['p[mitad]', 'q[mitad]'], {}), '(p[mitad], q[mitad])\n', (6469, 6489), False, 'from numpy import trapz\n'), ((7702, 7739), 'numpy.append', 'np.append', (['errores', '(areaRef - areaAux)'], {}), '(errores, areaRef - areaAux)\n', (7711, 7739), True, 'import numpy as np\n'), ((2298, 2320), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""winter"""'], {}), "('winter')\n", (2310, 2320), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3567), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""winter"""'], {}), "('winter')\n", (3557, 3567), True, 'import matplotlib.pyplot as plt\n'), ((6382, 6407), 'numpy.int', 'np.int', (['((W[1] - W[0]) / 2)'], {}), '((W[1] - W[0]) / 2)\n', (6388, 6407), True, 'import numpy as np\n'), ((2260, 2275), 'numpy.around', 'np.around', (['d', '(3)'], {}), '(d, 3)\n', (2269, 2275), True, 'import numpy as np\n'), ((2323, 2335), 'numpy.max', 'np.max', (['iseq'], {}), '(iseq)\n', (2329, 2335), True, 'import numpy as np\n'), ((1382, 1399), 'numpy.log10', 'np.log10', (['epsilon'], {}), '(epsilon)\n', (1390, 1399), True, 'import numpy as np\n'), ((1481, 1498), 'numpy.log10', 'np.log10', (['epsilon'], {}), '(epsilon)\n', (1489, 1498), True, 'import numpy as np\n')] |
#import director
from director import cameraview
from director import transformUtils
from director import visualization as vis
from director import objectmodel as om
from director.ikparameters import IkParameters
from director.ikplanner import ConstraintSet
from director import polarisplatformplanner
from director import robotstate
from director import segmentation
from director import sitstandplanner
from director.timercallback import TimerCallback
from director import visualization as vis
from director import planplayback
from director import lcmUtils
from director.uuidutil import newUUID
import os
import functools
import numpy as np
import scipy.io
import vtkAll as vtk
import bot_core as lcmbotcore
from director.tasks.taskuserpanel import TaskUserPanel
import director.tasks.robottasks as rt
from director import filterUtils
from director import ioUtils
import director
from numpy import array
class CourseModel(object):
def __init__(self):
pose = transformUtils.poseFromTransform(vtk.vtkTransform())
self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([ 1.0, 0. , 0. , 0.0])))
self.originFrame = self.pointcloudPD.getChildFrame()
t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([ 0.93821625, 0. , 0. , -0.34604951]))
self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([ 0.93449475, 0. , 0. , -0.35597691]))
self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([ 0.93697004, 0. , 0. , -0.34940972]))
self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541 ]), array([-0.40456572, 0. , 0. , 0.91450893]))
self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.5314524 , -0.27401861, -0.71302976]), array([ 0.98691519, 0. , 0. , -0.16124022]))
self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([ 0.93163145, 0. , 0. , -0.36340451]))
self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-0.5176186 , -1.00151554, -0.70650799]), array([ 0.84226497, 0. , 0. , -0.53906374]))
self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([ 0.98625075, 0. , 0. , -0.16525575]))
self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([ 0.65827322, -0.08028796, -0.77370834]), array([ 0.94399977, 0. , 0. , -0.3299461 ]))
self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([ 5.47126425, -0.09790393, -0.70504679]), array([ 1., 0., 0., 0.]))
self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
self.frameSync = vis.FrameSync()
self.frameSync.addFrame(self.originFrame)
self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
| [
"director.visualization.updateFrame",
"director.getDRCBaseDir",
"numpy.array",
"director.visualization.FrameSync",
"director.visualization.showPolyData",
"vtkAll.vtkTransform"
] | [((1181, 1242), 'director.visualization.showPolyData', 'vis.showPolyData', (['self.pointcloud', '"""coursemodel"""'], {'parent': 'None'}), "(self.pointcloud, 'coursemodel', parent=None)\n", (1197, 1242), True, 'from director import visualization as vis\n'), ((1641, 1728), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""ValveWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'ValveWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (1656, 1728), True, 'from director import visualization as vis\n'), ((1911, 2001), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""DrillPreWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (1926, 2001), True, 'from director import visualization as vis\n'), ((2182, 2269), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""DrillWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'DrillWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (2197, 2269), True, 'from director import visualization as vis\n'), ((2467, 2569), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""DrillWallWalkFarthestSafe"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True,\n parent=self.pointcloudPD)\n", (2482, 2569), True, 'from director import visualization as vis\n'), ((2759, 2854), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""DrillWallWalkBack"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=\n self.pointcloudPD)\n", (2774, 2854), True, 'from director import visualization as vis\n'), ((3041, 3134), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""SurprisePreWalk"""'], 
{'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (3056, 3134), True, 'from director import visualization as vis\n'), ((3318, 3408), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""SurpriseWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (3333, 3408), True, 'from director import visualization as vis\n'), ((3596, 3690), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""SurpriseWalkBack"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self\n .pointcloudPD)\n", (3611, 3690), True, 'from director import visualization as vis\n'), ((3875, 3967), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""TerrainPreWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (3890, 3967), True, 'from director import visualization as vis\n'), ((4119, 4210), 'director.visualization.updateFrame', 'vis.updateFrame', (['t', '"""StairsPreWalk"""'], {'scale': '(0.2)', 'visible': '(True)', 'parent': 'self.pointcloudPD'}), "(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.\n pointcloudPD)\n", (4134, 4210), True, 'from director import visualization as vis\n'), ((4231, 4246), 'director.visualization.FrameSync', 'vis.FrameSync', ([], {}), '()\n', (4244, 4246), True, 'from director import visualization as vis\n'), ((1011, 1029), 'vtkAll.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (1027, 1029), True, 'import vtkAll as vtk\n'), ((1501, 1547), 'numpy.array', 'array', (['[-4.39364111, -0.51507392, -0.73125563]'], {}), '([-4.39364111, -0.51507392, -0.73125563])\n', (1506, 1547), False, 'from numpy import array\n'), ((1549, 
1591), 'numpy.array', 'array', (['[0.93821625, 0.0, 0.0, -0.34604951]'], {}), '([0.93821625, 0.0, 0.0, -0.34604951])\n', (1554, 1591), False, 'from numpy import array\n'), ((1769, 1814), 'numpy.array', 'array', (['[-3.31840048, 0.36408685, -0.67413123]'], {}), '([-3.31840048, 0.36408685, -0.67413123])\n', (1774, 1814), False, 'from numpy import array\n'), ((1817, 1859), 'numpy.array', 'array', (['[0.93449475, 0.0, 0.0, -0.35597691]'], {}), '([0.93449475, 0.0, 0.0, -0.35597691])\n', (1822, 1859), False, 'from numpy import array\n'), ((2042, 2088), 'numpy.array', 'array', (['[-2.24553758, -0.52990939, -0.73255338]'], {}), '([-2.24553758, -0.52990939, -0.73255338])\n', (2047, 2088), False, 'from numpy import array\n'), ((2090, 2132), 'numpy.array', 'array', (['[0.93697004, 0.0, 0.0, -0.34940972]'], {}), '([0.93697004, 0.0, 0.0, -0.34940972])\n', (2095, 2132), False, 'from numpy import array\n'), ((2310, 2356), 'numpy.array', 'array', (['[-2.51306835, -0.92994004, -0.74173541]'], {}), '([-2.51306835, -0.92994004, -0.74173541])\n', (2315, 2356), False, 'from numpy import array\n'), ((2359, 2401), 'numpy.array', 'array', (['[-0.40456572, 0.0, 0.0, 0.91450893]'], {}), '([-0.40456572, 0.0, 0.0, 0.91450893])\n', (2364, 2401), False, 'from numpy import array\n'), ((2611, 2656), 'numpy.array', 'array', (['[-2.5314524, -0.27401861, -0.71302976]'], {}), '([-2.5314524, -0.27401861, -0.71302976])\n', (2616, 2656), False, 'from numpy import array\n'), ((2659, 2701), 'numpy.array', 'array', (['[0.98691519, 0.0, 0.0, -0.16124022]'], {}), '([0.98691519, 0.0, 0.0, -0.16124022])\n', (2664, 2701), False, 'from numpy import array\n'), ((2895, 2940), 'numpy.array', 'array', (['[-1.16122318, 0.04723203, -0.67493468]'], {}), '([-1.16122318, 0.04723203, -0.67493468])\n', (2900, 2940), False, 'from numpy import array\n'), ((2943, 2985), 'numpy.array', 'array', (['[0.93163145, 0.0, 0.0, -0.36340451]'], {}), '([0.93163145, 0.0, 0.0, -0.36340451])\n', (2948, 2985), False, 'from numpy import 
array\n'), ((3175, 3220), 'numpy.array', 'array', (['[-0.5176186, -1.00151554, -0.70650799]'], {}), '([-0.5176186, -1.00151554, -0.70650799])\n', (3180, 3220), False, 'from numpy import array\n'), ((3223, 3265), 'numpy.array', 'array', (['[0.84226497, 0.0, 0.0, -0.53906374]'], {}), '([0.84226497, 0.0, 0.0, -0.53906374])\n', (3228, 3265), False, 'from numpy import array\n'), ((3449, 3495), 'numpy.array', 'array', (['[-0.69100097, -0.43713269, -0.68495922]'], {}), '([-0.69100097, -0.43713269, -0.68495922])\n', (3454, 3495), False, 'from numpy import array\n'), ((3497, 3539), 'numpy.array', 'array', (['[0.98625075, 0.0, 0.0, -0.16525575]'], {}), '([0.98625075, 0.0, 0.0, -0.16525575])\n', (3502, 3539), False, 'from numpy import array\n'), ((3731, 3776), 'numpy.array', 'array', (['[0.65827322, -0.08028796, -0.77370834]'], {}), '([0.65827322, -0.08028796, -0.77370834])\n', (3736, 3776), False, 'from numpy import array\n'), ((3779, 3820), 'numpy.array', 'array', (['[0.94399977, 0.0, 0.0, -0.3299461]'], {}), '([0.94399977, 0.0, 0.0, -0.3299461])\n', (3784, 3820), False, 'from numpy import array\n'), ((4008, 4053), 'numpy.array', 'array', (['[5.47126425, -0.09790393, -0.70504679]'], {}), '([5.47126425, -0.09790393, -0.70504679])\n', (4013, 4053), False, 'from numpy import array\n'), ((4056, 4083), 'numpy.array', 'array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (4061, 4083), False, 'from numpy import array\n'), ((1081, 1105), 'director.getDRCBaseDir', 'director.getDRCBaseDir', ([], {}), '()\n', (1103, 1105), False, 'import director\n'), ((1328, 1344), 'numpy.array', 'array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1333, 1344), False, 'from numpy import array\n'), ((1346, 1373), 'numpy.array', 'array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (1351, 1373), False, 'from numpy import array\n')] |
import abc
from typing import Dict, List, Deque
import datetime
import numpy
from agnes.algos.base import _BaseAlgo
from agnes.common import logger
from agnes.common.schedules import Saver
from agnes.nns.initializer import _BaseChooser
from agnes.common.envs_prep import DummyVecEnv
class BaseRunner(abc.ABC):
    """Base class for runners that couple an RL algorithm with a vectorized
    environment: a worker algorithm collects rollouts step by step, while the
    trainer consumes them and is the one that gets saved/logged.
    """

    # NOTE(review): mutable class-level defaults shared by every instance
    # until overwritten by log()/save_every() — confirm this is intended.
    logger = logger.ListLogger()
    saver: Saver = Saver()
    workers_num = 1
    # Attribute declarations; concrete runners are expected to assign these.
    trainer: _BaseAlgo
    worker: _BaseAlgo
    env: DummyVecEnv
    state: numpy.ndarray
    done: numpy.ndarray

    def __init__(self, env, algo, nn: _BaseChooser, config: Dict):
        """Unpack the environment bundle and resolve the algorithm config.

        ``env`` is a dict carrying the vectorized environment under "env"
        plus metadata keys "env_type", "env_num" and "env_name".  An explicit
        ``config`` (if not None) overrides the algorithm's default config.
        """
        self.env = env["env"]
        self.nn_name = nn.meta
        self.cnfg, self.env_type = algo.get_config(env["env_type"])
        if config is not None:
            self.cnfg = config
        self.timesteps = self.cnfg['timesteps']
        self.nsteps = self.cnfg['nsteps']
        self.vec_num = env["env_num"]
        self.env_id = env["env_name"]

    def is_trainer(self) -> bool:
        """Whether this runner owns the trainer; subclasses may override."""
        return True

    def load(self, filename) -> None:
        """Restore trainer state (and worker state, if a worker exists)."""
        if self.is_trainer():
            self.trainer.load(filename)
        if hasattr(self, "worker"):
            self.worker.load(filename)

    def log(self, *args) -> None:
        """Re-create the logger with the given sinks and emit a run header."""
        if self.is_trainer():
            self.logger = logger.ListLogger(*args)
            # One-off summary of the run setup, written before training starts.
            self.logger.info({
                "envs_num": self.vec_num * self.workers_num,
                "device": self.trainer.device_info(),
                "env_type": self.env_type,
                "NN type": self.nn_name,
                "algo": self.trainer.meta,
                "env_name": self.env_id,
                "config": self.cnfg
            })

    def run(self, log_interval: int = 1):
        """Main train/rollout loop; concrete runners implement this."""
        pass

    def save_every(self, filename: str, frames_period: int) -> None:
        """Install a periodic saver that checkpoints every ``frames_period``."""
        if self.is_trainer():
            self.saver = Saver(filename, frames_period)

    def save(self, filename: str) -> None:
        """Checkpoint the trainer immediately."""
        if self.is_trainer():
            self.trainer.save(filename)

    def _one_log(self,
                 lr_things: List[dict],
                 epinfobuf: Deque[dict],
                 nbatch: int,
                 tfirststart: float,
                 tstart: float, tnow: float, nupdates: int,
                 stepping_to_learning: float = None,
                 print_out: bool = True):
        """Aggregate one update's training stats and episode infos, then log.

        ``lr_things`` is a list of per-minibatch metric dicts (averaged per
        key); ``epinfobuf`` holds episode-info dicts with 'l' and 'r' keys
        (presumably episode length and reward — gym convention; confirm).
        ``tfirststart``/``tstart``/``tnow`` are wall-clock timestamps.
        """
        train_dict = {k: logger.safemean([dic[k] for dic in lr_things]) for k in lr_things[0]}
        # Estimated time to completion: elapsed * (remaining fraction of updates).
        etc = (tnow - tfirststart) * (self.timesteps / (self.nsteps * nupdates) - 1.)
        kvpairs = {
            "eplenmean": logger.safemean(numpy.asarray([epinfo['l'] for epinfo in epinfobuf]).reshape(-1)),
            "eprewmean": logger.safemean(numpy.asarray([epinfo['r'] for epinfo in epinfobuf]).reshape(-1)),
            # +1e-20 guards against division by zero on instantaneous updates.
            "fps": int(nbatch / (tnow - tstart + 1e-20)),
            "misc/nupdates": nupdates,
            "misc/serial_timesteps": self.nsteps * nupdates,
            "misc/time_elapsed": str(datetime.timedelta(seconds=round(tnow - tfirststart))),
            "misc/total_timesteps": self.nsteps * nupdates * self.workers_num * self.vec_num,
            "misc/etc": str(datetime.timedelta(seconds=round(etc)))
        }
        kvpairs.update(train_dict)
        if stepping_to_learning is not None:
            kvpairs['misc/stepping_to_learning'] = stepping_to_learning
        self.logger(kvpairs, nupdates, print_out=print_out)

    def _one_run(self):
        """Collect one rollout of ``nsteps`` environment steps.

        Returns the worker's accumulated experience (whatever
        ``worker.experience`` returns on the last step) and the list of
        completed-episode info dicts seen during the rollout.
        """
        data = None
        epinfos = []
        for step in range(self.nsteps):
            action, pred_action, out = self.worker(self.state, self.done)
            nstate, reward, done, infos = self.env.step(action)
            self.done = done
            # Vectorized envs report per-env info dicts; 'episode' appears
            # only when an episode just finished.
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo:
                    epinfos.append(maybeepinfo)
            transition = {
                "state": self.state,
                "action": pred_action,
                "new_state": nstate,
                "reward": reward,
                "done": done
            }
            transition.update(out)
            data = self.worker.experience(transition)
            self.state = nstate
        return data, epinfos

    def __del__(self):
        # Close the environment when the runner is garbage-collected.
        self.env.close()
        del self.env
del self.env
| [
"agnes.common.schedules.Saver",
"numpy.asarray",
"agnes.common.logger.safemean",
"agnes.common.logger.ListLogger"
] | [((327, 346), 'agnes.common.logger.ListLogger', 'logger.ListLogger', ([], {}), '()\n', (344, 346), False, 'from agnes.common import logger\n'), ((366, 373), 'agnes.common.schedules.Saver', 'Saver', ([], {}), '()\n', (371, 373), False, 'from agnes.common.schedules import Saver\n'), ((1269, 1293), 'agnes.common.logger.ListLogger', 'logger.ListLogger', (['*args'], {}), '(*args)\n', (1286, 1293), False, 'from agnes.common import logger\n'), ((1840, 1870), 'agnes.common.schedules.Saver', 'Saver', (['filename', 'frames_period'], {}), '(filename, frames_period)\n', (1845, 1870), False, 'from agnes.common.schedules import Saver\n'), ((2338, 2384), 'agnes.common.logger.safemean', 'logger.safemean', (['[dic[k] for dic in lr_things]'], {}), '([dic[k] for dic in lr_things])\n', (2353, 2384), False, 'from agnes.common import logger\n'), ((2556, 2608), 'numpy.asarray', 'numpy.asarray', (["[epinfo['l'] for epinfo in epinfobuf]"], {}), "([epinfo['l'] for epinfo in epinfobuf])\n", (2569, 2608), False, 'import numpy\n'), ((2664, 2716), 'numpy.asarray', 'numpy.asarray', (["[epinfo['r'] for epinfo in epinfobuf]"], {}), "([epinfo['r'] for epinfo in epinfobuf])\n", (2677, 2716), False, 'import numpy\n')] |
from sklearn.preprocessing import OneHotEncoder
import numpy as np
# Category vocabularies for each categorical feature of the car dataset.
fuel = ['Diesel', 'Petrol', 'LPG', 'CNG']
seller_type = ['Individual', 'Dealer', 'Trustmark Dealer']
transmission = ['Manual', 'Automatic']
owner = ['First Owner', 'Second Owner', 'Third Owner', 'Fourth & Above Owner', 'Test Drive Car']


def _fit_encoder(categories):
    """Fit a one-hot encoder on a single-column view of *categories*."""
    encoder = OneHotEncoder(handle_unknown='ignore')
    encoder.fit(np.array(categories).reshape(-1, 1))
    return encoder


enc1 = _fit_encoder(fuel)
enc2 = _fit_encoder(seller_type)
enc3 = _fit_encoder(transmission)
enc4 = _fit_encoder(owner)
def get_input(values):
    """Build a single model-input row from raw feature values.

    Layout of ``values``:
        values[0]  -- numeric feature kept as-is (placed after the one-hots),
        values[1]  -- fuel type      (one-hot via ``enc1``),
        values[2]  -- seller type    (one-hot via ``enc2``),
        values[3]  -- transmission   (one-hot via ``enc3``),
        values[4]  -- owner category (one-hot via ``enc4``),
        values[5:] -- remaining numeric features.

    Returns
    -------
    numpy.ndarray of shape (1, n_features), dtype float.
    """
    # One-hot encode each categorical field; the encoders were fitted with
    # handle_unknown='ignore', so unseen labels yield an all-zero vector.
    encoded = [enc.transform([[value]]).toarray().ravel()
               for enc, value in zip((enc1, enc2, enc3, enc4), values[1:5])]
    # Numeric block: values[0] followed by everything after the categoricals.
    numeric = np.asarray([values[0]] + list(values[5:]), dtype=float)
    # Single concatenation instead of a chain of np.append calls
    # (np.append copies the whole array on every call).
    final_array = np.concatenate(encoded + [numeric])
    return final_array.reshape(1, -1)
| [
"numpy.append",
"sklearn.preprocessing.OneHotEncoder",
"numpy.array"
] | [((315, 353), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (328, 353), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((401, 439), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (414, 439), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((494, 532), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (507, 532), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((588, 626), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (601, 626), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((903, 920), 'numpy.append', 'np.append', (['a1', 'a2'], {}), '(a1, a2)\n', (912, 920), True, 'import numpy as np\n'), ((934, 955), 'numpy.append', 'np.append', (['array1', 'a3'], {}), '(array1, a3)\n', (943, 955), True, 'import numpy as np\n'), ((969, 990), 'numpy.append', 'np.append', (['array2', 'a4'], {}), '(array2, a4)\n', (978, 990), True, 'import numpy as np\n'), ((363, 377), 'numpy.array', 'np.array', (['fuel'], {}), '(fuel)\n', (371, 377), True, 'import numpy as np\n'), ((449, 470), 'numpy.array', 'np.array', (['seller_type'], {}), '(seller_type)\n', (457, 470), True, 'import numpy as np\n'), ((542, 564), 'numpy.array', 'np.array', (['transmission'], {}), '(transmission)\n', (550, 564), True, 'import numpy as np\n'), ((636, 651), 'numpy.array', 'np.array', (['owner'], {}), '(owner)\n', (644, 651), True, 'import numpy as np\n'), ((1089, 1108), 'numpy.array', 'np.array', (['last_five'], {}), '(last_five)\n', (1097, 1108), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
def plot_interval(X, Y, ratio=1):
    """Scatter-plot the first ``ratio`` fraction of (X, Y), coloring points
    with label 1 red and all others blue; save to image/interval.eps."""
    keep = int(len(X) * ratio)
    xs, ys = X[0:keep], Y[0:keep]
    colors = ['r' if label == 1 else 'b' for label in ys]
    figure = plt.figure()
    plt.scatter(xs, ys, c=colors)
    plt.savefig('image/interval.eps')
    plt.close(figure)
    return 1
def plot_circle(X, Y, ratio=1):
    """Plot a random subsample of 2-D points, marking label -1 as blue crosses
    and label 1 as red circles, overlay the unit circle, and save to
    image/circle.eps."""
    sample_size = int(len(X) * ratio)
    chosen = np.random.choice(len(X), size=sample_size, replace=False)
    points, labels = X[chosen, :], Y[chosen]
    negatives = points[np.where(labels == -1)]
    positives = points[np.where(labels == 1)]
    figure = plt.figure()
    plt.scatter(negatives[:, 0], negatives[:, 1], c='b', marker='x')
    plt.scatter(positives[:, 0], positives[:, 1], facecolors='none',
                edgecolors='r', marker='o')
    unit_circle = plt.Circle((0, 0), 1, fill=False)
    plt.gcf().gca().add_artist(unit_circle)
    plt.axis('equal')
    plt.savefig('image/circle.eps')
    plt.close(figure)
    return 1
| [
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.savefig",
"numpy.where",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis"
] | [((285, 297), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (295, 297), True, 'from matplotlib import pyplot as plt\n'), ((302, 324), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {'c': 'c'}), '(X, Y, c=c)\n', (313, 324), True, 'from matplotlib import pyplot as plt\n'), ((327, 360), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image/interval.eps"""'], {}), "('image/interval.eps')\n", (338, 360), True, 'from matplotlib import pyplot as plt\n'), ((365, 379), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (374, 379), True, 'from matplotlib import pyplot as plt\n'), ((627, 639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (637, 639), True, 'from matplotlib import pyplot as plt\n'), ((644, 692), 'matplotlib.pyplot.scatter', 'plt.scatter', (['B[:, 0]', 'B[:, 1]'], {'c': '"""b"""', 'marker': '"""x"""'}), "(B[:, 0], B[:, 1], c='b', marker='x')\n", (655, 692), True, 'from matplotlib import pyplot as plt\n'), ((692, 768), 'matplotlib.pyplot.scatter', 'plt.scatter', (['R[:, 0]', 'R[:, 1]'], {'facecolors': '"""none"""', 'edgecolors': '"""r"""', 'marker': '"""o"""'}), "(R[:, 0], R[:, 1], facecolors='none', edgecolors='r', marker='o')\n", (703, 768), True, 'from matplotlib import pyplot as plt\n'), ((776, 809), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(1)'], {'fill': '(False)'}), '((0, 0), 1, fill=False)\n', (786, 809), True, 'from matplotlib import pyplot as plt\n'), ((850, 867), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (858, 867), True, 'from matplotlib import pyplot as plt\n'), ((872, 903), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image/circle.eps"""'], {}), "('image/circle.eps')\n", (883, 903), True, 'from matplotlib import pyplot as plt\n'), ((908, 922), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (917, 922), True, 'from matplotlib import pyplot as plt\n'), ((574, 591), 'numpy.where', 'np.where', (['(Y == -1)'], {}), '(Y == 
-1)\n', (582, 591), True, 'import numpy as np\n'), ((601, 617), 'numpy.where', 'np.where', (['(Y == 1)'], {}), '(Y == 1)\n', (609, 617), True, 'import numpy as np\n'), ((811, 820), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (818, 820), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
from math import pi
from os.path import join
import matplotlib.pyplot as plt
from src import MLEnergy, list_tl_files
# Enable matplotlib interactive mode (figures do not block the script).
plt.ion()
# Which source-depth dataset to process; switch to the 'deep' variant below.
source_depth = 'shallow'
#source_depth = 'deep'
def one_freq(fc):
    """Collect background-diffraction energies for every TL file at center
    frequency *fc* and bundle the results into a dictionary of arrays."""
    positions, energies, energies_0, lengths = [], [], [], []
    for tl_file in list_tl_files(fc, source_depth=source_depth):
        ml_energy = MLEnergy(tl_file, source_depth=source_depth, bg_only=True)
        positions.append(ml_energy.xs)
        e_bg, e_bg_0 = ml_energy.background_diffraction()
        lengths.append(ml_energy.llen['bg'])
        energies.append(e_bg)
        energies_0.append(e_bg_0)
    # r_a is taken from the last MLEnergy instance, exactly as before
    # (an empty file list therefore raises NameError here).
    return {'r_a': ml_energy.r_a,
            'x_s': np.array(positions),
            'e_ri': np.array(energies),
            'e_ri_0': np.array(energies_0),
            'loop_len': np.array(lengths, dtype='object')}
# Process the 400 Hz data and tag the frequency-dependent keys with "_400".
save_dict = one_freq(400)
for key in ('e_ri', 'e_ri_0', 'loop_len'):
    save_dict[key + '_400'] = save_dict.pop(key)

"""
tmp_dict = one_freq(1e3)
save_dict['e_ri_1000'] = tmp_dict.pop('e_ri')
save_dict['e_ri_0_1000'] = tmp_dict.pop('e_ri_0')
save_dict['loop_len_1000'] = tmp_dict.pop('loop_len')
"""

np.savez("data/processed/bg_ri_eng_" + source_depth + ".npz", **save_dict)
| [
"numpy.savez",
"src.MLEnergy",
"numpy.array",
"matplotlib.pyplot.ion",
"src.list_tl_files"
] | [((138, 147), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (145, 147), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1319), 'numpy.savez', 'np.savez', (["('data/processed/bg_ri_eng_' + source_depth + '.npz')"], {}), "('data/processed/bg_ri_eng_' + source_depth + '.npz', **save_dict)\n", (1253, 1319), True, 'import numpy as np\n'), ((229, 273), 'src.list_tl_files', 'list_tl_files', (['fc'], {'source_depth': 'source_depth'}), '(fc, source_depth=source_depth)\n', (242, 273), False, 'from src import MLEnergy, list_tl_files\n'), ((615, 629), 'numpy.array', 'np.array', (['e_ri'], {}), '(e_ri)\n', (623, 629), True, 'import numpy as np\n'), ((643, 659), 'numpy.array', 'np.array', (['e_ri_0'], {}), '(e_ri_0)\n', (651, 659), True, 'import numpy as np\n'), ((675, 709), 'numpy.array', 'np.array', (['loop_len'], {'dtype': '"""object"""'}), "(loop_len, dtype='object')\n", (683, 709), True, 'import numpy as np\n'), ((742, 755), 'numpy.array', 'np.array', (['x_s'], {}), '(x_s)\n', (750, 755), True, 'import numpy as np\n'), ((376, 429), 'src.MLEnergy', 'MLEnergy', (['tl'], {'source_depth': 'source_depth', 'bg_only': '(True)'}), '(tl, source_depth=source_depth, bg_only=True)\n', (384, 429), False, 'from src import MLEnergy, list_tl_files\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import cntk as C
import pytest
def test_slice_stride():
    """Slicing a constant with a stride picks every other element."""
    values = list(range(0, 10))
    const = C.constant(value=values)
    sliced = const[0:3:2].eval()
    assert np.all(sliced == [0, 2])
def test_eval_scalar():
    """Arithmetic on a scalar constant evaluates to plain numbers."""
    two = C.constant(value=2)
    assert (two + 3).eval() == 5.0
    summed = (two + [3, 4]).eval()
    assert np.all(summed == [5, 6])
def test_numpy_conversion():
    """NDArrayView, Value, Constant and Parameter all round-trip to numpy."""
    from cntk.internal import sanitize_value
    from ..cntk_py import Value

    expected = np.ones((2, 3))
    # NDArrayView round-trip
    view = sanitize_value((2, 3), 1, np.float32, None)
    assert np.all(view.asarray() == expected)
    # Value round-trip
    assert np.all(Value(view).asarray() == expected)
    # Constant round-trip
    const = C.constant(1, shape=(2, 3))
    assert np.all(const.asarray() == expected)
    # Parameter round-trip
    param = C.parameter(shape=(2, 3), init=1)
    assert np.all(param.asarray() == expected)
| [
"cntk.internal.sanitize_value",
"numpy.ones",
"cntk.constant",
"cntk.parameter"
] | [((438, 457), 'cntk.constant', 'C.constant', ([], {'value': '(2)'}), '(value=2)\n', (448, 457), True, 'import cntk as C\n'), ((677, 720), 'cntk.internal.sanitize_value', 'sanitize_value', (['(2, 3)', '(1)', 'np.float32', 'None'], {}), '((2, 3), 1, np.float32, None)\n', (691, 720), False, 'from cntk.internal import sanitize_value\n'), ((880, 907), 'cntk.constant', 'C.constant', (['(1)'], {'shape': '(2, 3)'}), '(1, shape=(2, 3))\n', (890, 907), True, 'import cntk as C\n'), ((990, 1023), 'cntk.parameter', 'C.parameter', ([], {'shape': '(2, 3)', 'init': '(1)'}), '(shape=(2, 3), init=1)\n', (1001, 1023), True, 'import cntk as C\n'), ((756, 771), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (763, 771), True, 'import numpy as np\n'), ((834, 849), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (841, 849), True, 'import numpy as np\n'), ((940, 955), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (947, 955), True, 'import numpy as np\n'), ((1056, 1071), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (1063, 1071), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import unittest
import io
import numpy as np
from itertools import islice
from xnmt.input import PlainTextReader
from xnmt.embedder import PretrainedSimpleWordEmbedder
from xnmt.model_context import ModelContext, PersistentParamCollection
import xnmt.events
class PretrainedSimpleWordEmbedderSanityTest(unittest.TestCase):

    def setUp(self):
        """Build a frozen vocabulary from the sample corpus and a fresh context."""
        xnmt.events.clear()
        self.input_reader = PlainTextReader()
        # Reading the sentences populates the vocabulary as a side effect.
        list(self.input_reader.read_sents('examples/data/head.ja'))
        self.input_reader.freeze()
        self.context = ModelContext()
        self.context.dynet_param_collection = PersistentParamCollection(None, 0)

    def test_load(self):
        """
        Checks that the embeddings can be loaded and that one loaded vector
        matches the corresponding line of the vector file.
        """
        embedder = PretrainedSimpleWordEmbedder(self.context, self.input_reader.vocab,
                                                'examples/data/wiki.ja.vec.small', 300)
        with io.open('examples/data/wiki.ja.vec.small', encoding='utf-8') as vecfile:
            # Line 10 of the vector file holds the entry for '日'.
            parts = next(islice(vecfile, 9, None)).split()
            word = parts[0]
            word_id = self.input_reader.vocab.w2i[word]
            loaded = embedder.embeddings.batch([word_id]).npvalue().tolist()
            expected = np.array(parts[1:], dtype=float).tolist()
            self.assertTrue(np.allclose(loaded, expected, rtol=1e-5))
| [
"xnmt.embedder.PretrainedSimpleWordEmbedder",
"itertools.islice",
"xnmt.model_context.PersistentParamCollection",
"io.open",
"numpy.array",
"xnmt.input.PlainTextReader",
"xnmt.model_context.ModelContext"
] | [((419, 436), 'xnmt.input.PlainTextReader', 'PlainTextReader', ([], {}), '()\n', (434, 436), False, 'from xnmt.input import PlainTextReader\n'), ((551, 565), 'xnmt.model_context.ModelContext', 'ModelContext', ([], {}), '()\n', (563, 565), False, 'from xnmt.model_context import ModelContext, PersistentParamCollection\n'), ((608, 642), 'xnmt.model_context.PersistentParamCollection', 'PersistentParamCollection', (['None', '(0)'], {}), '(None, 0)\n', (633, 642), False, 'from xnmt.model_context import ModelContext, PersistentParamCollection\n'), ((797, 908), 'xnmt.embedder.PretrainedSimpleWordEmbedder', 'PretrainedSimpleWordEmbedder', (['self.context', 'self.input_reader.vocab', '"""examples/data/wiki.ja.vec.small"""', '(300)'], {}), "(self.context, self.input_reader.vocab,\n 'examples/data/wiki.ja.vec.small', 300)\n", (825, 908), False, 'from xnmt.embedder import PretrainedSimpleWordEmbedder\n'), ((1012, 1072), 'io.open', 'io.open', (['"""examples/data/wiki.ja.vec.small"""'], {'encoding': '"""utf-8"""'}), "('examples/data/wiki.ja.vec.small', encoding='utf-8')\n", (1019, 1072), False, 'import io\n'), ((1108, 1132), 'itertools.islice', 'islice', (['vecfile', '(9)', 'None'], {}), '(vecfile, 9, None)\n', (1114, 1132), False, 'from itertools import islice\n'), ((1404, 1435), 'numpy.array', 'np.array', (['test_emb'], {'dtype': 'float'}), '(test_emb, dtype=float)\n', (1412, 1435), True, 'import numpy as np\n')] |
"""Hierarchical clustering of the data"""
from functools import partial
import logging
import os
import pickle
from typing import Callable, Dict, NamedTuple, NewType
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as hcl
import scipy.io as sio
from skimage.io import imsave
import spdivik._scripting as scr
from spdivik.kmeans._scripting.parsers import assert_configured
import spdivik.types as ty
import spdivik.visualize as vis
# Linkage matrix as produced by scipy.cluster.hierarchy.linkage.
LinkageMatrix = NewType('LinkageMatrix', np.ndarray)
# Computes a linkage matrix from a data matrix.
LinkageBackend = Callable[[ty.Data], LinkageMatrix]
# Dendrogram description dict (returned by scipy.cluster.hierarchy.dendrogram).
Dendrogram = NewType('Dendrogram', Dict)
# Computes (and draws) a dendrogram from a linkage matrix.
DendrogramBackend = Callable[[LinkageMatrix], Dendrogram]
# Persists the current matplotlib figure under the given file name.
SaveFigureBackend = Callable[[str], None]
# Bundle of the three configured backends that make up a single experiment.
Experiment = NamedTuple('Experiment', [
    ('linkage', LinkageBackend),
    ('dendrogram', DendrogramBackend),
    ('save_figure', SaveFigureBackend)
])
def flatten_linkage(linkage_matrix: LinkageMatrix) -> ty.IntLabels:
    """Cut the hierarchy into a flat partition of the dataset.

    The cut is made at 70% of the maximal cophenetic distance — MATLAB's
    default, matching the default dendrogram color threshold.
    """
    threshold = 0.7 * linkage_matrix[:, 2].max()
    labels = hcl.fcluster(linkage_matrix, t=threshold, criterion='distance')
    return labels.astype(int)
def compute_centroids(data: ty.Data, partition: ty.IntLabels) -> ty.Data:
    """Return the mean observation of every flat cluster."""
    grouped = pd.DataFrame(data).groupby(partition)
    return grouped.mean().values
def build_experiment(config) -> Experiment:
    """Assemble an Experiment from a configuration mapping.

    Each configuration section is validated key-by-key (same order as the
    original checks) and then bound to its scipy / matplotlib backend via
    ``functools.partial``.
    """
    linkage_options = ('method', 'metric', 'optimal_ordering')
    dendrogram_options = ('truncate_mode', 'p', 'color_threshold',
                          'orientation', 'count_sort', 'distance_sort',
                          'show_leaf_counts', 'leaf_font_size',
                          'show_contracted')
    plot_options = ('dpi', 'facecolor', 'edgecolor', 'orientation',
                    'transparent', 'frameon', 'bbox_inches', 'pad_inches')

    assert_configured(config, 'linkage')
    linkage_config = config['linkage']
    for option in linkage_options:
        assert_configured(linkage_config, option)
    linkage = partial(hcl.linkage, **linkage_config)

    assert_configured(config, 'dendrogram')
    dendrogram_config = config['dendrogram']
    for option in dendrogram_options:
        assert_configured(dendrogram_config, option)
    dendrogram = partial(hcl.dendrogram, **dendrogram_config)

    assert_configured(config, 'plot')
    plot_config = config['plot']
    for option in plot_options:
        assert_configured(plot_config, option)
    save_figure = partial(plt.savefig, **plot_config)

    return Experiment(linkage, dendrogram, save_figure)
def save_linkage(fname, linkage: LinkageMatrix):
    """Persist the linkage matrix in both numpy and MATLAB formats.

    ``fname`` maps a base file name to its destination path.
    """
    logging.info('Saving linkage in numpy format.')
    np.save(fname('linkage.npy'), linkage)
    logging.info('Converting linkage to MATLAB format.')
    mlab_form = hcl.to_mlab_linkage(linkage)
    logging.info('Saving linkage in MATLAB format.')
    sio.savemat(fname('linkage.mat'), {'linkage': mlab_form})
def save_partition(fname, partition: ty.IntLabels, xy: np.ndarray=None):
    """Persist the flat partition as .npy and .csv files.

    When spatial coordinates ``xy`` are supplied, a PNG visualization of
    the partition is produced as well.
    """
    logging.info('Saving flat partition.')
    np.save(fname('partition.npy'), partition)
    np.savetxt(fname('partition.csv'), partition, fmt='%i', delimiter=', ')
    if xy is None:
        return
    logging.info('Generating visulization.')
    imsave(fname('partition.png'), vis.visualize(partition, xy))
def save_centroids(fname, centroids: np.ndarray):
    """Persist cluster centroids as both .npy and .csv files."""
    logging.info('Saving centroids.')
    npy_path, csv_path = fname('centroids.npy'), fname('centroids.csv')
    np.save(npy_path, centroids)
    np.savetxt(csv_path, centroids, delimiter=', ')
def save_dendrogram(fname, save_figure: SaveFigureBackend, dendrogram: Dendrogram):
    """Persist the dendrogram data (pickle) and its plot (PDF and PNG),
    then release all open matplotlib figures."""
    logging.info('Pickling dendrogram data.')
    with open(fname('dendrogram.pkl'), 'wb') as dump_file:
        pickle.dump(dendrogram, dump_file)
    logging.info('Saving dendrogram plot as PDF.')
    save_figure(fname('dendrogram.pdf'))
    logging.info('Saving dendrogram plot as PNG.')
    save_figure(fname('dendrogram.png'))
    plt.close('all')
def main():
    """Entry point: cluster the data, then persist every artifact.

    Any exception is logged before being re-raised so that the failure
    reason is recorded in the experiment log.
    """
    data, config, destination, xy = scr.initialize()
    experiment = build_experiment(config)
    fname = partial(os.path.join, destination)
    try:
        linkage_matrix = experiment.linkage(data)
        save_linkage(fname, linkage_matrix)
        save_dendrogram(fname, experiment.save_figure,
                        experiment.dendrogram(linkage_matrix))
        flat_partition = flatten_linkage(linkage_matrix)
        save_partition(fname, flat_partition, xy)
        save_centroids(fname, compute_centroids(data, flat_partition))
    except Exception as ex:
        logging.error("Failed with exception.")
        logging.error(repr(ex))
        raise
# Run the clustering pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| [
"pickle.dump",
"spdivik._scripting.initialize",
"spdivik.visualize.visualize",
"scipy.cluster.hierarchy.to_mlab_linkage",
"typing.NewType",
"numpy.max",
"spdivik.kmeans._scripting.parsers.assert_configured",
"matplotlib.pyplot.close",
"scipy.cluster.hierarchy.fcluster",
"functools.partial",
"log... | [((503, 539), 'typing.NewType', 'NewType', (['"""LinkageMatrix"""', 'np.ndarray'], {}), "('LinkageMatrix', np.ndarray)\n", (510, 539), False, 'from typing import Callable, Dict, NamedTuple, NewType\n'), ((605, 632), 'typing.NewType', 'NewType', (['"""Dendrogram"""', 'Dict'], {}), "('Dendrogram', Dict)\n", (612, 632), False, 'from typing import Callable, Dict, NamedTuple, NewType\n'), ((746, 876), 'typing.NamedTuple', 'NamedTuple', (['"""Experiment"""', "[('linkage', LinkageBackend), ('dendrogram', DendrogramBackend), (\n 'save_figure', SaveFigureBackend)]"], {}), "('Experiment', [('linkage', LinkageBackend), ('dendrogram',\n DendrogramBackend), ('save_figure', SaveFigureBackend)])\n", (756, 876), False, 'from typing import Callable, Dict, NamedTuple, NewType\n'), ((1629, 1665), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['config', '"""linkage"""'], {}), "(config, 'linkage')\n", (1646, 1665), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((1709, 1752), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['linkage_config', '"""method"""'], {}), "(linkage_config, 'method')\n", (1726, 1752), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((1757, 1800), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['linkage_config', '"""metric"""'], {}), "(linkage_config, 'metric')\n", (1774, 1800), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((1805, 1858), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['linkage_config', '"""optimal_ordering"""'], {}), "(linkage_config, 'optimal_ordering')\n", (1822, 1858), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((1873, 1911), 'functools.partial', 'partial', (['hcl.linkage'], {}), '(hcl.linkage, **linkage_config)\n', (1880, 1911), False, 'from functools import partial\n'), ((1917, 1956), 
'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['config', '"""dendrogram"""'], {}), "(config, 'dendrogram')\n", (1934, 1956), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2006, 2059), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""truncate_mode"""'], {}), "(dendrogram_config, 'truncate_mode')\n", (2023, 2059), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2064, 2105), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""p"""'], {}), "(dendrogram_config, 'p')\n", (2081, 2105), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2110, 2165), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""color_threshold"""'], {}), "(dendrogram_config, 'color_threshold')\n", (2127, 2165), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2170, 2221), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""orientation"""'], {}), "(dendrogram_config, 'orientation')\n", (2187, 2221), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2226, 2276), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""count_sort"""'], {}), "(dendrogram_config, 'count_sort')\n", (2243, 2276), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2281, 2334), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""distance_sort"""'], {}), "(dendrogram_config, 'distance_sort')\n", (2298, 2334), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2339, 2395), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""show_leaf_counts"""'], 
{}), "(dendrogram_config, 'show_leaf_counts')\n", (2356, 2395), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2400, 2454), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""leaf_font_size"""'], {}), "(dendrogram_config, 'leaf_font_size')\n", (2417, 2454), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2459, 2514), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['dendrogram_config', '"""show_contracted"""'], {}), "(dendrogram_config, 'show_contracted')\n", (2476, 2514), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2532, 2576), 'functools.partial', 'partial', (['hcl.dendrogram'], {}), '(hcl.dendrogram, **dendrogram_config)\n', (2539, 2576), False, 'from functools import partial\n'), ((2582, 2615), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['config', '"""plot"""'], {}), "(config, 'plot')\n", (2599, 2615), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2653, 2690), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""dpi"""'], {}), "(plot_config, 'dpi')\n", (2670, 2690), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2695, 2738), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""facecolor"""'], {}), "(plot_config, 'facecolor')\n", (2712, 2738), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2743, 2786), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""edgecolor"""'], {}), "(plot_config, 'edgecolor')\n", (2760, 2786), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2791, 2836), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""orientation"""'], {}), 
"(plot_config, 'orientation')\n", (2808, 2836), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2841, 2886), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""transparent"""'], {}), "(plot_config, 'transparent')\n", (2858, 2886), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2891, 2932), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""frameon"""'], {}), "(plot_config, 'frameon')\n", (2908, 2932), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2937, 2982), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""bbox_inches"""'], {}), "(plot_config, 'bbox_inches')\n", (2954, 2982), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((2987, 3031), 'spdivik.kmeans._scripting.parsers.assert_configured', 'assert_configured', (['plot_config', '"""pad_inches"""'], {}), "(plot_config, 'pad_inches')\n", (3004, 3031), False, 'from spdivik.kmeans._scripting.parsers import assert_configured\n'), ((3050, 3085), 'functools.partial', 'partial', (['plt.savefig'], {}), '(plt.savefig, **plot_config)\n', (3057, 3085), False, 'from functools import partial\n'), ((3227, 3274), 'logging.info', 'logging.info', (['"""Saving linkage in numpy format."""'], {}), "('Saving linkage in numpy format.')\n", (3239, 3274), False, 'import logging\n'), ((3322, 3374), 'logging.info', 'logging.info', (['"""Converting linkage to MATLAB format."""'], {}), "('Converting linkage to MATLAB format.')\n", (3334, 3374), False, 'import logging\n'), ((3396, 3424), 'scipy.cluster.hierarchy.to_mlab_linkage', 'hcl.to_mlab_linkage', (['linkage'], {}), '(linkage)\n', (3415, 3424), True, 'import scipy.cluster.hierarchy as hcl\n'), ((3429, 3477), 'logging.info', 'logging.info', (['"""Saving linkage in MATLAB format."""'], {}), "('Saving linkage in MATLAB 
format.')\n", (3441, 3477), False, 'import logging\n'), ((3624, 3662), 'logging.info', 'logging.info', (['"""Saving flat partition."""'], {}), "('Saving flat partition.')\n", (3636, 3662), False, 'import logging\n'), ((4021, 4054), 'logging.info', 'logging.info', (['"""Saving centroids."""'], {}), "('Saving centroids.')\n", (4033, 4054), False, 'import logging\n'), ((4258, 4299), 'logging.info', 'logging.info', (['"""Pickling dendrogram data."""'], {}), "('Pickling dendrogram data.')\n", (4270, 4299), False, 'import logging\n'), ((4396, 4442), 'logging.info', 'logging.info', (['"""Saving dendrogram plot as PDF."""'], {}), "('Saving dendrogram plot as PDF.')\n", (4408, 4442), False, 'import logging\n'), ((4488, 4534), 'logging.info', 'logging.info', (['"""Saving dendrogram plot as PNG."""'], {}), "('Saving dendrogram plot as PNG.')\n", (4500, 4534), False, 'import logging\n'), ((4580, 4596), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4589, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4683, 4699), 'spdivik._scripting.initialize', 'scr.initialize', ([], {}), '()\n', (4697, 4699), True, 'import spdivik._scripting as scr\n'), ((4754, 4788), 'functools.partial', 'partial', (['os.path.join', 'destination'], {}), '(os.path.join, destination)\n', (4761, 4788), False, 'from functools import partial\n'), ((1131, 1159), 'numpy.max', 'np.max', (['linkage_matrix[:, 2]'], {}), '(linkage_matrix[:, 2])\n', (1137, 1159), True, 'import numpy as np\n'), ((3817, 3857), 'logging.info', 'logging.info', (['"""Generating visulization."""'], {}), "('Generating visulization.')\n", (3829, 3857), False, 'import logging\n'), ((3882, 3910), 'spdivik.visualize.visualize', 'vis.visualize', (['partition', 'xy'], {}), '(partition, xy)\n', (3895, 3910), True, 'import spdivik.visualize as vis\n'), ((4362, 4391), 'pickle.dump', 'pickle.dump', (['dendrogram', 'file'], {}), '(dendrogram, file)\n', (4373, 4391), False, 'import pickle\n'), ((1176, 1264), 
'scipy.cluster.hierarchy.fcluster', 'hcl.fcluster', (['linkage_matrix'], {'t': 'cophenetic_distance_threshold', 'criterion': '"""distance"""'}), "(linkage_matrix, t=cophenetic_distance_threshold, criterion=\n 'distance')\n", (1188, 1264), True, 'import scipy.cluster.hierarchy as hcl\n'), ((5219, 5258), 'logging.error', 'logging.error', (['"""Failed with exception."""'], {}), "('Failed with exception.')\n", (5232, 5258), False, 'import logging\n'), ((1480, 1498), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1492, 1498), True, 'import pandas as pd\n')] |
import os
import numpy as np
from sklearn.utils import shuffle
from keras.utils import to_categorical
__author__ = '<NAME>'
def get_risk_group(x_trn, c_trn, s_trn, high_risk_th, low_risk_th):
    """Split training samples into high-risk and low-risk groups.

    Args:
        x_trn: sequence of training samples (feature vectors).
        c_trn: censoring indicators aligned with x_trn (0 = event observed).
        s_trn: survival times aligned with x_trn.
        high_risk_th: samples with survival time <= this threshold AND an
            observed event (c == 0) are put in the high-risk group.
        low_risk_th: samples with survival time > this threshold are put in
            the low-risk group.

    Returns:
        Tuple ``(high_risk, low_risk)`` of numpy arrays of samples.
    """
    high_risk = []
    low_risk = []
    # NOTE: the original loop variable was named `os`, which shadowed the
    # imported `os` module; renamed to `surv_time` for clarity and safety.
    for idx, surv_time in enumerate(s_trn):
        if surv_time <= high_risk_th and c_trn[idx] == 0:
            high_risk.append(x_trn[idx])
        if surv_time > low_risk_th:
            low_risk.append(x_trn[idx])
    return np.asarray(high_risk), np.asarray(low_risk)
def get_train_val(hg, lg, is_categori_y, seed):
    """Combine high/low risk groups into a labelled, shuffled 80/20 split.

    High-risk samples get label 1, low-risk samples label 0.  Labels are
    one-hot encoded when *is_categori_y* is truthy.  Returns
    (x_train, y_train, x_val, y_val).
    """
    features = np.concatenate([hg, lg])
    labels = np.concatenate([np.ones(len(hg)), np.zeros(len(lg))])
    if is_categori_y:
        labels = to_categorical(labels, num_classes=2)
    features, labels = shuffle(features, labels, random_state=seed)
    split = len(features) * 4 // 5
    return features[:split], labels[:split], features[split:], labels[split:]
def get_train_val_dfs(x_all, c_all, s_all, seed):
    """Shuffle (features, events, survival times) and split 80/20.

    Event indicators are derived as ``1 - c_all`` (censoring flipped).
    Returns (x_trn, e_trn, s_trn, x_val, e_val, s_val).
    """
    events = 1 - c_all
    x_all, events, s_all = shuffle(x_all, events, s_all, random_state=seed)
    split = len(x_all) * 4 // 5
    train = (x_all[:split], events[:split], s_all[:split])
    val = (x_all[split:], events[split:], s_all[split:])
    return (*train, *val)
def get_train(hg, lg, is_categori_y, seed):
    """Combine high/low risk groups into one shuffled labelled training set.

    High-risk samples get label 1, low-risk samples label 0.  Labels are
    one-hot encoded when *is_categori_y* is truthy.  Returns (x, y).
    """
    features = np.concatenate([hg, lg])
    labels = np.concatenate([np.ones(len(hg)), np.zeros(len(lg))])
    if is_categori_y:
        labels = to_categorical(labels, num_classes=2)
    features, labels = shuffle(features, labels, random_state=seed)
    return features, labels
| [
"sklearn.utils.shuffle",
"numpy.asarray",
"numpy.concatenate",
"keras.utils.to_categorical"
] | [((498, 522), 'numpy.concatenate', 'np.concatenate', (['[hg, lg]'], {}), '([hg, lg])\n', (512, 522), True, 'import numpy as np\n'), ((592, 620), 'numpy.concatenate', 'np.concatenate', (['[hg_y, lg_y]'], {}), '([hg_y, lg_y])\n', (606, 620), True, 'import numpy as np\n'), ((727, 767), 'sklearn.utils.shuffle', 'shuffle', (['x_all', 'y_all'], {'random_state': 'seed'}), '(x_all, y_all, random_state=seed)\n', (734, 767), False, 'from sklearn.utils import shuffle\n'), ((1000, 1047), 'sklearn.utils.shuffle', 'shuffle', (['x_all', 'e_all', 's_all'], {'random_state': 'seed'}), '(x_all, e_all, s_all, random_state=seed)\n', (1007, 1047), False, 'from sklearn.utils import shuffle\n'), ((1276, 1300), 'numpy.concatenate', 'np.concatenate', (['[hg, lg]'], {}), '([hg, lg])\n', (1290, 1300), True, 'import numpy as np\n'), ((1370, 1398), 'numpy.concatenate', 'np.concatenate', (['[hg_y, lg_y]'], {}), '([hg_y, lg_y])\n', (1384, 1398), True, 'import numpy as np\n'), ((1493, 1533), 'sklearn.utils.shuffle', 'shuffle', (['x_all', 'y_all'], {'random_state': 'seed'}), '(x_all, y_all, random_state=seed)\n', (1500, 1533), False, 'from sklearn.utils import shuffle\n'), ((406, 420), 'numpy.asarray', 'np.asarray', (['hg'], {}), '(hg)\n', (416, 420), True, 'import numpy as np\n'), ((422, 436), 'numpy.asarray', 'np.asarray', (['lg'], {}), '(lg)\n', (432, 436), True, 'import numpy as np\n'), ((671, 707), 'keras.utils.to_categorical', 'to_categorical', (['y_all'], {'num_classes': '(2)'}), '(y_all, num_classes=2)\n', (685, 707), False, 'from keras.utils import to_categorical\n'), ((1437, 1473), 'keras.utils.to_categorical', 'to_categorical', (['y_all'], {'num_classes': '(2)'}), '(y_all, num_classes=2)\n', (1451, 1473), False, 'from keras.utils import to_categorical\n')] |
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Full DrQA pipeline."""
import heapq
import logging
import math
import time
from multiprocessing import Pool as ProcessPool
from multiprocessing.util import Finalize
import numpy as np
import regex
import torch
from . import DEFAULTS
from .. import reader
from .. import tokenizers
from ..reader.data import ReaderDataset, SortedBatchSampler
from ..reader.vector import batchify
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Multiprocessing functions to fetch and tokenize text
# ------------------------------------------------------------------------------
PROCESS_TOK = None  # per-worker tokenizer instance, installed by init()
PROCESS_CANDS = None  # optional fixed answer-candidate set, installed by init()
# DOC_MEAN = 8.5142
# DOC_STD = 2.8324
def init(tokenizer_class, tokenizer_opts, candidates=None):
    """Worker-process initializer.

    Builds a tokenizer instance, registers its ``shutdown`` method to run
    at interpreter exit, and stashes it (plus the optional candidate set)
    in module-level globals for use by the pool workers.
    """
    global PROCESS_TOK, PROCESS_CANDS
    tok = tokenizer_class(**tokenizer_opts)
    Finalize(tok, tok.shutdown, exitpriority=100)
    PROCESS_TOK = tok
    PROCESS_CANDS = candidates
def tokenize_text(text):
    """Tokenize *text* using the process-local tokenizer set up by init()."""
    return PROCESS_TOK.tokenize(text)
# ------------------------------------------------------------------------------
# Main DrQA pipeline
# ------------------------------------------------------------------------------
class DrQA(object):
    """End-to-end open-domain QA pipeline.

    Retrieves candidate documents with a ranker, splits and tokenizes them
    in a process pool, scores paragraph spans with a DocReader, and
    (optionally) stops reading early using a trained early-termination
    ("et") model.
    """

    # Target size for squashing short paragraphs together.
    # 0 = read every paragraph independently
    # infty = read all paragraphs together
    GROUP_LENGTH = 0

    def __init__(
        self,
        reader_model=None,
        normalize=False,
        embedding_file=None,
        tokenizer=None,
        fixed_candidates=None,
        batch_size=128,
        cuda=True,
        data_parallel=False,
        max_loaders=5,
        num_workers=None,
        ranker=None,
        et_model=None,
        et_threshold=None
    ):
        """Initialize the pipeline.
        Args:
            reader_model: model file from which to load the DocReader.
            normalize: passed through to DocReader.load (softmax-normalize
                span scores).
            embedding_file: if given, will expand DocReader dictionary to use
                all available pretrained embeddings.
            tokenizer: string option to specify tokenizer used on docs.
            fixed_candidates: if given, all predictions will be constrained to
                the set of candidates contained in the file. One entry per line.
            batch_size: batch size when processing paragraphs.
            cuda: whether to use the gpu.
            data_parallel: whether to use multiple gpus.
            max_loaders: max number of async data loading workers when reading.
              (default is fine).
            num_workers: number of parallel CPU processes to use for tokenizing
              and post processing results.
            ranker: document ranker instance (must expose closest_docs /
                batch_closest_docs).
            et_model: path to a treelite early-termination model; if falsy,
                early stopping is disabled.
            et_threshold: stop probability threshold; values outside (0, 1)
                fall back to 0.5.
        """
        self.batch_size = batch_size
        self.max_loaders = max_loaders
        self.fixed_candidates = fixed_candidates is not None
        self.cuda = cuda
        logger.info('Initializing document ranker...')
        self.ranker = ranker
        logger.info('Initializing document reader...')
        t0 = time.time()
        reader_model = reader_model or DEFAULTS['reader_model']
        self.reader = reader.DocReader.load(reader_model, normalize=normalize)
        t1 = time.time()
        logger.info('document reader model load [time]: %.4f s' % (t1 - t0))
        if embedding_file:
            logger.info('embedding_file')
            logger.info('Expanding dictionary...')
            words = reader.utils.index_embedding_words(embedding_file)
            added = self.reader.expand_dictionary(words)
            self.reader.load_embeddings(added, embedding_file)
        if cuda:
            logger.info('cuda')
            self.reader.cuda()
            t2 = time.time()
            logger.info('cuda initialized [time]: %.4f s' % (t2 - t1))
        if data_parallel:
            logger.info('data_parallel')
            self.reader.parallelize()
        annotators = tokenizers.get_annotators_for_model(self.reader)
        tok_opts = {'annotators': annotators}
        logger.debug('tokenizer')
        if not tokenizer:
            tok_class = DEFAULTS['tokenizer']
        else:
            tok_class = tokenizers.get_class(tokenizer)
        logger.debug('annotators')
        self.num_workers = num_workers
        # Pool workers run init() so each owns its own tokenizer instance.
        self.processes = ProcessPool(num_workers,
                                    initializer=init,
                                    initargs=(tok_class, tok_opts, fixed_candidates))
        if et_model:
            # NOTE(review): assumes et_threshold is a number when et_model is
            # given; a None value here would raise a TypeError — confirm callers.
            self.et_threshold = et_threshold if 0 < et_threshold < 1 else 0.5
            logger.info('Initializing early stopping model...')
            import treelite.runtime
            self.et_model = treelite.runtime.Predictor(et_model, verbose=True)
            logger.info('early stopping model (et threshold: %s) loaded.' % self.et_threshold)
        else:
            self.et_threshold = None

    def _split_doc(self, doc):
        """Given a doc, split it into chunks (by paragraph)."""
        curr = []
        curr_len = 0
        for split in regex.split(r'\n+', doc):
            split = split.strip()
            if len(split) == 0:
                continue
            # Maybe group paragraphs together until we hit a length limit
            if len(curr) > 0 and curr_len + len(split) > self.GROUP_LENGTH:
                yield ' '.join(curr)
                curr = []
                curr_len = 0
            curr.append(split)
            curr_len += len(split)
        if len(curr) > 0:
            yield ' '.join(curr)

    def _get_loader(self, data, num_loaders):
        """Return a pytorch data iterator for provided examples."""
        dataset = ReaderDataset(data, self.reader)
        # Sort by length so batches contain similarly-sized paragraphs.
        sampler = SortedBatchSampler(
            dataset.lengths(),
            self.batch_size,
            shuffle=False
        )
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            sampler=sampler,
            num_workers=num_loaders,
            collate_fn=batchify,
            pin_memory=self.cuda,
        )
        return loader

    def process_single(self, query, top_n=1, n_docs=5,
                       return_context=False):
        """Run a single query."""
        predictions = self.process_batch(
            [query],
            top_n, n_docs, return_context
        )
        return predictions[0]

    def process(self, query, top_n=1, n_docs=5):
        """Answer *query*, using early termination when an et model is loaded."""
        if self.et_threshold:
            predictions = self.process_batch_et(query, n_docs)
        else:
            predictions = self.process_batch(query, top_n=top_n, n_docs=n_docs)
        return predictions

    def process_batch(self, queries, top_n=1, n_docs=5,
                      return_context=False):
        """Run a batch of queries (more efficient)."""
        t3 = time.time()
        logger.info('Processing %d queries...' % len(queries))
        logger.info('Retrieving top %d docs...' % n_docs)
        # Rank documents for queries.
        if len(queries) == 1:
            ranked = [self.ranker.closest_docs(queries[0], k=n_docs)]
        else:
            ranked = self.ranker.batch_closest_docs(queries, k=n_docs, num_workers=self.num_workers)
        t4 = time.time()
        logger.info('docs retrieved [time]: %.4f s' % (t4 - t3))
        all_docids, all_doc_scores, all_doc_texts = zip(*ranked)

        # Flatten document ids and retrieve text from database.
        # We remove duplicates for processing efficiency.
        flat_docids, flat_doc_texts = zip(*{(d, t) for doc_ids, doc_texts in zip(all_docids, all_doc_texts)
                                            for d, t in zip(doc_ids, doc_texts)})
        # flat_docids = list({d for docids in all_docids for d in docids})
        # Map each unique doc id to its index in the flattened lists.
        did2didx = {did: didx for didx, did in enumerate(flat_docids)}
        # flat_doc_texts = list({t for doc_texts in all_doc_texts for t in doc_texts})
        # logger.info('doc_texts for top %d docs extracted' % n_docs)

        # Split and flatten documents. Maintain a mapping from doc (index in
        # flat list) to split (index in flat list).
        flat_splits = []
        didx2sidx = []
        for text in flat_doc_texts:
            splits = self._split_doc(text)
            # didx2sidx[d] holds the [start, end) split range for doc d.
            didx2sidx.append([len(flat_splits), -1])
            for split in splits:
                flat_splits.append(split)
            didx2sidx[-1][1] = len(flat_splits)
        t5 = time.time()
        # logger.debug('doc_texts flattened')

        # Push through the tokenizers as fast as possible.
        q_tokens = self.processes.map_async(tokenize_text, queries)
        s_tokens = self.processes.map_async(tokenize_text, flat_splits)
        q_tokens = q_tokens.get()
        s_tokens = s_tokens.get()
        # logger.info('q_tokens: %s' % q_tokens)
        # logger.info('s_tokens: %s' % s_tokens)
        t6 = time.time()
        logger.info('doc texts tokenized [time]: %.4f s' % (t6 - t5))

        # Group into structured example inputs. Examples' ids represent
        # mappings to their question, document, and split ids.
        examples = []
        for qidx in range(len(queries)):
            q_text = q_tokens[qidx].words()
            para_lens = []
            for rel_didx, did in enumerate(all_docids[qidx]):
                start, end = didx2sidx[did2didx[did]]
                for sidx in range(start, end):
                    para_text = s_tokens[sidx].words()
                    if len(q_text) > 0 and len(para_text) > 0:
                        examples.append({
                            'id': (qidx, rel_didx, sidx),
                            'question': q_text,
                            # 'qlemma': q_tokens[qidx].lemmas(),
                            'document': para_text,
                            'document_char': s_tokens[sidx].chars(),
                            'question_char': q_tokens[qidx].chars(),
                            # 'lemma': s_tokens[sidx].lemmas(),
                            # 'pos': s_tokens[sidx].pos(),
                            # 'ner': s_tokens[sidx].entities(),
                            'doc_score': float(all_doc_scores[qidx][rel_didx])
                        })
                        # r = {'w': para_text}
                        # f = open('/tmp/data.json', 'w')
                        # f.write(json.dumps(r))
                        # f.close()
                        # exit(0)
                    para_lens.append(len(s_tokens[sidx].words()))
            # logger.debug('question_p: %s paragraphs: %s' % (queries[qidx], para_lens))
        t7 = time.time()
        logger.info('paragraphs prepared [time]: %.4f s' % (t7 - t6))
        result_handles = []
        # Scale loader workers with corpus size: one per ~1000 examples.
        num_loaders = min(self.max_loaders, int(math.floor(len(examples) / 1e3)))
        for batch in self._get_loader(examples, num_loaders):
            handle = self.reader.predict(batch, async_pool=self.processes)
            result_handles.append((handle, batch[-1], batch[0].size(0)))
        t8 = time.time()
        logger.info('paragraphs predicted [time]: %.4f s' % (t8 - t7))

        # Iterate through the predictions, and maintain priority queues for
        # top scored answers for each question in the batch.
        queues = [[] for _ in range(len(queries))]
        for result, ex_ids, batch_size in result_handles:
            s, e, score = result.get()
            for i in range(batch_size):
                # We take the top prediction per split.
                if len(score[i]) > 0:
                    item = (score[i][0], ex_ids[i], s[i][0], e[i][0])
                    queue = queues[ex_ids[i][0]]
                    # Bounded min-heap keeps only the top_n best spans.
                    if len(queue) < top_n:
                        heapq.heappush(queue, item)
                    else:
                        heapq.heappushpop(queue, item)
        logger.info('answers processed...')

        # Arrange final top prediction data.
        all_predictions = []
        for queue in queues:
            predictions = []
            while len(queue) > 0:
                score, (qidx, rel_didx, sidx), s, e = heapq.heappop(queue)
                prediction = {
                    'doc_id': all_docids[qidx][rel_didx],
                    'start': int(s),
                    'end': int(e),
                    'span': s_tokens[sidx].slice(s, e + 1).untokenize(),
                    'doc_score': float(all_doc_scores[qidx][rel_didx]),
                    'span_score': float(score)
                }
                if return_context:
                    prediction['context'] = {
                        'text': s_tokens[sidx].untokenize(),
                        'start': s_tokens[sidx].offsets()[s][0],
                        'end': s_tokens[sidx].offsets()[e][1],
                    }
                predictions.append(prediction)
            # Heap pops lowest score first; reverse for best-first order.
            all_predictions.append(predictions[-1::-1])
        logger.info('%d queries processed [time]: %.4f s' %
                    (len(queries), time.time() - t3))
        return all_predictions

    def process_batch_et(self, queries, n_docs):
        """Run a batch of queries with early termination.

        Like process_batch, but scores paragraphs batch by batch and asks
        batch_predict_stop after each batch whether to stop reading.
        NOTE(review): despite the name, the example-building below only
        uses queries[0] — appears to assume a single query; confirm callers.
        """
        t3 = time.time()
        logger.info('ET Processing %d queries...' % len(queries))
        logger.info('ET Retrieving top %d docs...' % n_docs)
        # Rank documents for queries.
        if len(queries) == 1:
            ranked = [self.ranker.closest_docs(queries[0], k=n_docs)]
        else:
            ranked = self.ranker.batch_closest_docs(queries, k=n_docs, num_workers=self.num_workers)
        t4 = time.time()
        logger.info('ET docs retrieved [time]: %.4f s' % (t4 - t3))
        all_docids, all_doc_scores, all_doc_texts = zip(*ranked)

        # Flatten document ids and retrieve text from database.
        # We remove duplicates for processing efficiency.
        flat_docids, flat_doc_texts = zip(*{(d, t) for doc_ids, doc_texts in zip(all_docids, all_doc_texts)
                                            for d, t in zip(doc_ids, doc_texts)})
        # flat_docids = list({d for docids in all_docids for d in docids})
        did2didx = {did: didx for didx, did in enumerate(flat_docids)}
        # flat_doc_texts = list({t for doc_texts in all_doc_texts for t in doc_texts})
        # logger.info('doc_texts for top %d docs extracted' % n_docs)

        # Split and flatten documents. Maintain a mapping from doc (index in
        # flat list) to split (index in flat list).
        flat_splits = []
        didx2sidx = []
        for text in flat_doc_texts:
            splits = self._split_doc(text)
            didx2sidx.append([len(flat_splits), -1])
            for split in splits:
                flat_splits.append(split)
            didx2sidx[-1][1] = len(flat_splits)
        t5 = time.time()
        logger.debug('ET doc_texts flattened')

        # Push through the tokenizers as fast as possible.
        q_tokens = self.processes.map_async(tokenize_text, queries)
        s_tokens = self.processes.map_async(tokenize_text, flat_splits)
        q_tokens = q_tokens.get()
        s_tokens = s_tokens.get()
        # logger.info('q_tokens: %s' % q_tokens)
        # logger.info('s_tokens: %s' % s_tokens)
        t6 = time.time()
        logger.info('ET doc texts tokenized [time]: %.4f s' % (t6 - t5))

        # Group into structured example inputs. Examples' ids represent
        # mappings to their question, document, and split ids.
        examples = []
        q_text = q_tokens[0].words()
        para_lens = []
        for rel_didx, did in enumerate(all_docids[0]):
            start, end = didx2sidx[did2didx[did]]
            for sidx in range(start, end):
                para_text = s_tokens[sidx].words()
                # Paragraphs of 10 tokens or fewer are skipped here.
                if len(q_text) > 0 and len(para_text) > 10:
                    examples.append({
                        'id': (rel_didx, sidx),
                        'question': q_text,
                        'qlemma': q_tokens[0].lemmas(),
                        'document': para_text,
                        'document_char': s_tokens[sidx].chars(),
                        'question_char': q_tokens[0].chars(),
                        # 'lemma': s_tokens[sidx].lemmas(),
                        # 'pos': s_tokens[sidx].pos(),
                        # 'ner': s_tokens[sidx].entities(),
                        'doc_score': float(all_doc_scores[0][rel_didx])
                    })
                para_lens.append(len(para_text))
        logger.debug('question_p: %s paragraphs: %s' % (queries[0], para_lens))
        t7 = time.time()
        logger.info('paragraphs prepared [time]: %.4f s' % (t7 - t6))
        num_loaders = min(self.max_loaders, int(math.floor(len(examples) / 1e3)))
        all_predictions = []
        predictions = []
        all_a_scores = []
        all_p_scores = []
        all_spans = []
        all_a_z_scores = []
        processed_count = 0
        repeats = 0
        for batch in self._get_loader(examples, num_loaders):
            handle = self.reader.predict(batch, async_pool=self.processes)
            starts, ends, ans_scores = handle.get()
            # Keep only the top span prediction per paragraph.
            starts = [s[0] for s in starts]
            ends = [e[0] for e in ends]
            ans_scores = [float(a[0]) for a in ans_scores]
            all_a_scores.extend(ans_scores)
            # batch[-1] holds the (rel_didx, sidx) example ids.
            doc_ids = [all_docids[0][ids_[0]] for ids_ in batch[-1]]
            doc_scores = [float(all_doc_scores[0][ids_[0]]) for ids_ in batch[-1]]
            sids = [ids_[1] for ids_ in batch[-1]]
            all_p_scores.extend(doc_scores)
            f_d = (doc_ids, sids)
            f_score = (all_p_scores, all_a_scores, all_a_z_scores)
            f_ans = (starts, ends, ans_scores)
            processed_count += len(ans_scores)
            stop, batch_predictions, repeats_, all_spans_, all_a_z_scores_ = self.batch_predict_stop(f_d, f_score,
                                                                                                    f_ans, s_tokens,
                                                                                                    repeats, all_spans,
                                                                                                    processed_count)
            # Carry the running early-termination state across batches.
            all_spans = all_spans_
            all_a_z_scores = all_a_z_scores_
            repeats = repeats_
            predictions.extend(batch_predictions)
            if stop:
                break
            else:
                continue
        t8 = time.time()
        logger.info('paragraphs predicted [time]: %.4f s' % (t8 - t7))
        all_predictions.append(predictions[-1::-1])
        logger.info('%d queries processed [time]: %.4f s' %
                    (len(queries), time.time() - t3))
        return all_predictions

    def batch_predict_stop(self, f_d, f_s, f_a, s_tokens, repeats, all_spans, p_count=None):
        """Build predictions for one batch and decide whether to stop early.

        Args:
            f_d: tuple (doc_ids, split ids) for the current batch.
            f_s: tuple (all doc scores, all answer scores, all answer
                z-scores) accumulated so far, including this batch.
            f_a: tuple (starts, ends, answer scores) for the current batch.
            s_tokens: tokenized splits, indexed by split id.
            repeats: running count of repeated answer spans.
            all_spans: running list of every span seen so far.
            p_count: number of paragraphs processed so far (including this
                batch).

        Returns:
            Tuple (should_stop, batch predictions, updated repeats,
            updated all_spans, updated all_a_z_scores).
        """
        doc_ids, sids = f_d
        all_p_scores, all_a_scores, all_a_z_scores = f_s
        sample_mean = np.mean(all_a_scores)
        # NOTE(review): sample_std can be 0 when all scores are identical,
        # which would make the z-score division blow up — confirm inputs.
        sample_std = np.std(all_a_scores)
        starts, ends, ans_scores = f_a
        batch_size = len(doc_ids)
        # all_s_p, all_np, all_na = f_s
        doc_scores, ans_scores = all_p_scores[-batch_size:], all_a_scores[-batch_size:]
        predictions_ = []
        should_stop = False
        for i, item in enumerate(zip(doc_ids, sids, doc_scores, starts, ends, ans_scores)):
            doc_id, sid, doc_score, start, end, a_score = item
            span = s_tokens[sid].slice(start, end + 1).untokenize()
            prediction = {
                'doc_id': doc_id,
                'start': int(start),
                'end': int(end),
                'span': span,
                'doc_score': doc_score,
                'span_score': a_score,
            }
            predictions_.append(prediction)
            if span in all_spans:
                repeats += 1
            all_spans.append(span)
            # repeats_2 = 1 if repeats == 2 else 0
            # repeats_3 = 1 if repeats == 3 else 0
            # repeats_4 = 1 if repeats == 4 else 0
            # repeats_5 = 1 if repeats >= 5 else 0
            # NOTE(review): p_count already includes this whole batch, so
            # i + p_count may double-count — verify against training of the
            # et model.
            past20 = 1 if i + p_count >= 20 else 0
            if len(all_a_scores) <= 1:  # don't use a_z_score feature at the beginning
                a_z_score = 0
            else:
                a_z_score = (a_score - sample_mean) / sample_std
            all_a_z_scores.append(a_z_score)
            max_z_score = max(all_a_z_scores)
            # Feature vector fed to the treelite early-termination model.
            x = np.array([max_z_score, a_score, doc_score, repeats, past20])
            et_prob = self.et_model.predict_instance(x)
            if et_prob > self.et_threshold:
                should_stop = True
                break
        return should_stop, predictions_, repeats, all_spans, all_a_z_scores
| [
"logging.getLogger",
"numpy.mean",
"heapq.heappushpop",
"numpy.std",
"regex.split",
"numpy.array",
"heapq.heappop",
"multiprocessing.Pool",
"torch.utils.data.DataLoader",
"multiprocessing.util.Finalize",
"heapq.heappush",
"time.time"
] | [((603, 630), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (620, 630), False, 'import logging\n'), ((1087, 1148), 'multiprocessing.util.Finalize', 'Finalize', (['PROCESS_TOK', 'PROCESS_TOK.shutdown'], {'exitpriority': '(100)'}), '(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)\n', (1095, 1148), False, 'from multiprocessing.util import Finalize\n'), ((3214, 3225), 'time.time', 'time.time', ([], {}), '()\n', (3223, 3225), False, 'import time\n'), ((3382, 3393), 'time.time', 'time.time', ([], {}), '()\n', (3391, 3393), False, 'import time\n'), ((3877, 3888), 'time.time', 'time.time', ([], {}), '()\n', (3886, 3888), False, 'import time\n'), ((4457, 4553), 'multiprocessing.Pool', 'ProcessPool', (['num_workers'], {'initializer': 'init', 'initargs': '(tok_class, tok_opts, fixed_candidates)'}), '(num_workers, initializer=init, initargs=(tok_class, tok_opts,\n fixed_candidates))\n', (4468, 4553), True, 'from multiprocessing import Pool as ProcessPool\n'), ((5204, 5228), 'regex.split', 'regex.split', (['"""\\\\n+"""', 'doc'], {}), "('\\\\n+', doc)\n", (5215, 5228), False, 'import regex\n'), ((6005, 6164), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'sampler': 'sampler', 'num_workers': 'num_loaders', 'collate_fn': 'batchify', 'pin_memory': 'self.cuda'}), '(dataset, batch_size=self.batch_size, sampler=\n sampler, num_workers=num_loaders, collate_fn=batchify, pin_memory=self.cuda\n )\n', (6032, 6164), False, 'import torch\n'), ((6975, 6986), 'time.time', 'time.time', ([], {}), '()\n', (6984, 6986), False, 'import time\n'), ((7376, 7387), 'time.time', 'time.time', ([], {}), '()\n', (7385, 7387), False, 'import time\n'), ((8581, 8592), 'time.time', 'time.time', ([], {}), '()\n', (8590, 8592), False, 'import time\n'), ((9018, 9029), 'time.time', 'time.time', ([], {}), '()\n', (9027, 9029), False, 'import time\n'), ((10742, 10753), 'time.time', 'time.time', ([], {}), '()\n', 
(10751, 10753), False, 'import time\n'), ((11159, 11170), 'time.time', 'time.time', ([], {}), '()\n', (11168, 11170), False, 'import time\n'), ((13274, 13285), 'time.time', 'time.time', ([], {}), '()\n', (13283, 13285), False, 'import time\n'), ((13681, 13692), 'time.time', 'time.time', ([], {}), '()\n', (13690, 13692), False, 'import time\n'), ((14889, 14900), 'time.time', 'time.time', ([], {}), '()\n', (14898, 14900), False, 'import time\n'), ((15327, 15338), 'time.time', 'time.time', ([], {}), '()\n', (15336, 15338), False, 'import time\n'), ((16671, 16682), 'time.time', 'time.time', ([], {}), '()\n', (16680, 16682), False, 'import time\n'), ((18593, 18604), 'time.time', 'time.time', ([], {}), '()\n', (18602, 18604), False, 'import time\n'), ((19076, 19097), 'numpy.mean', 'np.mean', (['all_a_scores'], {}), '(all_a_scores)\n', (19083, 19097), True, 'import numpy as np\n'), ((19119, 19139), 'numpy.std', 'np.std', (['all_a_scores'], {}), '(all_a_scores)\n', (19125, 19139), True, 'import numpy as np\n'), ((20582, 20642), 'numpy.array', 'np.array', (['[max_z_score, a_score, doc_score, repeats, past20]'], {}), '([max_z_score, a_score, doc_score, repeats, past20])\n', (20590, 20642), True, 'import numpy as np\n'), ((12222, 12242), 'heapq.heappop', 'heapq.heappop', (['queue'], {}), '(queue)\n', (12235, 12242), False, 'import heapq\n'), ((11848, 11875), 'heapq.heappush', 'heapq.heappush', (['queue', 'item'], {}), '(queue, item)\n', (11862, 11875), False, 'import heapq\n'), ((11926, 11956), 'heapq.heappushpop', 'heapq.heappushpop', (['queue', 'item'], {}), '(queue, item)\n', (11943, 11956), False, 'import heapq\n'), ((13105, 13116), 'time.time', 'time.time', ([], {}), '()\n', (13114, 13116), False, 'import time\n'), ((18823, 18834), 'time.time', 'time.time', ([], {}), '()\n', (18832, 18834), False, 'import time\n')] |
import os
import sys
import csv
import json
import math
import enum
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from os import listdir
from os.path import isfile, join
from skimage import measure
from skimage import filters
from scipy import ndimage
class OutputShapeType(enum.Enum):
    """How a task's output grid shape relates to its input grids."""
    Constant = enum.auto()  # every output has the same fixed shape (value 1)
    Input = enum.auto()     # each output matches its input's shape (value 2)
    Unknown = enum.auto()   # no simple relationship detected (value 3)
class HypothesisType(enum.Enum):
    """Kinds of solving hypotheses (currently only symbol transformation)."""
    SymbolTx = 1
BG_SYMBOL = 0  # symbol value treated as background (rendered black)
NUM_SYMBOLS = 10  # ARC grids use symbols 0-9
def get_symbol_cmap_and_norm():
    """Return (cmap, norm) mapping ARC symbols 0-9 to the app's colors."""
    palette = [
        '#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
        '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25',
    ]
    return colors.ListedColormap(palette), colors.Normalize(vmin=0, vmax=9)
# https://towardsdatascience.com/canny-edge-detection-step-by-step-in-python-computer-vision-b49c3a2d8123
def gaussian_kernel(size, sigma=1):
    """Build a normalized 2-D Gaussian kernel of the given odd *size*."""
    half = int(size) // 2
    axis = np.arange(-half, half + 1)
    xx, yy = np.meshgrid(axis, axis, indexing='ij')
    coeff = 1 / (2.0 * np.pi * sigma ** 2)
    return np.exp(-((xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))) * coeff
def sobel_filters(img):
    """Compute Sobel gradient magnitude and direction for a 2-D image.

    Args:
        img: 2-D array of intensities.

    Returns:
        Tuple (G, theta): G is the gradient magnitude rescaled so its
        maximum is 255 (all zeros for a constant image), and theta is the
        gradient direction in radians from np.arctan2.
    """
    Kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)
    Ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32)

    # FIX: scipy.ndimage.filters is a deprecated alias removed in modern
    # SciPy; call the public ndimage.convolve directly.
    Ix = ndimage.convolve(img, Kx)
    Iy = ndimage.convolve(img, Ky)

    G = np.hypot(Ix, Iy)
    peak = G.max()
    if peak > 0:  # guard against 0/0 -> NaN on constant images
        G = G / peak * 255
    theta = np.arctan2(Iy, Ix)
    return (G, theta)
def non_max_suppression(img, D):
    """Thin gradient ridges to single-pixel width (Canny NMS step).

    Each interior pixel of *img* is kept only if its magnitude is at least
    as large as both neighbours along the gradient direction *D* (radians);
    otherwise it is suppressed to zero.  Returns an int32 array; the one-
    pixel border is always zero.
    """
    rows, cols = img.shape
    out = np.zeros((rows, cols), dtype=np.int32)
    # Convert to degrees and fold negatives into [0, 180).
    angles = D * 180. / np.pi
    angles[angles < 0] += 180

    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            a = angles[r, c]
            # Pick the two neighbours along the quantized gradient direction.
            if (0 <= a < 22.5) or (157.5 <= a <= 180):      # horizontal
                n1, n2 = img[r, c + 1], img[r, c - 1]
            elif 22.5 <= a < 67.5:                          # diagonal /
                n1, n2 = img[r + 1, c - 1], img[r - 1, c + 1]
            elif 67.5 <= a < 112.5:                         # vertical
                n1, n2 = img[r + 1, c], img[r - 1, c]
            elif 112.5 <= a < 157.5:                        # diagonal \
                n1, n2 = img[r - 1, c - 1], img[r + 1, c + 1]
            else:
                # Unreachable for angles in [0, 180]; mirrors the original
                # defaults so behavior is identical.
                n1 = n2 = 255
            if img[r, c] >= n1 and img[r, c] >= n2:
                out[r, c] = img[r, c]
    return out
def plot_one(task, ax, i, train_or_test, input_or_output):
    """Render one grid from *task* onto *ax* using the ARC color scheme.

    *train_or_test* selects the pair list ('train'/'test'), *i* the pair
    index, and *input_or_output* which grid of the pair to draw.
    """
    cmap, norm = get_symbol_cmap_and_norm()
    grid = task[train_or_test][i][input_or_output]
    ax.imshow(grid, cmap=cmap, norm=norm)
    ax.grid(True, which='both', color='lightgrey', linewidth=0.5)
    # Offset the ticks by half a cell so gridlines fall between cells.
    ax.set_yticks([r - 0.5 for r in range(len(grid) + 1)])
    ax.set_xticks([c - 0.5 for c in range(len(grid[0]) + 1)])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_title(train_or_test + ' ' + input_or_output)
def plot_ans(ans, ax):
    """Render the hypothesis grid *ans* onto *ax* with the ARC palette."""
    cmap, norm = get_symbol_cmap_and_norm()
    ax.imshow(ans, cmap=cmap, norm=norm)
    ax.grid(True, which='both', color='lightgrey', linewidth=0.5)
    # Offset the ticks by half a cell so gridlines fall between cells.
    ax.set_yticks([r - 0.5 for r in range(len(ans) + 1)])
    ax.set_xticks([c - 0.5 for c in range(len(ans[0]) + 1)])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_title('Hypothesis')
def plot_task(task, ans=None):
    """Plot all train pairs of *task* in one figure, then all test pairs
    (optionally followed by a hypothesis grid *ans*) in a second figure,
    using the same color scheme as the ARC app.
    """
    n_train = len(task['train'])
    print('num_train', n_train)
    _, axes = plt.subplots(2, n_train, figsize=(3 * n_train, 6))
    for col in range(n_train):
        plot_one(task, axes[0, col], col, 'train', 'input')
        plot_one(task, axes[1, col], col, 'train', 'output')
    plt.tight_layout()
    plt.show()

    n_test = len(task['test'])
    print('num_test', n_test)
    n_rows = 3 if ans is not None else 2
    _, axes = plt.subplots(n_rows, n_test, figsize=(3 * n_test, 3 * n_rows))
    if n_test == 1:
        # With a single column, subplots() returns a 1-D axes array.
        plot_one(task, axes[0], 0, 'test', 'input')
        plot_one(task, axes[1], 0, 'test', 'output')
    else:
        for col in range(n_test):
            plot_one(task, axes[0, col], col, 'test', 'input')
            plot_one(task, axes[1, col], col, 'test', 'output')
    if ans is not None:
        plot_ans(ans, axes[2])
    plt.tight_layout()
    plt.show()
def plot_components(symbols, component_labels, bb_image):
    """Show the symbol grid, its connected-component labels, and the
    bounding-box image side by side in a single 1x3 figure.
    """
    cmap, norm = get_symbol_cmap_and_norm()
    plt.figure(figsize=(9, 3.5))
    # (subplot position, image, imshow kwargs, panel title)
    panels = [
        (131, symbols, dict(cmap=cmap, norm=norm), 'symbols'),
        (132, component_labels, dict(cmap='nipy_spectral'), 'all labels'),
        (133, bb_image, dict(cmap='nipy_spectral'), 'bounding boxes'),
    ]
    for position, image, imshow_kwargs, title in panels:
        ax = plt.subplot(position)
        plt.imshow(image, **imshow_kwargs)
        ax.title.set_text(title)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
def find_output_shape(input_shapes, output_shapes):
    """Classify how the output grid shapes relate to the input grid shapes.

    Returns (OutputShapeType.Constant, None) when every output shares one
    shape, (OutputShapeType.Input, None) when each output matches its own
    input's shape, otherwise (OutputShapeType.Unknown, None).  The second
    tuple slot is reserved for future shape parameters.
    """
    num_shapes = len(input_shapes)
    assert num_shapes > 0
    reference_h, reference_w = output_shapes[0][0], output_shapes[0][1]
    # both hypotheses hold until a counter-example is found
    is_constant = True
    matches_input = True
    for i in range(num_shapes):
        out_h, out_w = output_shapes[i][0], output_shapes[i][1]
        if (out_h, out_w) != (reference_h, reference_w):
            is_constant = False
        if (out_h, out_w) != (input_shapes[i][0], input_shapes[i][1]):
            matches_input = False
    if is_constant:
        return OutputShapeType.Constant, None
    if matches_input:
        return OutputShapeType.Input, None
    return OutputShapeType.Unknown, None
def get_percentage(n, total):
    """Express *n* as a percentage of *total* (no zero-total guard)."""
    return float(n) / float(total) * 100.0
# Colours
# 0 and 5 seem to have special meanings. 0 is background.
# 5 may be some sort of separator structure.
# How preserved is this?
# 0 1 2 3 4
# ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00',
# 5 6 7 8 9
# '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
def rgb_2_grey(rgb):
    """Collapse an RGB image to one grey channel.

    A "grid" is a rectangular matrix (list of lists) of integers between 0
    and 9 (inclusive); symbols are visualised as colours.  Equal channel
    weights (~1/3 each) are used instead of luminance weights so every
    symbol colour contributes identically.
    """
    equal_weights = np.array([0.333, 0.333, 0.333])
    return rgb[..., :3] @ equal_weights
def symbol_2_grey(symbols):
    """Decompose a symbol grid into one binary mask per symbol value.

    A "grid" is a rectangular matrix (list of lists) of integers between 0
    and 9 (inclusive).  Symbol values are categorical — all equally
    different — so the grey-scale decomposition is a series of 0/1 masks,
    one per symbol.

    BUGFIX: the original loop never used its loop variable (it always
    masked against 0), called ``symbols.where`` (not an ndarray method,
    would raise AttributeError) and returned nothing.

    Args:
        symbols: 2-D array-like of symbol values 0..9.

    Returns:
        float array of shape (10, H, W); plane s is 1.0 where
        ``symbols == s`` and 0.0 elsewhere.
    """
    grid = np.asarray(symbols)
    return np.stack([(grid == symbol).astype(float) for symbol in range(10)])
def symbol_2_edges(symbols):
    """Rasterise symbol boundaries onto a doubled-resolution grid.

    The output has shape (2H-1, 2W-1); cell (2y+dy, 2x+dx) is set to 1.0
    whenever the pixel at (y, x) differs from its 8-neighbour at
    (y+dy, x+dx).  Grid borders are not treated as edges.
    """
    height = symbols.shape[0]
    width = symbols.shape[1]
    edges = np.zeros((2 * height - 1, 2 * width - 1))
    neighbour_offsets = [(dy, dx)
                         for dy in (-1, 0, 1)
                         for dx in (-1, 0, 1)
                         if (dy, dx) != (0, 0)]
    for y in range(height):
        for x in range(width):
            value = symbols[y][x]
            for dy, dx in neighbour_offsets:
                ny, nx = y + dy, x + dx
                if not (0 <= ny < height and 0 <= nx < width):
                    continue  # neighbour outside the grid: not an edge
                if symbols[ny][nx] != value:
                    edges[2 * y + dy][2 * x + dx] = 1.0
    return edges
def find_density(symbols):
    """Fraction of grid cells holding a non-zero (non-background) symbol."""
    height = symbols.shape[0]
    width = symbols.shape[1]
    occupied = sum(
        1 for y in range(height) for x in range(width) if symbols[y][x] != 0)
    return occupied / (height * width)
def find_bounding_boxes(component_labels, num_labels):
    """Compute one axis-aligned bounding box per component label and draw
    the box outlines into an image shaped like ``component_labels``.

    Returns (bounding_boxes, bb_image): *bounding_boxes* maps each label
    value to [x_min, y_min, x_max, y_max]; label 0 (background) gets a box
    but is never drawn.  ``num_labels`` is accepted for interface
    compatibility and not used.
    """
    height = component_labels.shape[0]
    width = component_labels.shape[1]
    bounding_boxes = {}
    seen_labels = []  # labels in first-encounter (scan) order
    for y in range(height):
        for x in range(width):
            label = component_labels[y][x]
            box = bounding_boxes.get(label)
            if box is None:
                seen_labels.append(label)
                bounding_boxes[label] = [x, y, x, y]
            else:
                box[0] = min(box[0], x)
                box[1] = min(box[1], y)
                box[2] = max(box[2], x)
                box[3] = max(box[3], y)
    print('all BBs ', bounding_boxes)
    bb_image = np.zeros(component_labels.shape)
    for label in seen_labels:
        if label == 0:
            continue  # background gets no outline
        x_min, y_min, x_max, y_max = bounding_boxes[label]
        for x in range(x_max - x_min + 1):
            bb_image[y_min][x + x_min] = 1.0
            bb_image[y_max][x + x_min] = 1.0
        for y in range(y_max - y_min + 1):
            bb_image[y + y_min][x_min] = 1.0
            bb_image[y + y_min][x_max] = 1.0
    return bounding_boxes, bb_image
def find_symmetry(image):
    # NOTE(review): despite the name, this performs no symmetry analysis
    # yet — it only renders *image* in greyscale for visual inspection.
    plt.figure()
    plt.imshow(image, cmap='gray')
    plt.tight_layout()
    plt.show()
class Hypothesis:
    """Base class for candidate input→output grid transformations.

    Subclasses override :meth:`apply`; the base implementation predicts a
    grid of zeros in the requested shape.
    """

    def __init__(self):
        pass

    def apply(self, example_input, output_shape_type, output_shape):
        """Return the predicted output grid (all zeros for the base class)."""
        return np.zeros(output_shape)
class SymbolTxHypo(Hypothesis):
    """Hypothesis: every occurrence of symbol ``s1`` becomes symbol ``s2``."""

    def __init__(self, s1, s2):
        self.s1 = s1
        self.s2 = s2

    def apply(self, example_input, output_shape_type, output_shape):
        """Apply the substitution cell by cell.

        Only valid when the output shape equals the input shape; otherwise
        prints a diagnostic and returns None.
        """
        if output_shape_type != OutputShapeType.Input:
            print('shape mismatch')
            return
        height = example_input.shape[0]
        width = example_input.shape[1]
        output = np.zeros(example_input.shape)
        source = float(self.s1)
        replacement = int(self.s2)
        for y in range(height):
            for x in range(width):
                cell = example_input[y][x]
                output[y][x] = replacement if cell == source else cell
        return output
def evaluate_output(example_output, hypo_output):
    """Count cells where *hypo_output* disagrees with *example_output*.

    A missing hypothesis output (None) scores every cell as wrong.
    """
    height = example_output.shape[0]
    width = example_output.shape[1]
    if hypo_output is None:
        return height * width  # nothing predicted: all cells wrong
    return sum(
        1
        for y in range(height)
        for x in range(width)
        if example_output[y][x] != hypo_output[y][x])
def find_hypothesis(train, test, output_shape_type, output_shape):
    """Enumerate all symbol-substitution hypotheses, keep the one with the
    fewest total training errors, and score it on the first test example.

    Returns (hypothesis_output_grid, test_error_count).
    """
    hypo_type = HypothesisType.SymbolTx  # currently the only family searched
    # Build every s1 -> s2 substitution candidate (s1 outer, s2 inner,
    # so earlier == "simpler" under the tie-break below).
    candidates = [SymbolTxHypo(s1, s2)
                  for s1 in range(NUM_SYMBOLS)
                  for s2 in range(NUM_SYMBOLS)]
    # Evaluate every candidate over all training examples.
    best_hypo = None
    best_hypo_errors = 0
    for candidate in candidates:
        total_errors = 0
        for example in train:
            example_input = np.array(example['input'])
            example_output = np.array(example['output'])
            predicted = candidate.apply(
                example_input, output_shape_type, output_shape)
            total_errors += evaluate_output(example_output, predicted)
        # strict '<' keeps the earliest (simplest) candidate on ties
        if (best_hypo is None) or (total_errors < best_hypo_errors):
            best_hypo = candidate
            best_hypo_errors = total_errors
    # Keep the simplest hypo that has no error, or failing that, with min error
    test_input = np.array(test[0]['input'])
    test_output = np.array(test[0]['output'])
    hypo_output = best_hypo.apply(test_input, output_shape_type, output_shape)
    test_errors = evaluate_output(test_output, hypo_output)
    print('=====> Test Errors ', test_errors)
    return hypo_output, test_errors
def process_file(file_path, do_plot=False, do_hypo=False, e_sel=None):
    """Load one ARC task JSON, label connected components per training
    example, classify the input/output shape relation, and optionally run
    the hypothesis search against the test example.

    Args:
        file_path: path to an ARC task JSON file.
        do_plot: if True, display the task's train/test grids.
        do_hypo: if True, search for a hypothesis when the output-shape
            type is known.
        e_sel: if not None, only process this training-example index.

    Returns:
        (output_shape_type, mean_density, correct) — *correct* is True when
        the best hypothesis reproduced the test output with zero errors.
    """
    print('Reading file: ', file_path)
    with open(file_path) as json_file:
        js = json.load(json_file)
        #print(js)
    if do_plot:
        plot_task(js)
    # https://scipy-lectures.org/packages/scikit-image/auto_examples/plot_labels.html
    example = 2  # unused leftover from interactive debugging
    train = js['train']
    test = js['test']
    num_train_examples = len(train)
    input_shapes = []
    output_shapes = []
    sum_densities = 0.0
    for e in range(0,num_train_examples):
        if e_sel is not None:
            if e != e_sel:
                continue
        example_input = train[e]['input']
        #print('example_input', example_input)
        example_input = np.array(example_input)
        input_shapes.append(example_input.shape)
        print('example_input.shape', example_input.shape)
        density = find_density(example_input)
        sum_densities += density
        #find_symmetry(example_input)
        #edges = symbol_2_edges(example_input)
        #find_symmetry(edges)
        example_output = train[e]['output']
        #print('example_output', example_output)
        example_output = np.array(example_output)
        print('output.shape', example_output.shape)
        output_shapes.append(example_output.shape)
        # https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.label
        #connectivity = 1
        connectivity = 2
        # TODO separate objects by colour
        # https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.label
        # "background: Consider all pixels with this value as background pixels, and label them as 0."
        #all_labels = measure.label(example_input, connectivity=connectivity)
        # Returns: Labeled array, where all connected regions are assigned the same integer value.
        component_labels, num_labels = measure.label(example_input, connectivity=connectivity, background=0, return_num=True)
        bounding_boxes, bb_image = find_bounding_boxes(component_labels, num_labels)
        if False: #do_plot:
            plot_components(example_input, component_labels, bb_image)
    # NOTE: if e_sel skipped every example these lists are empty and the
    # mean below still divides by num_train_examples (not the count kept).
    output_shape_type, output_shape = find_output_shape(input_shapes,output_shapes)
    mean_density = sum_densities / float(num_train_examples)
    correct = False
    if do_hypo:
        if output_shape_type != OutputShapeType.Unknown:
            hypo_output, test_errors = find_hypothesis(train, test, output_shape_type, output_shape)
            #plot_task(js,hypo_output)
            if test_errors == 0:
                correct = True
    return output_shape_type, mean_density, correct
# Priors:
# Connected components
# Channels == symbol value
# 0 = background = free space
# Bounding boxes
# Area of components
# Centroids of components
# https://www.kaggle.com/boliu0/visualizing-all-task-pairs-with-gridlines/notebook
# Script entry: run process_file over every task in the training folder,
# tally output-shape classifications and hypothesis-search successes,
# then plot the density distribution and print summary percentages.
print('hello world')
file_path = './data/training/b1948b0a.json'
#file_path = './data/training/ff28f65a.json'
#data_dir = sys.argv[1]
data_dir = './data/training'
files = [f for f in listdir(data_dir) if isfile(join(data_dir, f))]
num_files = len(files)
num_constant = 0
num_input = 0
num_unknown = 0
num_correct = 0
densities = []
density_threshold = 0.7  # only used by the commented-out debug branch below
do_hypo = True
for i in range(0, num_files):
    file_name = files[i]
    file_path = os.path.join(data_dir,file_name)
    output_shape_type, density, correct = process_file(file_path, do_hypo=do_hypo)
    densities.append(density)
    if correct:
        num_correct += 1
    # if density >= density_threshold:
    #     process_file(file_path, do_plot=True, do_hypo=do_hypo) # debug it
    if output_shape_type == OutputShapeType.Constant:
        num_constant += 1
    elif output_shape_type == OutputShapeType.Input:
        num_input += 1
    else:
        num_unknown += 1
density_bins = 30
plt.hist(densities, bins = density_bins)
plt.show()
total = num_constant + num_input + num_unknown
print('Constant:', num_constant, get_percentage(num_constant, total),'%')
print('Input:', num_input, get_percentage(num_input, total),'%')
print('Unknown:', num_unknown, get_percentage(num_unknown, total),'%')
print('Correct:', num_correct, get_percentage(num_correct, total),'%')
| [
"matplotlib.pyplot.hist",
"numpy.array",
"numpy.arctan2",
"matplotlib.pyplot.imshow",
"os.listdir",
"matplotlib.colors.ListedColormap",
"numpy.exp",
"numpy.dot",
"matplotlib.pyplot.axis",
"numpy.hypot",
"scipy.ndimage.filters.convolve",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.show"... | [((16786, 16824), 'matplotlib.pyplot.hist', 'plt.hist', (['densities'], {'bins': 'density_bins'}), '(densities, bins=density_bins)\n', (16794, 16824), True, 'import matplotlib.pyplot as plt\n'), ((16827, 16837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16835, 16837), True, 'import matplotlib.pyplot as plt\n'), ((491, 628), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00', '#AAAAAA',\n '#F012BE', '#FF851B', '#7FDBFF', '#870C25']"], {}), "(['#000000', '#0074D9', '#FF4136', '#2ECC40',\n '#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])\n", (512, 628), False, 'from matplotlib import colors\n'), ((645, 677), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(9)'}), '(vmin=0, vmax=9)\n', (661, 677), False, 'from matplotlib import colors\n'), ((1064, 1122), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]', 'np.float32'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)\n', (1072, 1122), True, 'import numpy as np\n'), ((1132, 1190), 'numpy.array', 'np.array', (['[[1, 2, 1], [0, 0, 0], [-1, -2, -1]]', 'np.float32'], {}), '([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32)\n', (1140, 1190), True, 'import numpy as np\n'), ((1205, 1238), 'scipy.ndimage.filters.convolve', 'ndimage.filters.convolve', (['img', 'Kx'], {}), '(img, Kx)\n', (1229, 1238), False, 'from scipy import ndimage\n'), ((1248, 1281), 'scipy.ndimage.filters.convolve', 'ndimage.filters.convolve', (['img', 'Ky'], {}), '(img, Ky)\n', (1272, 1281), False, 'from scipy import ndimage\n'), ((1295, 1311), 'numpy.hypot', 'np.hypot', (['Ix', 'Iy'], {}), '(Ix, Iy)\n', (1303, 1311), True, 'import numpy as np\n'), ((1350, 1368), 'numpy.arctan2', 'np.arctan2', (['Iy', 'Ix'], {}), '(Iy, Ix)\n', (1360, 1368), True, 'import numpy as np\n'), ((1459, 1491), 'numpy.zeros', 'np.zeros', (['(M, N)'], {'dtype': 'np.int32'}), '((M, N), 
dtype=np.int32)\n', (1467, 1491), True, 'import numpy as np\n'), ((3640, 3698), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', 'num_train'], {'figsize': '(3 * num_train, 3 * 2)'}), '(2, num_train, figsize=(3 * num_train, 3 * 2))\n', (3652, 3698), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3849), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3847, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3852, 3862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3860, 3862), True, 'import matplotlib.pyplot as plt\n'), ((4013, 4091), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_subplots', 'num_test'], {'figsize': '(3 * num_test, 3 * num_subplots)'}), '(num_subplots, num_test, figsize=(3 * num_test, 3 * num_subplots))\n', (4025, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4388, 4406), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4404, 4406), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4417, 4419), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3.5)'}), '(figsize=(9, 3.5))\n', (4937, 4955), True, 'import matplotlib.pyplot as plt\n'), ((4964, 4980), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (4975, 4980), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5024), 'matplotlib.pyplot.imshow', 'plt.imshow', (['symbols'], {'cmap': 'cmap', 'norm': 'norm'}), '(symbols, cmap=cmap, norm=norm)\n', (4993, 5024), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5074), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5067, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5083, 5099), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (5094, 5099), True, 'import matplotlib.pyplot as plt\n'), ((5102, 5152), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['component_labels'], {'cmap': '"""nipy_spectral"""'}), "(component_labels, cmap='nipy_spectral')\n", (5112, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5190, 5205), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5198, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5214, 5230), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (5225, 5230), True, 'import matplotlib.pyplot as plt\n'), ((5233, 5275), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bb_image'], {'cmap': '"""nipy_spectral"""'}), "(bb_image, cmap='nipy_spectral')\n", (5243, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5317, 5332), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5325, 5332), True, 'import matplotlib.pyplot as plt\n'), ((5336, 5354), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5352, 5354), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5365, 5367), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6896), 'numpy.dot', 'np.dot', (['rgb[..., :3]', 'rgb_weights'], {}), '(rgb[..., :3], rgb_weights)\n', (6869, 6896), True, 'import numpy as np\n'), ((7560, 7578), 'numpy.zeros', 'np.zeros', (['(eh, ew)'], {}), '((eh, ew))\n', (7568, 7578), True, 'import numpy as np\n'), ((8668, 8700), 'numpy.zeros', 'np.zeros', (['component_labels.shape'], {}), '(component_labels.shape)\n', (8676, 8700), True, 'import numpy as np\n'), ((10303, 10315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10313, 10315), True, 'import matplotlib.pyplot as plt\n'), ((10318, 10348), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (10328, 10348), True, 'import matplotlib.pyplot as plt\n'), ((10351, 10369), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10367, 10369), True, 'import matplotlib.pyplot as plt\n'), ((10372, 10382), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10380, 10382), True, 'import matplotlib.pyplot as plt\n'), ((12844, 12864), 'numpy.array', 'np.array', (['test_input'], {}), '(test_input)\n', (12852, 12864), True, 'import numpy as np\n'), ((12915, 12936), 'numpy.array', 'np.array', (['test_output'], {}), '(test_output)\n', (12923, 12936), True, 'import numpy as np\n'), ((16303, 16336), 'os.path.join', 'os.path.join', (['data_dir', 'file_name'], {}), '(data_dir, file_name)\n', (16315, 16336), False, 'import os\n'), ((966, 1015), 'numpy.exp', 'np.exp', (['(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))'], {}), '(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n', (972, 1015), True, 'import numpy as np\n'), ((10514, 10536), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (10522, 10536), True, 'import numpy as np\n'), ((10828, 10857), 'numpy.zeros', 'np.zeros', (['example_input.shape'], {}), '(example_input.shape)\n', (10836, 10857), True, 'import numpy as np\n'), ((13305, 13325), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (13314, 13325), False, 'import json\n'), ((13816, 13839), 'numpy.array', 'np.array', (['example_input'], {}), '(example_input)\n', (13824, 13839), True, 'import numpy as np\n'), ((14226, 14250), 'numpy.array', 'np.array', (['example_output'], {}), '(example_output)\n', (14234, 14250), True, 'import numpy as np\n'), ((14911, 15001), 'skimage.measure.label', 'measure.label', (['example_input'], {'connectivity': 'connectivity', 'background': '(0)', 'return_num': '(True)'}), '(example_input, connectivity=connectivity, background=0,\n return_num=True)\n', (14924, 15001), False, 'from skimage import measure\n'), ((16047, 16064), 'os.listdir', 'listdir', (['data_dir'], {}), '(data_dir)\n', (16054, 16064), False, 'from os import listdir\n'), ((12139, 12162), 'numpy.array', 'np.array', (['example_input'], {}), '(example_input)\n', (12147, 12162), True, 'import numpy as np\n'), ((12229, 12253), 'numpy.array', 
'np.array', (['example_output'], {}), '(example_output)\n', (12237, 12253), True, 'import numpy as np\n'), ((16075, 16092), 'os.path.join', 'join', (['data_dir', 'f'], {}), '(data_dir, f)\n', (16079, 16092), False, 'from os.path import isfile, join\n')] |
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyDeltaRCM
# filter out the warning raised about no netcdf being found
warnings.filterwarnings("ignore", category=UserWarning)
# number of random walks to visualise, and their colour cycle
n = 10
cm = matplotlib.cm.get_cmap('tab10')
# init delta model
# Two models share one temp output dir: a fresh run and one resumed
# from a saved checkpoint.
with pyDeltaRCM.shared_tools._docs_temp_directory() as output_dir:
    delta = pyDeltaRCM.DeltaModel(
        out_dir=output_dir)
    delta_later = pyDeltaRCM.DeltaModel(
        out_dir=output_dir,
        resume_checkpoint='../../_resources/checkpoint')
# domain shape, shared by both model instances
_shp = delta_later.eta.shape
# manually call only the necessary paths
delta.init_water_iteration()
delta.run_water_iteration()
delta_later.init_water_iteration()
delta_later.run_water_iteration()
# define a function to fill in the walks of given idx
def _plot_idxs_walks_to_step(delta_inds, _step, _idxs, _ax) -> None:
    """Draw the first ``_step`` steps of each selected water-parcel walk
    onto ``_ax``, marking each walk's current endpoint with a dot.

    Colours cycle through the module-level ``cm`` colormap; the domain
    shape comes from the module-level ``_shp``.
    """
    for color_idx, walker in enumerate(_idxs):
        partial_walk = delta_inds[walker, :][:_step]
        pyDeltaRCM.debug_tools.plot_line(
            partial_walk, shape=_shp, color=cm(color_idx),
            nozeros=True)
        end_y, end_x = pyDeltaRCM.shared_tools.custom_unravel(
            partial_walk[-1], _shp)
        _ax.plot(end_x, end_y,
                 marker='o', ms=3, color=cm(color_idx))
# declare the idxs to use:
idxs = np.random.randint(low=0, high=delta._Np_water, size=n)
ps = [5, 25, 75]
# set up axis
# 2 rows (fresh run / resumed run) x 3 columns (one per step count in ps)
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(12, 4))
vmin, vmax = delta.eta.min(), delta.eta.max()
# fill in axis0
pyDeltaRCM.debug_tools.plot_domain(
    delta.eta, ax=ax[0, 0], grid=False, cmap='cividis')
_plot_idxs_walks_to_step(
    delta.free_surf_walk_inds, _step=ps[0], _idxs=idxs, _ax=ax[0, 0])
ax[0, 0].set_title('after {} steps'.format(ps[0]))
pyDeltaRCM.debug_tools.plot_domain(
    delta_later.eta, ax=ax[1, 0], grid=False, vmin=vmin, vmax=vmax, cmap='cividis')
_plot_idxs_walks_to_step(
    delta_later.free_surf_walk_inds, _step=ps[0], _idxs=idxs, _ax=ax[1, 0])
# ax[1, 0].set_title('after {} steps'.format(ps[0]))
# fill in axis1
pyDeltaRCM.debug_tools.plot_domain(
    delta.eta, ax=ax[0, 1], grid=False, cmap='cividis')
_plot_idxs_walks_to_step(
    delta.free_surf_walk_inds, _step=ps[1], _idxs=idxs, _ax=ax[0, 1])
ax[0, 1].set_title('after {} steps'.format(ps[1]))
pyDeltaRCM.debug_tools.plot_domain(
    delta_later.eta, ax=ax[1, 1], grid=False, vmin=vmin, vmax=vmax, cmap='cividis')
_plot_idxs_walks_to_step(
    delta_later.free_surf_walk_inds, _step=ps[1], _idxs=idxs, _ax=ax[1, 1])
# ax[1, 1].set_title('after {} steps'.format(ps[1]))
# fill in axis2
pyDeltaRCM.debug_tools.plot_domain(
    delta.eta, ax=ax[0, 2], grid=False, cmap='cividis')
_plot_idxs_walks_to_step(
    delta.free_surf_walk_inds, _step=ps[2], _idxs=idxs, _ax=ax[0, 2])
ax[0, 2].set_title('after {} steps'.format(ps[2]))
pyDeltaRCM.debug_tools.plot_domain(
    delta_later.eta, ax=ax[1, 2], grid=False, vmin=vmin, vmax=vmax, cmap='cividis')
_plot_idxs_walks_to_step(
    delta_later.free_surf_walk_inds, _step=ps[2], _idxs=idxs, _ax=ax[1, 2])
# ax[1, 3].set_title('after {} steps'.format(ps[3]))
plt.tight_layout()
plt.show()
| [
"warnings.filterwarnings",
"pyDeltaRCM.DeltaModel",
"matplotlib.cm.get_cmap",
"pyDeltaRCM.debug_tools.plot_domain",
"pyDeltaRCM.shared_tools.custom_unravel",
"numpy.random.randint",
"pyDeltaRCM.shared_tools._docs_temp_directory",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"mat... | [((167, 222), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (190, 222), False, 'import warnings\n'), ((237, 268), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (259, 268), False, 'import matplotlib\n'), ((1305, 1359), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'delta._Np_water', 'size': 'n'}), '(low=0, high=delta._Np_water, size=n)\n', (1322, 1359), True, 'import numpy as np\n'), ((1402, 1463), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(12, 4)'}), '(2, 3, sharex=True, sharey=True, figsize=(12, 4))\n', (1414, 1463), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1618), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta.eta'], {'ax': 'ax[0, 0]', 'grid': '(False)', 'cmap': '"""cividis"""'}), "(delta.eta, ax=ax[0, 0], grid=False, cmap\n ='cividis')\n", (1561, 1618), False, 'import pyDeltaRCM\n'), ((1766, 1884), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta_later.eta'], {'ax': 'ax[1, 0]', 'grid': '(False)', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': '"""cividis"""'}), "(delta_later.eta, ax=ax[1, 0], grid=False,\n vmin=vmin, vmax=vmax, cmap='cividis')\n", (1800, 1884), False, 'import pyDeltaRCM\n'), ((2059, 2150), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta.eta'], {'ax': 'ax[0, 1]', 'grid': '(False)', 'cmap': '"""cividis"""'}), "(delta.eta, ax=ax[0, 1], grid=False, cmap\n ='cividis')\n", (2093, 2150), False, 'import pyDeltaRCM\n'), ((2298, 2416), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta_later.eta'], {'ax': 'ax[1, 1]', 'grid': '(False)', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': '"""cividis"""'}), "(delta_later.eta, ax=ax[1, 1], grid=False,\n vmin=vmin, vmax=vmax, 
cmap='cividis')\n", (2332, 2416), False, 'import pyDeltaRCM\n'), ((2591, 2682), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta.eta'], {'ax': 'ax[0, 2]', 'grid': '(False)', 'cmap': '"""cividis"""'}), "(delta.eta, ax=ax[0, 2], grid=False, cmap\n ='cividis')\n", (2625, 2682), False, 'import pyDeltaRCM\n'), ((2830, 2948), 'pyDeltaRCM.debug_tools.plot_domain', 'pyDeltaRCM.debug_tools.plot_domain', (['delta_later.eta'], {'ax': 'ax[1, 2]', 'grid': '(False)', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': '"""cividis"""'}), "(delta_later.eta, ax=ax[1, 2], grid=False,\n vmin=vmin, vmax=vmax, cmap='cividis')\n", (2864, 2948), False, 'import pyDeltaRCM\n'), ((3107, 3125), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3123, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3136), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3134, 3136), True, 'import matplotlib.pyplot as plt\n'), ((295, 341), 'pyDeltaRCM.shared_tools._docs_temp_directory', 'pyDeltaRCM.shared_tools._docs_temp_directory', ([], {}), '()\n', (339, 341), False, 'import pyDeltaRCM\n'), ((369, 410), 'pyDeltaRCM.DeltaModel', 'pyDeltaRCM.DeltaModel', ([], {'out_dir': 'output_dir'}), '(out_dir=output_dir)\n', (390, 410), False, 'import pyDeltaRCM\n'), ((439, 534), 'pyDeltaRCM.DeltaModel', 'pyDeltaRCM.DeltaModel', ([], {'out_dir': 'output_dir', 'resume_checkpoint': '"""../../_resources/checkpoint"""'}), "(out_dir=output_dir, resume_checkpoint=\n '../../_resources/checkpoint')\n", (460, 534), False, 'import pyDeltaRCM\n'), ((1124, 1178), 'pyDeltaRCM.shared_tools.custom_unravel', 'pyDeltaRCM.shared_tools.custom_unravel', (['walk[-1]', '_shp'], {}), '(walk[-1], _shp)\n', (1162, 1178), False, 'import pyDeltaRCM\n')] |
import matplotlib as mpl
import uproot3 as uproot
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import scipy
import numpy as np
import math
import pandas as pd
import seaborn as sns
import mplhep as hep
#import zfit
import inspect
import sys
import argparse
from concurrent.futures import ThreadPoolExecutor
# Base all figures on the mplhep ATLAS style, then override fonts/sizes.
plt.style.use(hep.style.ATLAS)
plt.rcParams.update({'font.sans-serif': "Arial",
                     'font.family': "sans-serif",
                     'font.size': 30,
                     'mathtext.fontset': 'custom',
                     'mathtext.rm': 'Arial',
                     })
import EICAnalysisTools as eat
# Computational Functions
def IPSignificance(row):
    """Per-jet signed 3D impact-parameter significances.

    For every jet in the event row, collect
    ``sign * |(d0/sigma_d0)^2 + (dz/sigma_dz)^2|`` for each jet constituent
    matched (by unique ID) to a reconstructed track with pT >= 1 GeV within
    deltaR < 0.5 of the jet axis.  The sign is positive when the track's
    point of closest approach projects positively onto the jet momentum.
    Jets with |eta| > 3.5 are skipped and keep an empty list.

    BUGFIX: the original called ``TrackUID.index(constituent)``, but the
    branch arrays delivered here are numpy arrays (no ``.index`` method);
    the lookup now uses ``np.flatnonzero``.  The per-constituent debug
    prints were removed as well.

    Args:
        row: mapping of Delphes branch names to per-event arrays.

    Returns:
        list of lists — one list of signed IP significances per jet.
    """
    JetPt = row["Jet.PT"]
    JetEta = row["Jet.Eta"]
    JetPhi = row["Jet.Phi"]
    JetConstituents = row["Jet.Particles"]
    TrackPt = row["Track.PT"]
    TrackEta = row["Track.Eta"]
    TrackPhi = row["Track.Phi"]
    TrackD0 = row["Track.D0"]
    TrackErrD0 = row["Track.ErrorD0"]
    TrackDZ = row["Track.DZ"]
    TrackErrDZ = row["Track.ErrorDZ"]
    TrackUID = np.asarray(row["Track.fUniqueID"])
    TrackXd = row["Track.Xd"]
    TrackYd = row["Track.Yd"]
    TrackZd = row["Track.Zd"]
    JetTrackIPs = [[] for _ in range(len(JetPt))]
    for jet in range(len(JetPt)):
        jet_eta = JetEta[jet]
        if np.abs(jet_eta) > 3.5:
            continue  # outside tracking acceptance; keeps an empty list
        jet_phi = JetPhi[jet]
        jet_pt = JetPt[jet]
        # jet momentum components, used only to orient the IP sign
        jpx = jet_pt * math.cos(jet_phi)
        jpy = jet_pt * math.sin(jet_phi)
        jpz = jet_pt * math.sinh(jet_eta)
        track_ips = []
        for constituent in JetConstituents[jet]:
            matches = np.flatnonzero(TrackUID == constituent)
            if matches.size == 0:
                continue  # constituent has no reconstructed track
            track = int(matches[0])
            if TrackPt[track] < 1.0:
                continue
            # NOTE(review): delta-phi is not wrapped to [-pi, pi] here,
            # matching the original behaviour — confirm intent.
            deltaR = np.sqrt((TrackEta[track] - jet_eta)**2
                             + (TrackPhi[track] - jet_phi)**2)
            if deltaR > 0.5:
                continue
            projection = (jpx * TrackXd[track]
                          + jpy * TrackYd[track]
                          + jpz * TrackZd[track])
            sign = 1 if projection > 0.0 else -1
            d0 = TrackD0[track]
            d0_error = TrackErrD0[track]
            dz = TrackDZ[track]
            dz_error = TrackErrDZ[track]
            track_ips.append(
                sign * math.fabs((d0 / d0_error)**2 + (dz / dz_error)**2))
        JetTrackIPs[jet] = track_ips
    return JetTrackIPs
# Computational Functions
def IPSignificanceOld(row):
    """Signed 3D impact-parameter significance per track, matched to jets
    by deltaR.

    Each track with pT >= 1 GeV gets
    ``sign * |(d0/sigma_d0)^2 + (dz/sigma_dz)^2`` from the first jet
    (|eta| <= 3.5) whose axis lies within deltaR < 0.5; unmatched tracks
    keep the sentinel -999.0.
    """
    jet_pts = row["Jet.PT"]
    jet_etas = row["Jet.Eta"]
    jet_phis = row["Jet.Phi"]
    track_pts = row["Track.PT"]
    track_etas = row["Track.Eta"]
    track_phis = row["Track.Phi"]
    track_d0s = row["Track.D0"]
    track_d0_errs = row["Track.ErrorD0"]
    track_dzs = row["Track.DZ"]
    track_dz_errs = row["Track.ErrorDZ"]
    track_uids = row["Track.fUniqueID"]  # unused; read kept for parity
    track_xds = row["Track.Xd"]
    track_yds = row["Track.Yd"]
    track_zds = row["Track.Zd"]
    significances = np.ones(len(track_pts)) * -999.0
    for j in range(len(jet_pts)):
        eta_j = jet_etas[j]
        if np.abs(eta_j) > 3.5:
            continue  # outside tracking acceptance
        phi_j = jet_phis[j]
        pt_j = jet_pts[j]
        # jet momentum vector, used only for the sign of the IP
        jpx = pt_j * math.cos(phi_j)
        jpy = pt_j * math.sin(phi_j)
        jpz = pt_j * math.sinh(eta_j)
        for t in np.arange(len(track_pts)):
            if significances[t] != -999.0:
                continue  # already assigned by an earlier jet
            if track_pts[t] < 1.0:
                continue
            dr = np.sqrt((track_etas[t] - eta_j)**2
                         + (track_phis[t] - phi_j)**2)
            if dr > 0.5:
                continue
            projection = (jpx * track_xds[t]
                          + jpy * track_yds[t]
                          + jpz * track_zds[t])
            sign = 1 if projection > 0.0 else -1
            d0_term = (track_d0s[t] / track_d0_errs[t])**2
            dz_term = (track_dzs[t] / track_dz_errs[t])**2
            significances[t] = sign * math.fabs(d0_term + dz_term)
    return significances
def TrackSource(row):
    """Assign each track the flavour of a jet it falls within.

    A track with pT >= 1 GeV within deltaR < 0.5 of a jet (|eta| <= 3.5)
    inherits that jet's flavour; a track already tagged as heavy flavour
    (4 or 5) is never re-assigned.  Unmatched tracks stay 0.
    """
    jet_pts = row["Jet.PT"]
    jet_etas = row["Jet.Eta"]
    jet_phis = row["Jet.Phi"]
    jet_flavors = row["Jet.Flavor"]
    track_pts = row["Track.PT"]
    track_etas = row["Track.Eta"]
    track_phis = row["Track.Phi"]
    flavors = np.zeros(len(track_pts))
    for j in range(len(jet_pts)):
        eta_j = jet_etas[j]
        if np.abs(eta_j) > 3.5:
            continue  # outside tracking acceptance
        pt_j = jet_pts[j]  # read kept for parity with original (unused)
        for t in np.arange(len(track_pts)):
            current = flavors[t]
            if current != -999.0 and (current == 4 or current == 5):
                continue  # keep existing heavy-flavour assignment
            if track_pts[t] < 1.0:
                continue
            dr = np.sqrt((track_etas[t] - eta_j)**2
                         + (track_phis[t] - jet_phis[j])**2)
            if dr > 0.5:
                continue
            flavors[t] = jet_flavors[j]
    return flavors
def histplot(x, xrange, xbins, density=False):
    """Histogram *x* and return plotting-ready arrays.

    Args:
        x: sample values.
        xrange: (lo, hi) histogram range, passed to np.histogram.
        xbins: bin count or explicit bin-edge array, passed to np.histogram.
        density: if True, normalise counts to a probability density
            (divide by the sample size and the bin width).

    Returns:
        (bin_centers, bin_widths, y, y_errors): *y_errors* are Poisson
        (sqrt(N)) errors scaled consistently with *y*; empty bins get a
        zero error.

    BUGFIX: the original computed ``errors/counts`` unconditionally, which
    raised a divide-by-zero RuntimeWarning and propagated NaN error bars
    for every empty bin; the division is now guarded with np.divide(where=).
    """
    counts, bins = np.histogram(x, range=xrange, bins=xbins)
    bin_widths = np.diff(bins)
    bin_centers = bins[:-1] + bin_widths / 2
    errors = np.sqrt(counts)
    rel_errors = np.divide(errors, counts,
                           out=np.zeros_like(errors, dtype=float),
                           where=counts > 0)
    # convert counts to dsigma/dpT * 100/fb
    y = counts  # / bin_widths
    if density:
        y = y / len(x) / bin_widths
    y_errors = rel_errors * y
    return (bin_centers, bin_widths, y, y_errors)
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str,
                    help="Directory containing input files")
parser.add_argument("-i", "--input", type=str,
                    help="Main input subfolder")
args = parser.parse_args()
# Load the Delphes tree branches needed for jet/track matching.
df = eat.UprootLoad([f"{args.dir}/{args.input}/[0-4]/out.root"], "Delphes",
                    branches=["Jet.Flavor", "Jet.PT", "Jet.Eta", "Jet.Phi", "Jet.Particles",
                              "Track.fUniqueID", "Track.PT", "Track.Eta", "Track.Phi", "Track.D0", "Track.DZ",
                              "Track.ErrorDZ", "Track.ErrorD0", "Track.Xd", "Track.Yd", "Track.Zd"])
#df = df[:100]
n_gen = len(df)
print(f"n_gen = {n_gen}")
# NOTE: the deltaR-based IPSignificanceOld is used here, not the
# constituent-matched IPSignificance.
df["Track.IPSignificance"] = df.apply( IPSignificanceOld , axis=1)
df["Track.Source"] = df.apply( TrackSource , axis=1)
print(df.head())
# Flatten the per-event lists and split tracks by matched jet flavour.
track_ips = np.concatenate(df['Track.IPSignificance'].to_numpy()).ravel()
track_flavor = np.concatenate(df['Track.Source'].to_numpy()).ravel()
matched_ips = track_ips[ track_flavor >= 0 ]
matched_flavor = track_flavor[ track_flavor >= 0 ]
charm_ips = track_ips[ matched_flavor == 4 ]
light_ips = track_ips[ (matched_flavor < 4) | (matched_flavor == 21) ]
print(matched_ips)
# Draw the IP significance plot
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
gridspec = fig.add_gridspec(ncols=1, nrows=1, width_ratios=[1], height_ratios=[1])
ax1 = fig.add_subplot(gridspec[0, 0])
ax1.grid(which='both', axis='both')
ax1.xaxis.set_major_locator(MultipleLocator(10))
ax1.xaxis.set_major_formatter('{x:.0f}')
# For the minor ticks, use no labels; default NullFormatter.
ax1.xaxis.set_minor_locator(MultipleLocator(2))
xrange = [-30, 30]
#xbins = np.concatenate( ( np.arange(-30,-5,5),np.arange(-5,5,1),np.arange(5, 30, 5) ) )
#xbins = np.arange(-300,300,1)
# variable binning: fine (width 1) in the core, coarse (width 10) in the tails
xbins = np.concatenate( ( np.arange(-300,-30,10),np.arange(-30,30,1),np.arange(30, 300, 10) ) )
(bins, bin_widths, y, y_error) = histplot(light_ips, xrange=xrange, xbins=xbins, density=True)
ax1.errorbar(bins, y, xerr = bin_widths/2, yerr=y_error, label='light jets', marker='o', ms=10, ls='none', linewidth=2, color='red')
(bins, bin_widths, y, y_error) = histplot(charm_ips, xrange=xrange, xbins=xbins, density=True)
ax1.errorbar(bins, y, xerr = bin_widths/2, yerr=y_error, label='charm jets', marker='D', ms=10, ls='none', linewidth=2, color='blue')
plt.ylabel('$\mathrm{P(sIP_{3D} \, | \, Jet \; Flavor)}$')
plt.xlabel('$\mathrm{sIP_{3D}}$')
plt.title("CC-DIS, 10x275GeV, $Q^2>100\\mathrm{GeV^2}$", fontsize=20)
ax1.set_ylim([1e-6,2e0])
ax1.set_xlim(xrange)
ax1.legend(fontsize=18)
plt.yscale('log')
y_minor = mpl.ticker.LogLocator(base = 10.0, subs = np.arange(2.0, 10.0) * 0.1, numticks = 100)
ax1.yaxis.set_minor_locator(y_minor)
ax1.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax1.yaxis.set_major_locator(mpl.ticker.LogLocator(base = 10.0, subs = np.arange(1.0, 2.0), numticks = 100))
plt.tight_layout()
plt.savefig(f"track_ip_significance_{args.input}.png")
plt.savefig(f"track_ip_significance_{args.input}.pdf")
| [
"matplotlib.ticker.NullFormatter",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"math.cos",
"math.sinh",
"numpy.arange",
"numpy.histogram",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.diff",
"math.fabs",
"matplotlib.pyplot.axis",
"matplotlib.py... | [((389, 419), 'matplotlib.pyplot.style.use', 'plt.style.use', (['hep.style.ATLAS'], {}), '(hep.style.ATLAS)\n', (402, 419), True, 'import matplotlib.pyplot as plt\n'), ((421, 578), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.sans-serif': 'Arial', 'font.family': 'sans-serif', 'font.size': 30,\n 'mathtext.fontset': 'custom', 'mathtext.rm': 'Arial'}"], {}), "({'font.sans-serif': 'Arial', 'font.family':\n 'sans-serif', 'font.size': 30, 'mathtext.fontset': 'custom',\n 'mathtext.rm': 'Arial'})\n", (440, 578), True, 'import matplotlib.pyplot as plt\n'), ((6001, 6026), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6024, 6026), False, 'import argparse\n'), ((6265, 6576), 'EICAnalysisTools.UprootLoad', 'eat.UprootLoad', (["[f'{args.dir}/{args.input}/[0-4]/out.root']", '"""Delphes"""'], {'branches': "['Jet.Flavor', 'Jet.PT', 'Jet.Eta', 'Jet.Phi', 'Jet.Particles',\n 'Track.fUniqueID', 'Track.PT', 'Track.Eta', 'Track.Phi', 'Track.D0',\n 'Track.DZ', 'Track.ErrorDZ', 'Track.ErrorD0', 'Track.Xd', 'Track.Yd',\n 'Track.Zd']"}), "([f'{args.dir}/{args.input}/[0-4]/out.root'], 'Delphes',\n branches=['Jet.Flavor', 'Jet.PT', 'Jet.Eta', 'Jet.Phi', 'Jet.Particles',\n 'Track.fUniqueID', 'Track.PT', 'Track.Eta', 'Track.Phi', 'Track.D0',\n 'Track.DZ', 'Track.ErrorDZ', 'Track.ErrorD0', 'Track.Xd', 'Track.Yd',\n 'Track.Zd'])\n", (6279, 6576), True, 'import EICAnalysisTools as eat\n'), ((7264, 7293), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (7276, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7293, 7308), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7301, 7308), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8430), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathrm{P(sIP_{3D} \\\\, | \\\\, Jet \\\\; Flavor)}$"""'], {}), "('$\\\\mathrm{P(sIP_{3D} \\\\, | \\\\, Jet \\\\; Flavor)}$')\n", (8378, 8430), True, 'import 
matplotlib.pyplot as plt\n'), ((8427, 8461), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathrm{sIP_{3D}}$"""'], {}), "('$\\\\mathrm{sIP_{3D}}$')\n", (8437, 8461), True, 'import matplotlib.pyplot as plt\n'), ((8462, 8531), 'matplotlib.pyplot.title', 'plt.title', (['"""CC-DIS, 10x275GeV, $Q^2>100\\\\mathrm{GeV^2}$"""'], {'fontsize': '(20)'}), "('CC-DIS, 10x275GeV, $Q^2>100\\\\mathrm{GeV^2}$', fontsize=20)\n", (8471, 8531), True, 'import matplotlib.pyplot as plt\n'), ((8605, 8622), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8615, 8622), True, 'import matplotlib.pyplot as plt\n'), ((8924, 8942), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8940, 8942), True, 'import matplotlib.pyplot as plt\n'), ((8944, 8998), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""track_ip_significance_{args.input}.png"""'], {}), "(f'track_ip_significance_{args.input}.png')\n", (8955, 8998), True, 'import matplotlib.pyplot as plt\n'), ((8999, 9053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""track_ip_significance_{args.input}.pdf"""'], {}), "(f'track_ip_significance_{args.input}.pdf')\n", (9010, 9053), True, 'import matplotlib.pyplot as plt\n'), ((5550, 5591), 'numpy.histogram', 'np.histogram', (['x'], {'range': 'xrange', 'bins': 'xbins'}), '(x, range=xrange, bins=xbins)\n', (5562, 5591), True, 'import numpy as np\n'), ((5645, 5658), 'numpy.diff', 'np.diff', (['bins'], {}), '(bins)\n', (5652, 5658), True, 'import numpy as np\n'), ((5716, 5731), 'numpy.sqrt', 'np.sqrt', (['counts'], {}), '(counts)\n', (5723, 5731), True, 'import numpy as np\n'), ((7498, 7517), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (7513, 7517), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), ((7650, 7668), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(2)'], {}), '(2)\n', (7665, 7668), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), 
((8786, 8812), 'matplotlib.ticker.NullFormatter', 'mpl.ticker.NullFormatter', ([], {}), '()\n', (8810, 8812), True, 'import matplotlib as mpl\n'), ((7837, 7861), 'numpy.arange', 'np.arange', (['(-300)', '(-30)', '(10)'], {}), '(-300, -30, 10)\n', (7846, 7861), True, 'import numpy as np\n'), ((7860, 7881), 'numpy.arange', 'np.arange', (['(-30)', '(30)', '(1)'], {}), '(-30, 30, 1)\n', (7869, 7881), True, 'import numpy as np\n'), ((7880, 7902), 'numpy.arange', 'np.arange', (['(30)', '(300)', '(10)'], {}), '(30, 300, 10)\n', (7889, 7902), True, 'import numpy as np\n'), ((1456, 1471), 'numpy.abs', 'np.abs', (['jet_eta'], {}), '(jet_eta)\n', (1462, 1471), True, 'import numpy as np\n'), ((1976, 2065), 'numpy.sqrt', 'np.sqrt', (['((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[jet]) ** 2)'], {}), '((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[\n jet]) ** 2)\n', (1983, 2065), True, 'import numpy as np\n'), ((3427, 3442), 'numpy.abs', 'np.abs', (['jet_eta'], {}), '(jet_eta)\n', (3433, 3442), True, 'import numpy as np\n'), ((3768, 3857), 'numpy.sqrt', 'np.sqrt', (['((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[jet]) ** 2)'], {}), '((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[\n jet]) ** 2)\n', (3775, 3857), True, 'import numpy as np\n'), ((4848, 4863), 'numpy.abs', 'np.abs', (['jet_eta'], {}), '(jet_eta)\n', (4854, 4863), True, 'import numpy as np\n'), ((5258, 5347), 'numpy.sqrt', 'np.sqrt', (['((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[jet]) ** 2)'], {}), '((TrackEta[track] - JetEta[jet]) ** 2 + (TrackPhi[track] - JetPhi[\n jet]) ** 2)\n', (5265, 5347), True, 'import numpy as np\n'), ((8675, 8695), 'numpy.arange', 'np.arange', (['(2.0)', '(10.0)'], {}), '(2.0, 10.0)\n', (8684, 8695), True, 'import numpy as np\n'), ((8884, 8903), 'numpy.arange', 'np.arange', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (8893, 8903), True, 'import numpy as np\n'), ((2139, 2156), 'math.cos', 
'math.cos', (['jet_phi'], {}), '(jet_phi)\n', (2147, 2156), False, 'import math\n'), ((2182, 2199), 'math.sin', 'math.sin', (['jet_phi'], {}), '(jet_phi)\n', (2190, 2199), False, 'import math\n'), ((2225, 2243), 'math.sinh', 'math.sinh', (['jet_eta'], {}), '(jet_eta)\n', (2234, 2243), False, 'import math\n'), ((3931, 3948), 'math.cos', 'math.cos', (['jet_phi'], {}), '(jet_phi)\n', (3939, 3948), False, 'import math\n'), ((3974, 3991), 'math.sin', 'math.sin', (['jet_phi'], {}), '(jet_phi)\n', (3982, 3991), False, 'import math\n'), ((4017, 4035), 'math.sinh', 'math.sinh', (['jet_eta'], {}), '(jet_eta)\n', (4026, 4035), False, 'import math\n'), ((4421, 4475), 'math.fabs', 'math.fabs', (['((d0 / d0_error) ** 2 + (dz / dz_error) ** 2)'], {}), '((d0 / d0_error) ** 2 + (dz / dz_error) ** 2)\n', (4430, 4475), False, 'import math\n'), ((2629, 2683), 'math.fabs', 'math.fabs', (['((d0 / d0_error) ** 2 + (dz / dz_error) ** 2)'], {}), '((d0 / d0_error) ** 2 + (dz / dz_error) ** 2)\n', (2638, 2683), False, 'import math\n')] |
import setuptools
from typing import List
import glob
from Cython.Build import cythonize
import numpy as np
def get_scripts_from_bin() -> List[str]:
"""Get all local scripts from bin so they are included in the package."""
return glob.glob("bin/*")
def get_package_description() -> str:
"""Returns a description of this package from the markdown files."""
with open("README.md", "r") as stream:
readme: str = stream.read()
return readme
setuptools.setup(
name="my_ml",
version="",
author="<NAME>",
author_email="<EMAIL>",
description="ML Algos from Scratch, implemented in Python and Numpy.",
long_description=get_package_description(),
long_description_content_type="text/markdown",
url="https://github.com/big-c-note/my_ml_from_scratch",
ext_modules=cythonize("my_ml/model/_split_data_fast.pyx"),
include_dirs=[np.get_include()],
zip_safe=False,
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
scripts=get_scripts_from_bin(),
python_requires=">=3.7",
)
| [
"Cython.Build.cythonize",
"setuptools.find_packages",
"glob.glob",
"numpy.get_include"
] | [((240, 258), 'glob.glob', 'glob.glob', (['"""bin/*"""'], {}), "('bin/*')\n", (249, 258), False, 'import glob\n'), ((822, 867), 'Cython.Build.cythonize', 'cythonize', (['"""my_ml/model/_split_data_fast.pyx"""'], {}), "('my_ml/model/_split_data_fast.pyx')\n", (831, 867), False, 'from Cython.Build import cythonize\n'), ((939, 965), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (963, 965), False, 'import setuptools\n'), ((887, 903), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (901, 903), True, 'import numpy as np\n')] |
import numpy as np
class NN_Model:
LEARNING_RATE = 0.1
LAYERS = 2
@staticmethod
def cost_mse(prediction: float):
return (prediction - NN_Model.TARGET) ** 2
@staticmethod
def sigmoid(x: float):
return 1 / (1 + np.exp(-x))
@staticmethod
def sigmoid_deriv(x: float):
return
def __init__(self):
self.weight = np.array([])
self.bias = 0
self.target = 0
# squared error used --> d/dx (x^2) = 2 * x
def perform_gradient_descent(self, layer_results, input):
derror = 2 * NN_Model.cost_mse(layer_results[-1] - self.target)
dlayer1 = NN_Model.sigmoid_deriv(layer_results[-2])
dbias = 1
dweights = input
return [dbias, dweights]
def adjust_parameters(self, derror):
self.bias -= - derror[0]
self.weights -= derror[1]
def predict(self, input: np.ndarray):
# Perform calculations for layers
layer_results = [0] * NN_Model.LAYERS
# First layer
layer_results[0] = self.weights * input + self.bias
# Last layer -- sigmoid
layer_results[1] = NN_Model.sigmoid(layer_results[0])
return layer_results[1]
def train(self, input: np.ndarray):
# Perform calculations for layers
layer_results = [0] * NN_Model.LAYERS
# First layer
layer_results[0] = self.weights * input + self.bias
# Last layer -- sigmoid
layer_results[-1] = NN_Model.sigmoid(layer_results[-2])
# Perform gradient descent
error = self.perform_gradient_descent(layer_results)
self.adjust_parameters(error)
| [
"numpy.exp",
"numpy.array"
] | [((385, 397), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (393, 397), True, 'import numpy as np\n'), ((258, 268), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (264, 268), True, 'import numpy as np\n')] |
"""
Various functions for model evaluation.
"""
import numpy as np
import pandas as pd
from utils.load_data_raw import DataGenerator_raw
from utils.custom_loss import angle_diff_deg
from utils.plot import plot_history
def model_complete_eval(model, history, part_test, params, batch_size=1024,
add_metrics = [], workers=4):
"""
Evaluate a specified model over all listining positions. Show model
topology, training history and loss on test data set.
Returns the DataGenerator object.
Parameters
----------
model : Keras model
The model to evaluate.
history : dict
Dictionary containing the train history. History.history as
returned by Keras model.fit() function.
part_test : list
List containing the test set/partition.
params : dict
Dictionary containing the parameters for the DataGenerator
object.
batch_size : int, optional
Batch size. Defaults to 1024.
add_metrics : list, optional
Add metrics in list to Keras model to evaluate its performance.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defauts to 4.
Returns
-------
b_gen : DataGenerator object
DataGenerator object for the model to evaluate.
"""
# get metrics from history dict
metrics, v_met, n = get_history_metrics(history)
# show model/net topology
model.summary()
# recompile model with additional metrics
if add_metrics:
add_metrics = model_add_metrics(model, add_metrics)
else:
add_metrics = metrics
# evaluate model
# create batch generator based on params dict
params['batch_size'] = batch_size
b_gen = DataGenerator_raw(part_test, **params)
score = model_eval(model, b_gen, add_metrics, workers)
# plot train history
for j in range(n):
plot_history(history[metrics[j]], history[v_met[j]], metrics[j])
return b_gen
def model_eval_pos(model, history, part_test, params, ID_ref, batch_size=1000, workers=4):
"""
Evaluate a specified model over for each position individually.
Show model topology, training history and loss on test data set.
Parameters
----------
model : Keras model
The model to evaluate.
history : dict
Dictionary containing the train history. History.history as
returned by Keras model.fit() function.
part_test : list
List containing the test set/partition.
params : dict
Dictionary containing the parameters for the DataGenerator
object.
ID_ref : pandas DataFrame object
DataFrame object containing the global ID reference list.
batch_size : int, optional
Batch size. Defaults to 1000.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defauts to 4.
Returns
-------
mae_p : ndarray
Array containing the mean absolute error (or the loss metric)
per position.
mse_p : ndarray
Array containing the mean squared error (or the first metric)
per position.
loc_pred : ndarray
Array containing model predictions per position.
"""
# get metrics from history dict
metrics, _, _ = get_history_metrics(history)
# show model/net topology
model.summary()
il = np.min(part_test)
iu = np.max(part_test)
ID = {}
pos_str = ['pos0', 'pos1', 'pos2', 'pos3', 'pos4', 'pos5', 'pos6', 'pos7',
'pos8', 'pos9']
n_pos = len(pos_str)
for i,s in enumerate(pos_str):
ID[s] = ID_ref[il:iu+1].loc[ID_ref['pos_id'] == i].index.values
mae_p = np.zeros(n_pos)
mse_p = np.zeros(n_pos)
loc_pred = np.zeros([n_pos,len(ID[pos_str[0]])])
for i,s in enumerate(pos_str):
params['batch_size'] = batch_size
b_gen = DataGenerator_raw(ID[s], **params)
mae_p[i], mse_p[i] = model.evaluate_generator(b_gen, verbose=0,
use_multiprocessing=True,
workers=4)
lpred = model.predict_generator(b_gen,verbose=0,
use_multiprocessing=True,
workers=4)
loc_pred[i] = lpred.T
print(s+' : ')
print(mae_p[i])
return mae_p, mse_p, loc_pred
def model_eval(model, b_gen, metric_str, workers=4):
"""
Evaluate model on data generator.
Prints and returns scores for specified metrics.
Parameters
----------
model : Keras model
The model to evaluate.
b_gen : DataGenerator object
DataGenerator object to use for loading the data.
metric_str : list of str
List of names of the metrics to evaluate. For printing.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defauts to 4.
Returns
-------
score : ndarray
Array containing results of the evaluated metrics as returned
by Keras model.evaluate() methods.
"""
print('\nModel Evaluation: \n')
score = model.evaluate_generator(b_gen, verbose=1,
use_multiprocessing=True,
workers=workers)
for i, m in enumerate(metric_str):
print('Test '+m+':', score[i])
return score
def model_pred_on_gen_batch(model, b_gen, b_idx=0):
"""
Predict on model for single batch returned from a data generator.
Returns predictions as well as corresponding targets.
"""
# predict on model
X,y = b_gen.__getitem__(b_idx)
pred = model.predict_on_batch(X)
return pred, y
def calc_errors_m(model, part_x, params, pos_ids, verbose=0, workers=4):
"""
Error analysis based on mean squared error.
Parameters
----------
model : Keras model
The model to evaluate.
part_x : list of ndarray
List of arrays containing the test sets/partitions
corresponding to only one position based on the ordering of
pos_ids.
params : dict
Dictionary containing the parameters for the DataGenerator
object.
pos_ids : ndarray
Array containing position ids.
verbose : int, optional
Verbose parameter for Keras model methods. Either 0, 1 or 2.
Defaults to 0.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defauts to 4.
Returns
-------
MSE_m : float
Mean squared error of particular method.
tau_sq_m : float
Systematic deviations a.k.a. bias for each position.
sigma_sq_m : float
Stochastic variations / variance.
delta_x : float
delta_x.
delta_mean_x : float
delta_mean_x.
"""
# initialization
L = 20 # 20 subjects
B = 3600 # 360 angle * 10 repitions
X = pos_ids.shape[0] # 10 positions
delta_x = []
b_gen = []
delta_mean_x = np.zeros(X)
sigma_sq_m = []
# DELTA_l,m_b(x)
for x in pos_ids:
b_gen.append(DataGenerator_raw(part_x[x], **params))
y_x = get_y_gen(b_gen[x])
pred_x = model.predict_generator(b_gen[x], verbose=verbose,
use_multiprocessing=True,
workers=workers,
max_queue_size=1000)
delta = np.zeros(part_x[x].shape[0])
for i in np.arange(part_x[x].shape[0]):
delta[i] = angle_diff_deg(pred_x[i], y_x[i])
delta_x.append(delta)
# DELTA_MEAN_m(x)
for x in pos_ids:
delta_mean_x[x] = np.sum(delta_x[x]) / (L * B)
# delta_mean_x[x] = np.mean(delta_x[x])
# mean Square Error of particular method
MSE_m = np.sum(np.square(delta_x)) / (L * B * X)
# systematic deviations a.k.a. bias for each position
tau_sq_m = np.sum(np.square(delta_mean_x)) / X
# stochastic variations / variance
for x in pos_ids:
sigma_sq_m.append(delta_x[x] - delta_mean_x[x])
sigma_sq_m = np.sum(np.square(sigma_sq_m)) / (L * B * X)
return MSE_m, tau_sq_m, sigma_sq_m, delta_x, delta_mean_x
def get_y_gen(b_gen):
"""
Gets and returns all targets from data generator.
"""
l = b_gen.__len__()
y = np.array([])
for b_idx in range(l):
_,y_t = b_gen.__getitem__(b_idx)
y = np.append(y,y_t)
return y
def create_test_params(feature_data, target_data, par, batch_size=1024,
shuffle=False):
"""
Create and return a parameter dict for a DataGenerator object.
"""
# check if data is a pandas DataFrame object
if isinstance(feature_data, pd.DataFrame):
feature_data = feature_data.values
if isinstance(target_data, pd.DataFrame):
target_data = target_data.values
# create params dict
params = {'dim': feature_data.shape[1],
'batch_size': batch_size,
'feature_data': feature_data,
'target_data' : target_data,
'shuffle': shuffle,
'n_frames': par['nFrames'].values,
'n_angles': par['nAngles'].values
}
return params
def get_history_metrics(history):
"""
Get metrics from History.history dict as returned by Keras model
fit/fit_generator functions.
"""
metrics = list(history.keys())
v_met = [x for x in metrics if 'val' in x]
n = len(v_met)
metrics = metrics[n:]
return metrics, v_met, n
def model_add_metrics(model, add_metrics):
"""
Recompiling a keras model with additional metrics for evaluation
without affecting model weights. Returns list strings containing
the metric names.
"""
# assure list of unique metrics
metrics = list(set(model.metrics+add_metrics))
# recompile
model.compile(optimizer=model.optimizer, loss=model.loss, metrics=metrics)
# create list of strings of metrics for later evaluation; include loss metric
metrics = [model.loss] + metrics
metrics_str = [metrics[i].__name__ if callable(metrics[i]) else metrics[i] for i in range(len(metrics))]
return metrics_str | [
"utils.plot.plot_history",
"utils.custom_loss.angle_diff_deg",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.square",
"numpy.min",
"utils.load_data_raw.DataGenerator_raw",
"numpy.arange"
] | [((1809, 1847), 'utils.load_data_raw.DataGenerator_raw', 'DataGenerator_raw', (['part_test'], {}), '(part_test, **params)\n', (1826, 1847), False, 'from utils.load_data_raw import DataGenerator_raw\n'), ((3489, 3506), 'numpy.min', 'np.min', (['part_test'], {}), '(part_test)\n', (3495, 3506), True, 'import numpy as np\n'), ((3516, 3533), 'numpy.max', 'np.max', (['part_test'], {}), '(part_test)\n', (3522, 3533), True, 'import numpy as np\n'), ((3805, 3820), 'numpy.zeros', 'np.zeros', (['n_pos'], {}), '(n_pos)\n', (3813, 3820), True, 'import numpy as np\n'), ((3833, 3848), 'numpy.zeros', 'np.zeros', (['n_pos'], {}), '(n_pos)\n', (3841, 3848), True, 'import numpy as np\n'), ((7233, 7244), 'numpy.zeros', 'np.zeros', (['X'], {}), '(X)\n', (7241, 7244), True, 'import numpy as np\n'), ((8574, 8586), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8582, 8586), True, 'import numpy as np\n'), ((1964, 2028), 'utils.plot.plot_history', 'plot_history', (['history[metrics[j]]', 'history[v_met[j]]', 'metrics[j]'], {}), '(history[metrics[j]], history[v_met[j]], metrics[j])\n', (1976, 2028), False, 'from utils.plot import plot_history\n'), ((3995, 4029), 'utils.load_data_raw.DataGenerator_raw', 'DataGenerator_raw', (['ID[s]'], {}), '(ID[s], **params)\n', (4012, 4029), False, 'from utils.load_data_raw import DataGenerator_raw\n'), ((7679, 7707), 'numpy.zeros', 'np.zeros', (['part_x[x].shape[0]'], {}), '(part_x[x].shape[0])\n', (7687, 7707), True, 'import numpy as np\n'), ((7725, 7754), 'numpy.arange', 'np.arange', (['part_x[x].shape[0]'], {}), '(part_x[x].shape[0])\n', (7734, 7754), True, 'import numpy as np\n'), ((8667, 8684), 'numpy.append', 'np.append', (['y', 'y_t'], {}), '(y, y_t)\n', (8676, 8684), True, 'import numpy as np\n'), ((7334, 7372), 'utils.load_data_raw.DataGenerator_raw', 'DataGenerator_raw', (['part_x[x]'], {}), '(part_x[x], **params)\n', (7351, 7372), False, 'from utils.load_data_raw import DataGenerator_raw\n'), ((7779, 7812), 
'utils.custom_loss.angle_diff_deg', 'angle_diff_deg', (['pred_x[i]', 'y_x[i]'], {}), '(pred_x[i], y_x[i])\n', (7793, 7812), False, 'from utils.custom_loss import angle_diff_deg\n'), ((7914, 7932), 'numpy.sum', 'np.sum', (['delta_x[x]'], {}), '(delta_x[x])\n', (7920, 7932), True, 'import numpy as np\n'), ((8060, 8078), 'numpy.square', 'np.square', (['delta_x'], {}), '(delta_x)\n', (8069, 8078), True, 'import numpy as np\n'), ((8174, 8197), 'numpy.square', 'np.square', (['delta_mean_x'], {}), '(delta_mean_x)\n', (8183, 8197), True, 'import numpy as np\n'), ((8344, 8365), 'numpy.square', 'np.square', (['sigma_sq_m'], {}), '(sigma_sq_m)\n', (8353, 8365), True, 'import numpy as np\n')] |
"""
Created by <NAME>
"""
import numpy as np
from scipy.stats import truncnorm
import statsmodels.api as sm
from py4etrics.base_for_models import GenericLikelihoodModel_TobitTruncreg
class Truncreg(GenericLikelihoodModel_TobitTruncreg):
"""
Method 1:
Truncreg(endog, exog, left=<-np.inf>, right=<np.inf>).fit()
endog = dependent variable
exog = independent variable (add constant if needed)
left = the threshold value for left-truncation (default:-np.inf)
right = the threshold value for right-truncation (default:np.inf)
Method 2:
formula = 'y ~ 1 + x'
Truncreg(formula, left=<-np.inf>, right=<np.inf>, data=<DATA>).fit()
Note:
Left-truncated Regression if 'left' only is set.
Right-truncated Regression if 'right' only is set.
Left- and Right-truncated Regression if 'left' and 'right' both are set.
"""
def __init__(self, endog, exog, left=None, right=None, **kwds):
super(Truncreg, self).__init__(endog, exog, **kwds)
if left == None:
left = -np.inf
self.left = left
if right == None:
right = np.inf
self.right = right
def loglikeobs(self, params):
s = params[-1]
beta = params[:-1]
def _truncreg(y,x,left,right,beta,s):
Xb = np.dot(x, beta)
_l = (left - Xb)/np.exp(s)
_r = (right - Xb)/np.exp(s)
return truncnorm.logpdf(y,a=_l,b=_r,loc=Xb,scale=np.exp(s))
return _truncreg(self.endog, self.exog,
self.left, self.right, beta, s)
def fit(self, cov_type='nonrobust', start_params=None, maxiter=10000, maxfun=10000, **kwds):
# add sigma for summary
if 'Log(Sigma)' not in self.exog_names:
self.exog_names.append('Log(Sigma)')
else:
pass
# initial guess
res_ols = sm.OLS(self.endog, self.exog).fit()
params_ols = res_ols.params
sigma_ols = np.log(np.std(res_ols.resid))
if start_params == None:
start_params = np.append(params_ols, sigma_ols)
return super(Truncreg, self).fit(cov_type=cov_type, start_params=start_params,
maxiter=maxiter, maxfun=maxfun, **kwds)
# EOF
| [
"numpy.append",
"numpy.exp",
"numpy.dot",
"numpy.std",
"statsmodels.api.OLS"
] | [((1308, 1323), 'numpy.dot', 'np.dot', (['x', 'beta'], {}), '(x, beta)\n', (1314, 1323), True, 'import numpy as np\n'), ((1981, 2002), 'numpy.std', 'np.std', (['res_ols.resid'], {}), '(res_ols.resid)\n', (1987, 2002), True, 'import numpy as np\n'), ((2064, 2096), 'numpy.append', 'np.append', (['params_ols', 'sigma_ols'], {}), '(params_ols, sigma_ols)\n', (2073, 2096), True, 'import numpy as np\n'), ((1353, 1362), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (1359, 1362), True, 'import numpy as np\n'), ((1393, 1402), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (1399, 1402), True, 'import numpy as np\n'), ((1882, 1911), 'statsmodels.api.OLS', 'sm.OLS', (['self.endog', 'self.exog'], {}), '(self.endog, self.exog)\n', (1888, 1911), True, 'import statsmodels.api as sm\n'), ((1464, 1473), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (1470, 1473), True, 'import numpy as np\n')] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
from .. import misc
class _FakeTable(object):
def __init__(self, name, columns):
self.name = name
self.columns = columns
@pytest.fixture
def fta():
return _FakeTable('a', ['aa', 'ab', 'ac'])
@pytest.fixture
def ftb():
return _FakeTable('b', ['bx', 'by', 'bz'])
@pytest.fixture
def clean_fake_data_home(request):
def fin():
if os.path.isdir('fake_data_home'):
shutil.rmtree('fake_data_home')
request.addfinalizer(fin)
def test_column_map_raises(fta, ftb):
with pytest.raises(RuntimeError):
misc.column_map([fta, ftb], ['aa', 'by', 'bz', 'cw'])
def test_column_map_none(fta, ftb):
assert misc.column_map([fta, ftb], None) == {'a': None, 'b': None}
def test_column_map(fta, ftb):
assert misc.column_map([fta, ftb], ['aa', 'by', 'bz']) == \
{'a': ['aa'], 'b': ['by', 'bz']}
assert misc.column_map([fta, ftb], ['by', 'bz']) == \
{'a': [], 'b': ['by', 'bz']}
def test_dirs(clean_fake_data_home):
misc._mkifnotexists("fake_data_home")
os.environ["DATA_HOME"] = "fake_data_home"
misc.get_run_number()
misc.get_run_number()
misc.data_dir()
misc.configs_dir()
misc.models_dir()
misc.charts_dir()
misc.maps_dir()
misc.simulations_dir()
misc.reports_dir()
misc.runs_dir()
misc.config("test")
@pytest.fixture
def range_df():
df = pd.DataFrame({'to_zone_id': [2, 3, 4],
'from_zone_id': [1, 1, 1],
'distance': [.1, .2, .9]})
df = df.set_index(['from_zone_id', 'to_zone_id'])
return df
@pytest.fixture
def range_series():
return pd.Series([10, 150, 75, 275], index=[1, 2, 3, 4])
def test_compute_range(range_df, range_series):
assert misc.compute_range(range_df, range_series, "distance", .5).loc[1] == 225
def test_reindex():
s = pd.Series([.5, 1.0, 1.5], index=[2, 1, 3])
s2 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
assert list(misc.reindex(s, s2).values) == [1.0, .5, 1.5]
def test_naics():
assert misc.naicsname(54) == "Professional"
def test_signif():
assert misc.signif(4.0) == '***'
assert misc.signif(3.0) == '**'
assert misc.signif(2.0) == '*'
assert misc.signif(1.5) == '.'
assert misc.signif(1.0) == ''
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_cost': [1000000, 2000000, 3000000],
'parcel_size': [10000, 20000, 30000],
'max_far': [2.0, 3.0, 4.0],
'names': ['a', 'b', 'c'],
'max_height': [40, 60, 80]},
index=['a', 'b', 'c'])
def test_misc_dffunctions(simple_dev_inputs):
misc.df64bitto32bit(simple_dev_inputs)
misc.pandasdfsummarytojson(simple_dev_inputs[['land_cost', 'parcel_size']])
misc.numpymat2df(np.array([[1, 2], [3, 4]]))
def test_column_list(fta, ftb):
assert misc.column_list([fta, ftb], ['aa', 'by', 'bz', 'c']) == \
['aa', 'by', 'bz']
| [
"pandas.Series",
"numpy.array",
"os.path.isdir",
"pytest.raises",
"shutil.rmtree",
"pandas.DataFrame"
] | [((1466, 1565), 'pandas.DataFrame', 'pd.DataFrame', (["{'to_zone_id': [2, 3, 4], 'from_zone_id': [1, 1, 1], 'distance': [0.1, 0.2,\n 0.9]}"], {}), "({'to_zone_id': [2, 3, 4], 'from_zone_id': [1, 1, 1],\n 'distance': [0.1, 0.2, 0.9]})\n", (1478, 1565), True, 'import pandas as pd\n'), ((1722, 1771), 'pandas.Series', 'pd.Series', (['[10, 150, 75, 275]'], {'index': '[1, 2, 3, 4]'}), '([10, 150, 75, 275], index=[1, 2, 3, 4])\n', (1731, 1771), True, 'import pandas as pd\n'), ((1936, 1979), 'pandas.Series', 'pd.Series', (['[0.5, 1.0, 1.5]'], {'index': '[2, 1, 3]'}), '([0.5, 1.0, 1.5], index=[2, 1, 3])\n', (1945, 1979), True, 'import pandas as pd\n'), ((1988, 2031), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'index': "['a', 'b', 'c']"}), "([1, 2, 3], index=['a', 'b', 'c'])\n", (1997, 2031), True, 'import pandas as pd\n'), ((2414, 2736), 'pandas.DataFrame', 'pd.DataFrame', (["{'residential': [40, 40, 40], 'office': [15, 18, 15], 'retail': [12, 10, 10\n ], 'industrial': [12, 12, 12], 'land_cost': [1000000, 2000000, 3000000],\n 'parcel_size': [10000, 20000, 30000], 'max_far': [2.0, 3.0, 4.0],\n 'names': ['a', 'b', 'c'], 'max_height': [40, 60, 80]}"], {'index': "['a', 'b', 'c']"}), "({'residential': [40, 40, 40], 'office': [15, 18, 15], 'retail':\n [12, 10, 10], 'industrial': [12, 12, 12], 'land_cost': [1000000, \n 2000000, 3000000], 'parcel_size': [10000, 20000, 30000], 'max_far': [\n 2.0, 3.0, 4.0], 'names': ['a', 'b', 'c'], 'max_height': [40, 60, 80]},\n index=['a', 'b', 'c'])\n", (2426, 2736), True, 'import pandas as pd\n'), ((453, 484), 'os.path.isdir', 'os.path.isdir', (['"""fake_data_home"""'], {}), "('fake_data_home')\n", (466, 484), False, 'import os\n'), ((609, 636), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (622, 636), False, 'import pytest\n'), ((3000, 3026), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3008, 3026), True, 'import numpy as np\n'), ((498, 529), 'shutil.rmtree', 
'shutil.rmtree', (['"""fake_data_home"""'], {}), "('fake_data_home')\n", (511, 529), False, 'import shutil\n')] |
import unittest
import numpy as np
import scipy.stats
import sys
from kldmwr import bivar
from kldmwr import distributions2d
def bvnrm_pdf(x, p):
mu = [0, 0]
sgm = [[p[0], p[1]], [p[1], p[2]]]
return scipy.stats.multivariate_normal.pdf(x, mean=mu, cov=sgm)
def bvnrm_cdf(x, p):
mu = [0, 0]
sgm = [[p[0], p[1]], [p[1], p[2]]]
return scipy.stats.multivariate_normal.cdf(x, mean=mu, cov=sgm)
def bvnrm_marx(t, p):
return scipy.stats.norm.cdf(t, loc=0, scale=p[0]**.5)
def bvnrm_mary(t, p):
return scipy.stats.norm.cdf(t, loc=0, scale=p[2]**.5)
def bvnrm_ptry(p):
p1 = scipy.stats.uniform.rvs() * 2 - 1
return([p[0], p1, p[2]])
class TestBivarBBVT(unittest.TestCase):
    """Tests for the building-block machinery in kldmwr.bivar, run on a
    small 6-point sample (with ties) against hand-computed expectations."""
    def setUp(self):
        # Six (x, y) observations; the duplicated point and repeated y
        # values exercise the tie handling in order_stats/find_relbbs.
        self.name = 'test_bivar'
        self.xy = np.array([
            [-1.797, -0.648],
            [0.436, 0.812],
            [0.436, 0.812],
            [-0.044, -0.884],
            [-0.064, -0.884],
            [1.886, 0.957]
        ])
        # Initial parameter vector [var_x, cov_xy, var_y] for the estimators.
        self.p_0 = [1, .5, 1]
    def test_order_stats(self):
        # order_stats returns the number of unique x values (nux), unique
        # y values (nuy), per-point rank pairs with their multiplicities,
        # and the sorted unique x / y coordinates.
        nux, nuy, ijunq, ijcnt, qqs_x, qqs_y = bivar.order_stats(self.xy)
        expected_ijunq = [
            [1, 2],
            [2, 1],
            [3, 1],
            [4, 3],
            [5, 4]
        ]
        expected_ijcnt = [1, 1, 1, 2, 1]
        expected_qqs_x = [-1.797, -0.064, -0.044, 0.436, 1.886]
        expected_qqs_y = [-0.884, -0.648, 0.812, 0.957]
        self.assertEqual(nux, 5)
        self.assertEqual(nuy, 4)
        np.testing.assert_equal(expected_ijunq, ijunq)
        np.testing.assert_equal(expected_ijcnt, ijcnt)
        np.testing.assert_almost_equal(expected_qqs_x, qqs_x, decimal=6)
        np.testing.assert_almost_equal(expected_qqs_y, qqs_y, decimal=6)
    def test_relbbs(self):
        # find_relbbs maps rank pairs + multiplicities to the relevant
        # building blocks and their weights.
        ijunq_t = np.array([
            [1, 2],
            [2, 1],
            [3, 1],
            [4, 3],
            [5, 4]
        ])
        ijcnt_t = [1, 1, 1, 2, 1]
        relbbs, w_rbbs = bivar.find_relbbs(ijunq_t, ijcnt_t)
        expected_relbbs = np.array([
            [1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2],
            [4, 1], [4, 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]
        ])
        expected_w_rbbs = np.array(
            [1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 3, 2, 1, 1, 1]
        )
        np.testing.assert_equal(expected_relbbs, relbbs)
        np.testing.assert_equal(expected_w_rbbs, w_rbbs)
    def test_find_boundary_bbs(self):
        # Boundary weights for the same building-block list, given grid
        # dimensions nux x nuy.
        relbbs_t = [
            [1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2],
            [4, 1], [4, 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]
        ]
        nux_t = 5
        nuy_t = 4
        w_bnds = bivar.find_boundary_bbs(relbbs_t, nux_t, nuy_t)
        expected_w_bnds = [
            2., 2., 1., 1., 2., 1., 2., 1., 2., 1., 1., 1., 1., 2., 4., 2.
        ]
        np.testing.assert_equal(expected_w_bnds, w_bnds)
    def test_find_relvts(self):
        # find_relvts expands building blocks into the set of relevant
        # grid vertices (corner points).
        relbbs_t = np.array([
            [1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2],
            [4, 1], [4, 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]
        ])
        relvts = bivar.find_relvts(relbbs_t)
        expected_relvts = np.array([
            [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
            [2, 1], [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4],
            [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3],
            [5, 4], [5, 5], [6, 3], [6, 4], [6, 5]
        ])
        np.testing.assert_equal(expected_relvts, relvts)
    def test_find_relvts_in(self):
        # find_relvts_in keeps only the vertices strictly inside the
        # nux x nuy grid (boundary rows/columns removed).
        relvts_in_t = np.array([
            [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
            [2, 1], [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4],
            [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3],
            [5, 4], [5, 5], [6, 3], [6, 4], [6, 5]
        ])
        nux_t = 5
        nuy_t = 4
        relvts_in = bivar.find_relvts_in(relvts_in_t, nux_t, nuy_t)
        expected_relvts_in = np.array([
            [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2],
            [3, 3], [3, 4], [4, 1], [4, 2], [4, 3], [4, 4], [5, 2], [5, 3],
            [5, 4]
        ])
        np.testing.assert_equal(expected_relvts_in, relvts_in)
    def test_zbce(self):
        # zbce: estimate parameters from (cdf, marginal cdfs, proposal);
        # res_zbce[0] is the estimate, res_zbce[1] the success flag.
        res_zbce = bivar.zbce(
            self.xy, self.p_0, bvnrm_cdf, bvnrm_marx, bvnrm_mary, bvnrm_ptry
        )
        expected_res_0 = [1.40075002, 0.67631764, 0.89521719]
        expected_res_1 = True
        np.testing.assert_almost_equal(expected_res_0, res_zbce[0], decimal=6)
        self.assertEqual(expected_res_1, res_zbce[1])
    def test_mle(self):
        # Maximum-likelihood estimation from the pdf; same result layout
        # as zbce above.
        res_mle = bivar.mle(self.xy, self.p_0, bvnrm_pdf, bvnrm_ptry)
        expected_res_0 = [1.19534318, 0.62878002, 0.70289784]
        expected_res_1 = True
        np.testing.assert_almost_equal(expected_res_0, res_mle[0], decimal=6)
        self.assertEqual(expected_res_1, res_mle[1])
class TestBivarNormal(unittest.TestCase):
    """Regression tests for bivar.zbce on 10 fixed draws from a bivariate
    normal, including the early-termination (NaN) path."""
    def setUp(self):
        self.name = 'test_bivar_normal'
        # 20 floats reshaped below into 10 (x, y) pairs.
        self.xyf = [
            -0.7205309867971943, 0.6441177698286161, 0.2705656886204725,
            0.47168357423144375, -0.9074452994750679, 0.09476762389772658,
            -0.5850334108847408, -0.34369186490160786, -0.664133497596725,
            0.15114917436089836, 1.602745021788436, -0.5084754998713794,
            -2.4903447379498234, -0.8001755224577575, 0.5613884958479634,
            -0.21340871665608885, -1.2699194113545624, -0.4390206699334978,
            -1.481148579488512, 0.49020300893012597
        ]
        self.xyf = np.array(self.xyf)
        self.xyf = np.reshape(self.xyf, (10, 2))
        # Initial parameter vector [var_x, cov_xy, var_y].
        self.p_i = [1, .5, 1]
    def test_zbce(self):
        # Successful run: check estimate (res_zbc[0]), success flag
        # (res_zbc[1]), objective value (res_zbc[2]), and that the
        # iteration-count-like slot res_zbc[3] is not 1.
        res_zbc = bivar.zbce(
            self.xyf, self.p_i, bvnrm_cdf, bvnrm_marx, bvnrm_mary, bvnrm_ptry
        )
        expected_zbc_0 = [1.68423674, 0.0144281 , 0.24791642]
        expected_zbc_1 = True
        expected_zbc_2 = -218.26642802609823
        expected_zbc_3_is_not = 1
        np.testing.assert_almost_equal(expected_zbc_0, res_zbc[0], decimal=4)
        self.assertEqual(expected_zbc_1, res_zbc[1])
        np.testing.assert_almost_equal(expected_zbc_2, res_zbc[2], decimal=4)
        self.assertNotEqual(expected_zbc_3_is_not, res_zbc[3])
    def test_zbce_returns_nan(self):
        # With max_count=1 the optimizer cannot converge, so zbce should
        # report failure: NaN estimate, False flag, NaN objective.
        res_zbc = bivar.zbce(
            self.xyf, self.p_i, bvnrm_cdf, bvnrm_marx, bvnrm_mary, bvnrm_ptry,
            max_count=1
        )
        expected_zbc_0 = [np.NaN, np.NaN, np.NaN]
        expected_zbc_1 = False
        expected_zbc_2 = np.NaN
        np.testing.assert_equal(expected_zbc_0, res_zbc[0])
        self.assertEqual(expected_zbc_1, res_zbc[1])
        np.testing.assert_almost_equal(expected_zbc_2, res_zbc[2], decimal=4)
if __name__ == '__main__':
    # Run all test cases in this module via the unittest runner.
    unittest.main()
| [
"kldmwr.bivar.order_stats",
"kldmwr.bivar.find_relvts_in",
"numpy.testing.assert_equal",
"numpy.reshape",
"kldmwr.bivar.mle",
"kldmwr.bivar.find_boundary_bbs",
"kldmwr.bivar.find_relbbs",
"numpy.array",
"numpy.testing.assert_almost_equal",
"kldmwr.bivar.zbce",
"unittest.main",
"kldmwr.bivar.fi... | [((6865, 6880), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6878, 6880), False, 'import unittest\n'), ((789, 906), 'numpy.array', 'np.array', (['[[-1.797, -0.648], [0.436, 0.812], [0.436, 0.812], [-0.044, -0.884], [-\n 0.064, -0.884], [1.886, 0.957]]'], {}), '([[-1.797, -0.648], [0.436, 0.812], [0.436, 0.812], [-0.044, -0.884\n ], [-0.064, -0.884], [1.886, 0.957]])\n', (797, 906), True, 'import numpy as np\n'), ((1092, 1118), 'kldmwr.bivar.order_stats', 'bivar.order_stats', (['self.xy'], {}), '(self.xy)\n', (1109, 1118), False, 'from kldmwr import bivar\n'), ((1490, 1536), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_ijunq', 'ijunq'], {}), '(expected_ijunq, ijunq)\n', (1513, 1536), True, 'import numpy as np\n'), ((1545, 1591), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_ijcnt', 'ijcnt'], {}), '(expected_ijcnt, ijcnt)\n', (1568, 1591), True, 'import numpy as np\n'), ((1600, 1664), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_qqs_x', 'qqs_x'], {'decimal': '(6)'}), '(expected_qqs_x, qqs_x, decimal=6)\n', (1630, 1664), True, 'import numpy as np\n'), ((1673, 1737), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_qqs_y', 'qqs_y'], {'decimal': '(6)'}), '(expected_qqs_y, qqs_y, decimal=6)\n', (1703, 1737), True, 'import numpy as np\n'), ((1785, 1835), 'numpy.array', 'np.array', (['[[1, 2], [2, 1], [3, 1], [4, 3], [5, 4]]'], {}), '([[1, 2], [2, 1], [3, 1], [4, 3], [5, 4]])\n', (1793, 1835), True, 'import numpy as np\n'), ((1965, 2000), 'kldmwr.bivar.find_relbbs', 'bivar.find_relbbs', (['ijunq_t', 'ijcnt_t'], {}), '(ijunq_t, ijcnt_t)\n', (1982, 2000), False, 'from kldmwr import bivar\n'), ((2027, 2170), 'numpy.array', 'np.array', (['[[1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2], [4, 1], [4,\n 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]]'], {}), '([[1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 
1], [4, 2], [\n 4, 1], [4, 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]])\n', (2035, 2170), True, 'import numpy as np\n'), ((2226, 2284), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 3, 2, 1, 1, 1]'], {}), '([1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 3, 2, 1, 1, 1])\n', (2234, 2284), True, 'import numpy as np\n'), ((2315, 2363), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_relbbs', 'relbbs'], {}), '(expected_relbbs, relbbs)\n', (2338, 2363), True, 'import numpy as np\n'), ((2372, 2420), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_w_rbbs', 'w_rbbs'], {}), '(expected_w_rbbs, w_rbbs)\n', (2395, 2420), True, 'import numpy as np\n'), ((2696, 2743), 'kldmwr.bivar.find_boundary_bbs', 'bivar.find_boundary_bbs', (['relbbs_t', 'nux_t', 'nuy_t'], {}), '(relbbs_t, nux_t, nuy_t)\n', (2719, 2743), False, 'from kldmwr import bivar\n'), ((2865, 2913), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_w_bnds', 'w_bnds'], {}), '(expected_w_bnds, w_bnds)\n', (2888, 2913), True, 'import numpy as np\n'), ((2967, 3110), 'numpy.array', 'np.array', (['[[1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2], [4, 1], [4,\n 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]]'], {}), '([[1, 2], [1, 3], [2, 3], [2, 2], [2, 1], [3, 2], [3, 1], [4, 2], [\n 4, 1], [4, 3], [4, 4], [5, 4], [5, 3], [5, 5], [6, 5], [6, 4]])\n', (2975, 3110), True, 'import numpy as np\n'), ((3157, 3184), 'kldmwr.bivar.find_relvts', 'bivar.find_relvts', (['relbbs_t'], {}), '(relbbs_t)\n', (3174, 3184), False, 'from kldmwr import bivar\n'), ((3211, 3466), 'numpy.array', 'np.array', (['[[0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1], [2,\n 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 0], [4, 1], [4,\n 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3], [5, 4], [5, 5], [6, 3], [6,\n 4], [6, 5]]'], {}), '([[0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [\n 2, 1], [2, 2], [2, 3], [3, 0], 
[3, 1], [3, 2], [3, 3], [3, 4], [4, 0],\n [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3], [5, 4], [5, 5],\n [6, 3], [6, 4], [6, 5]])\n', (3219, 3466), True, 'import numpy as np\n'), ((3520, 3568), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_relvts', 'relvts'], {}), '(expected_relvts, relvts)\n', (3543, 3568), True, 'import numpy as np\n'), ((3628, 3883), 'numpy.array', 'np.array', (['[[0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1], [2,\n 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 0], [4, 1], [4,\n 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3], [5, 4], [5, 5], [6, 3], [6,\n 4], [6, 5]]'], {}), '([[0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [\n 2, 1], [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 0],\n [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [5, 2], [5, 3], [5, 4], [5, 5],\n [6, 3], [6, 4], [6, 5]])\n', (3636, 3883), True, 'import numpy as np\n'), ((3985, 4032), 'kldmwr.bivar.find_relvts_in', 'bivar.find_relvts_in', (['relvts_in_t', 'nux_t', 'nuy_t'], {}), '(relvts_in_t, nux_t, nuy_t)\n', (4005, 4032), False, 'from kldmwr import bivar\n'), ((4062, 4213), 'numpy.array', 'np.array', (['[[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3], [3,\n 4], [4, 1], [4, 2], [4, 3], [4, 4], [5, 2], [5, 3], [5, 4]]'], {}), '([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [\n 3, 3], [3, 4], [4, 1], [4, 2], [4, 3], [4, 4], [5, 2], [5, 3], [5, 4]])\n', (4070, 4213), True, 'import numpy as np\n'), ((4263, 4317), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_relvts_in', 'relvts_in'], {}), '(expected_relvts_in, relvts_in)\n', (4286, 4317), True, 'import numpy as np\n'), ((4364, 4440), 'kldmwr.bivar.zbce', 'bivar.zbce', (['self.xy', 'self.p_0', 'bvnrm_cdf', 'bvnrm_marx', 'bvnrm_mary', 'bvnrm_ptry'], {}), '(self.xy, self.p_0, bvnrm_cdf, bvnrm_marx, bvnrm_mary, bvnrm_ptry)\n', (4374, 4440), False, 'from kldmwr import bivar\n'), 
((4563, 4633), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_res_0', 'res_zbce[0]'], {'decimal': '(6)'}), '(expected_res_0, res_zbce[0], decimal=6)\n', (4593, 4633), True, 'import numpy as np\n'), ((4732, 4783), 'kldmwr.bivar.mle', 'bivar.mle', (['self.xy', 'self.p_0', 'bvnrm_pdf', 'bvnrm_ptry'], {}), '(self.xy, self.p_0, bvnrm_pdf, bvnrm_ptry)\n', (4741, 4783), False, 'from kldmwr import bivar\n'), ((4884, 4953), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_res_0', 'res_mle[0]'], {'decimal': '(6)'}), '(expected_res_0, res_mle[0], decimal=6)\n', (4914, 4953), True, 'import numpy as np\n'), ((5662, 5680), 'numpy.array', 'np.array', (['self.xyf'], {}), '(self.xyf)\n', (5670, 5680), True, 'import numpy as np\n'), ((5700, 5729), 'numpy.reshape', 'np.reshape', (['self.xyf', '(10, 2)'], {}), '(self.xyf, (10, 2))\n', (5710, 5729), True, 'import numpy as np\n'), ((5804, 5881), 'kldmwr.bivar.zbce', 'bivar.zbce', (['self.xyf', 'self.p_i', 'bvnrm_cdf', 'bvnrm_marx', 'bvnrm_mary', 'bvnrm_ptry'], {}), '(self.xyf, self.p_i, bvnrm_cdf, bvnrm_marx, bvnrm_mary, bvnrm_ptry)\n', (5814, 5881), False, 'from kldmwr import bivar\n'), ((6083, 6152), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_zbc_0', 'res_zbc[0]'], {'decimal': '(4)'}), '(expected_zbc_0, res_zbc[0], decimal=4)\n', (6113, 6152), True, 'import numpy as np\n'), ((6214, 6283), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_zbc_2', 'res_zbc[2]'], {'decimal': '(4)'}), '(expected_zbc_2, res_zbc[2], decimal=4)\n', (6244, 6283), True, 'import numpy as np\n'), ((6403, 6497), 'kldmwr.bivar.zbce', 'bivar.zbce', (['self.xyf', 'self.p_i', 'bvnrm_cdf', 'bvnrm_marx', 'bvnrm_mary', 'bvnrm_ptry'], {'max_count': '(1)'}), '(self.xyf, self.p_i, bvnrm_cdf, bvnrm_marx, bvnrm_mary,\n bvnrm_ptry, max_count=1)\n', (6413, 6497), False, 'from kldmwr import bivar\n'), ((6649, 6700), 
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected_zbc_0', 'res_zbc[0]'], {}), '(expected_zbc_0, res_zbc[0])\n', (6672, 6700), True, 'import numpy as np\n'), ((6762, 6831), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['expected_zbc_2', 'res_zbc[2]'], {'decimal': '(4)'}), '(expected_zbc_2, res_zbc[2], decimal=4)\n', (6792, 6831), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 11:28:21 2019
@author: keelin
"""
from numpy import fliplr, flipud
from opencxr.utils.resize_rescale import rescale_to_min_max
from skimage import util
from skimage.transform import rotate
def invert_grayscale(np_array_in, preserve_dtype=True):
    """
    Invert the grey values of an image.

    When preserve_dtype is True, the inverted image is cast back to the
    input's dtype, with grey values rescaled to the min/max of that dtype.

    :param np_array_in: input image
    :param preserve_dtype: whether to preserve the input dtype
    :return: the image with intensities inverted
    """
    result = util.invert(np_array_in)
    if not preserve_dtype:
        return result
    # Rescale to the dtype's min/max range while casting back to it.
    return rescale_to_min_max(result, np_array_in.dtype, new_min=None, new_max=None)
def rotate_img(np_array_in, rot_angle, preserve_dtype=True):
    """
    Rotate an image clockwise by rot_angle degrees (delegates to
    skimage.transform.rotate; "clockwise" is stated per this package's
    axis convention — confirm against skimage's angle direction).

    When preserve_dtype is True, the result is cast back to the input's
    dtype, with grey values rescaled to the min/max of that dtype.

    :param np_array_in: input image
    :param rot_angle: angle of rotation in degrees
    :param preserve_dtype: whether to preserve the input dtype
    :return: the rotated image
    """
    rotated = rotate(np_array_in, rot_angle)
    if not preserve_dtype:
        return rotated
    # Rescale to the dtype's min/max range while casting back to it.
    return rescale_to_min_max(rotated, np_array_in.dtype, new_min=None, new_max=None)
def flip_x(np_array_in, preserve_dtype=True):
    """
    Flip an image horizontally (switch the left and right sides).

    Implemented via numpy flipud, i.e. a flip along axis 0 — axis 0 is
    treated as the x (left/right) direction in this package's convention.

    When preserve_dtype is True, the result is cast back to the input's
    dtype, with grey values rescaled to the min/max of that dtype.

    :param np_array_in: input image
    :param preserve_dtype: whether to preserve the input dtype
    :return: the horizontally flipped image
    """
    flipped = flipud(np_array_in)
    if not preserve_dtype:
        return flipped
    # Rescale to the dtype's min/max range while casting back to it.
    return rescale_to_min_max(flipped, np_array_in.dtype, new_min=None, new_max=None)
def flip_y(np_array_in, preserve_dtype=True):
    """
    Flip an image vertically (switch the top and bottom).

    Implemented via numpy fliplr, i.e. a flip along axis 1 — axis 1 is
    treated as the y (top/bottom) direction in this package's convention.

    When preserve_dtype is True, the result is cast back to the input's
    dtype, with grey values rescaled to the min/max of that dtype.

    :param np_array_in: input image
    :param preserve_dtype: whether to preserve the input dtype
    :return: the vertically flipped image
    """
    flipped = fliplr(np_array_in)
    if not preserve_dtype:
        return flipped
    # Rescale to the dtype's min/max range while casting back to it.
    return rescale_to_min_max(flipped, np_array_in.dtype, new_min=None, new_max=None)
| [
"skimage.util.invert",
"numpy.flipud",
"skimage.transform.rotate",
"numpy.fliplr",
"opencxr.utils.resize_rescale.rescale_to_min_max"
] | [((691, 715), 'skimage.util.invert', 'util.invert', (['np_array_in'], {}), '(np_array_in)\n', (702, 715), False, 'from skimage import util\n'), ((1486, 1516), 'skimage.transform.rotate', 'rotate', (['np_array_in', 'rot_angle'], {}), '(np_array_in, rot_angle)\n', (1492, 1516), False, 'from skimage.transform import rotate\n'), ((2201, 2220), 'numpy.flipud', 'flipud', (['np_array_in'], {}), '(np_array_in)\n', (2207, 2220), False, 'from numpy import fliplr, flipud\n'), ((2867, 2886), 'numpy.fliplr', 'fliplr', (['np_array_in'], {}), '(np_array_in)\n', (2873, 2886), False, 'from numpy import fliplr, flipud\n'), ((840, 918), 'opencxr.utils.resize_rescale.rescale_to_min_max', 'rescale_to_min_max', (['inverted_np', 'np_array_in.dtype'], {'new_min': 'None', 'new_max': 'None'}), '(inverted_np, np_array_in.dtype, new_min=None, new_max=None)\n', (858, 918), False, 'from opencxr.utils.resize_rescale import rescale_to_min_max\n'), ((1637, 1711), 'opencxr.utils.resize_rescale.rescale_to_min_max', 'rescale_to_min_max', (['rot_img', 'np_array_in.dtype'], {'new_min': 'None', 'new_max': 'None'}), '(rot_img, np_array_in.dtype, new_min=None, new_max=None)\n', (1655, 1711), False, 'from opencxr.utils.resize_rescale import rescale_to_min_max\n'), ((2339, 2411), 'opencxr.utils.resize_rescale.rescale_to_min_max', 'rescale_to_min_max', (['flipx', 'np_array_in.dtype'], {'new_min': 'None', 'new_max': 'None'}), '(flipx, np_array_in.dtype, new_min=None, new_max=None)\n', (2357, 2411), False, 'from opencxr.utils.resize_rescale import rescale_to_min_max\n'), ((3005, 3077), 'opencxr.utils.resize_rescale.rescale_to_min_max', 'rescale_to_min_max', (['flipy', 'np_array_in.dtype'], {'new_min': 'None', 'new_max': 'None'}), '(flipy, np_array_in.dtype, new_min=None, new_max=None)\n', (3023, 3077), False, 'from opencxr.utils.resize_rescale import rescale_to_min_max\n')] |
###Package Importing
import numpy as np
import pandas as pd
from sklearn import metrics
import logging
from sklearn.externals import joblib
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
import os
from datetime import datetime
from preprocessing import hash_col
from preprocessing import onehot
from preprocessing import normalize
from model import machine_learning as ml
from preprocessing import discretize
#from sklearn.ensemble import RandomForestClassifier
# Working directory; left as a placeholder — fill in before running.
wkdir = ""
#set parameters
# Derived locations for model dumps, input data, and evaluation output.
# NOTE(review): with wkdir == "" these resolve to "/model", "/data",
# "/output" — confirm wkdir is set before use.
dump_path = wkdir + "/" + "model"
data_path = wkdir + "/" + "data"
out_path = wkdir + "/" + "output"
# Feature column groups, to be populated per dataset: float-valued,
# count, day-interval, and unicode/categorical columns. colList_dup is
# referenced only in a commented-out line inside train().
colList_float = []
colList_cnt = []
colList_days = []
colList_unicode = []
colList_dup = []
# Primary-key column name and label column name (placeholders).
keys = ''
labels = ''
def train(*args, **kwargs) :
    """End-to-end training pipeline: load the label-0 and label-1 CSVs,
    normalize numeric and categorical features, build an (optionally
    oversampled) train/test split, grid-search four classifiers
    (GBDT, RF, SVM, LR), dump the fitted models, and write ROC/PR plots
    plus per-model threshold tables and a feature-importance table.

    Expected keyword arguments (``*args`` is accepted but unused):
        path_list:            [wkdir, dump_path, data_path, out_path]
        filenames:            [csv for label 0, csv for label 1]
        column_lists:         [float cols, count cols, day cols, unicode cols]
        keys:                 primary-key column name
        oversampling_ratio:   optional, default 0.5 (fraction of the
                              second label class re-sampled into training)
        comprehensive_search: optional, default False (wider grid search)

    Returns (models, metrics_pr). NOTE(review): if an exception is raised
    before these locals are assigned, the final ``return`` raises
    NameError — see the note at the ``except`` clause below.
    """
    ###Setup logging
    logger = logging.getLogger(__name__)
    logger.setLevel(level = logging.INFO)
    handler = logging.FileHandler("log.txt")
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.info("START training")
    # Mandatory Args
    wkdir, dump_path, data_path, out_path = kwargs["path_list"]
    filename0, filename1 = kwargs["filenames"]
    colList_float, colList_cnt, colList_days, colList_unicode = kwargs["column_lists"]
    keys = kwargs["keys"]
    # Optional Args
    oversampling_ratio = kwargs["oversampling_ratio"] if "oversampling_ratio" in kwargs.keys() else 0.5
    comprehensive_search = kwargs["comprehensive_search"] if "comprehensive_search" in kwargs.keys() else False
    ###Data Loading
    os.chdir(wkdir)
    try :
        # Each input CSV holds one label class; duplicate rows on the key
        # column are dropped before the two classes are stacked.
        data_ori0 = pd.read_csv(data_path + "/" + filename0 #/rex_up_features_sample0.csv"
                    , low_memory=False, encoding=u'utf-8') \
                    .drop_duplicates(subset=keys,keep='first')
        data_ori1 = pd.read_csv(data_path + "/" + filename1
                    , low_memory=False, encoding=u'utf-8').drop_duplicates(subset=keys,keep='first')
        #axis = 0 means merge by column(same column join)
        #axis = 1 means merge by row(same index join)
        data_tmp = pd.concat([data_ori0, data_ori1], axis=0)
        data_tmp.index = data_tmp[keys]
        #print(data_ori0.shape, data_ori1.shape, data_tmp.shape)
        #print(data_tmp)
        assert data_ori0.shape[0]+data_ori1.shape[0] == data_tmp.shape[0] , "0/1 Merging failed"
        assert data_ori0.shape[1] == data_ori1.shape[1] == data_tmp.shape[1] , "Column number not match"
        logger.info("shapes of data_ori0, data_ori1, data_tmp:" + str(data_ori0.shape) + str(data_ori1.shape) + str(data_tmp.shape))
        #For numeric features including float, cnt and days, we fill NaN and normalize
        #No need to discretize in this model.
        #n_disc = 5
        clients_discretized = data_tmp.loc[:, :].copy()
        #nsamples = clients_discretized.shape[0]
        # Numeric features: clean each cell, cast to float64, then
        # mean/std-normalize column-wise; residual NaNs become 0.
        features_num = clients_discretized[[keys]+colList_float + colList_days + colList_cnt].drop_duplicates( keep='first')
        features_num = features_num[colList_float + colList_days + colList_cnt] \
                        .applymap(discretize.clean_cell) \
                        .applymap(lambda x : np.float64(x))
        # save (mean, std) into tables so that can be retrieved at predict phase
        features_num.apply(lambda x : pd.Series([np.mean(x), np.std(x)]),axis=0).to_csv(out_path + "/" + "features_num_meanstd.csv")
        logger.info("numeric features normalization args have been writen to files:" + "features_num_meanstd.csv")
        features_num = features_num.apply(normalize.meanstd, axis=0).fillna(0)
        logger.info("numeric features processed, shaped as : " + str(features_num.shape))
        # Categorical features: factorize each column to integer codes,
        # then mean/std-normalize like the numeric block above.
        # NOTE(review): .astype('category',ordered=True) uses a deprecated
        # pandas signature — newer pandas requires a CategoricalDtype.
        features_cat=clients_discretized[[keys]+colList_unicode].drop_duplicates(keep='first')
        features_cat=features_cat[colList_unicode].fillna("0") \
                        .apply(lambda x: x.astype('category',ordered=True) \
                        .factorize()[0],axis=0)
        features_cat.apply(lambda x : pd.Series([np.mean(x), np.std(x)]),axis=0).to_csv(out_path+ "/" + "features_cat_meanstd.csv")
        logger.info("categorical features normalization args have been writen to files:" + "features_cat_meanstd.csv")
        features_cat = features_cat.apply(normalize.meanstd, axis=0) .fillna(0)
        logger.info("categorical features processed, shaped as : " + str(features_cat.shape))
        # Deal with label
        label_data = clients_discretized[[keys]+[labels]].drop_duplicates(keep='first')
        label_data = label_data[labels]
        # NOTE(review): this assert is ineffective — it evaluates
        # sum([num_nan_count, bool(cat_nan_count == 0)]), which is truthy
        # even when features_num DOES contain NaNs. Intended check is
        # likely: num NaNs == 0 and cat NaNs == 0 (moot in practice since
        # both frames were fillna(0)'d above).
        assert sum([features_num.isnull().sum().sum(),
                    features_cat.isnull().sum().sum() == 0]) , "There is NaN in features"
        logger.info("labels processed, shaped as : " + str(label_data.shape))
        # Align all three pieces on the sorted key index and concatenate
        # column-wise into the modelling frame.
        data_all = pd.concat([features_num.loc[features_num.index.sort_values(),],
                    features_cat.loc[features_cat.index.sort_values(),],
                    label_data.loc[label_data.index.sort_values(),]], axis=1)
        #data_all[labels] = data_all[labels].apply(lambda x: 0 if x != x else 1)
        logger.info("merging features processed, shaped as : " + str(data_all.shape))
        assert data_all.isnull().sum().sum() == 0, "There is NaN in data_all"
        ###Dataset Separation###
        #oversample should be a fraction indicating how much samples oversample
        def BuildDataSet(data, X, Y, frac, keys = None, oversampling = [0, 0.5]) :
            # Stratified split: per label value, sample `frac` of its rows
            # into the training set, then re-sample a further
            # `oversampling[i]` fraction of those rows (duplicating them).
            # Rows never picked for training become the test set.
            # NOTE(review): mutable default `oversampling=[0, 0.5]` is a
            # Python pitfall; harmless here as it is never mutated.
            if keys != None :
                data.index = data[keys]
            X_train = pd.DataFrame()
            Y_train = pd.Series()
            unique_Y = np.unique(data[Y])
            for i in range(len(unique_Y)) :
                val_Y = unique_Y[i]
                print(val_Y)
                # sampling
                _X_train = data.loc[data[Y] == val_Y,X].sample(frac = frac,
                            replace = False, random_state=0)
                _Y_train = data.loc[data.index.isin(_X_train.index), Y]
                # append Y sampling
                X_train = X_train.append(_X_train)
                Y_train = Y_train.append(_Y_train)
                # oversampling
                _X_train = _X_train.sample(frac = oversampling[i],
                            replace=False, random_state=0)
                _Y_train = data.loc[data.index.isin(_X_train.index), Y]
                # append oversampling
                X_train = X_train.append(_X_train)
                Y_train = Y_train.append(_Y_train)
            #X_train = X_train.loc[X_train.index.sort_values(),:]
            #print(X_train.shape)
            #X_train_1 = data.loc[data[Y] == 1,X].sample(frac = frac, replace = False,random_state=0)
            #print(X_train_1.shape)
            #Y_train = Y_train[Y_train.index.isin(X_train.index), Y]
            #Y_train.index = X_train.index
            #print(Y_train.shape)
            #Y_train_1 = data.loc[data.index.isin(X_train_1.index), Y]
            #print(Y_train_1.shape)
            X_test = data.loc[~data.index.isin(X_train.index),X]
            #X_test = X_test.loc[X_test.index.sort_values(),:]
            Y_test = data.loc[data.index.isin(X_test.index), Y]
            Y_test.index = X_test.index
            return X_train, Y_train, X_test, Y_test
        ###
        #Divide data into training and testing set
        #70% training vs 30% testing
        ###
        #features = colList_unicode + colList_int + colList_float + colList_dup
        features = data_all.columns.values.tolist()
        features.remove( labels )
        # NOTE(review): frac=0.9 here, so the split is actually 90/10,
        # not the 70/30 stated in the comment block above.
        X_train, Y_train, X_test, Y_test = BuildDataSet(data_all, features, labels, 0.9,
                    oversampling=[0,oversampling_ratio])
        #print(X_train.head())
        logger.info("building dataset processed, shapes of X_train, Y_train, X_test, Y_test: " +
                    str(X_train.shape) + str(Y_train.shape) +
                    str(X_test.shape) + str(Y_test.shape))
        # NOTE(review): plain string concatenation (not os.path.join) —
        # these files land beside the data directory as
        # "<data_path>_X_test.csv" / "<data_path>_Y_test.csv".
        X_test.to_csv(data_path+"_X_test.csv")
        Y_test.to_csv(data_path+"_Y_test.csv")
        ######################################################################
        # Train and evaluate
        ######################################################################
        def eval_pr(models, X_test, Y_test) :
            # Draw one precision-recall curve per fitted model on a shared
            # figure; returns the pyplot module and a list of
            # (name, model, [precision, recall, thresholds, avg_precision]).
            colors = ['blue','yellow','green','red','purple','pink','grey']
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            plt.ylim([0.0, 1.05])
            plt.xlim([-0.01, 1.01])
            plt.title('2-class Precision-Recall curve') # AP={0:0.2f}'.format(average_precision))
            handles = []
            evals = []
            for i in range(0, len(models)) :
                m = models[i][1]
                name = models[i][0]
                pred = m.predict_proba(X_test)
                #print(pred)
                precision, recall, thresholds = metrics.precision_recall_curve(Y_test, pred[:,1])
                average_precision = metrics.average_precision_score(Y_test, pred[:,1])
                c = colors[i]
                handle, = plt.plot(recall, precision, color=c, label=name + " with ap={:0.4f}".format(average_precision))
                handles .append( handle ) #step='post', alpha=0.2, color=c)
                evals.append((name, m, [precision, recall, thresholds, average_precision]))
            plt.legend(handles=handles, loc=3)
            return plt, evals
        def eval_roc(models, X_test, Y_test) :
            # Same layout as eval_pr but for ROC curves; evals carries
            # [fpr, tpr, thresholds, auc] per model.
            colors = ['blue','yellow','green','red','purple','pink','grey']
            plt.xlabel('FPR')
            plt.ylabel('TPR')
            plt.ylim([0.0, 1.05])
            plt.xlim([-0.01, 1.01])
            plt.title('2-class ROC curve') #: AUC={0:0.2f}'.format(auc))
            #plt.legend(loc)
            handles=[]
            evals=[]
            for i in range(0, len(models)) :
                m = models[i][1]
                name = models[i][0]
                pred = m.predict_proba(X_test)
                fpr, tpr, thresholds = metrics.roc_curve(Y_test, pred[:,1])#, pos_label=2)
                auc = metrics.roc_auc_score(Y_test, pred[:,1])
                c = colors[i]
                handle, = plt.plot(fpr, tpr, color=c, label=name + " with auc={:0.4f}".format(auc))
                handles .append( handle )
                evals.append((name, m, [fpr, tpr, thresholds, auc]))
            plt.legend(handles=handles, loc=4)
            return plt, evals
        #Fitting
        # Grid-search each model family with micro-averaged F1 and persist
        # the best estimator of each into dump_path.
        logger.info("START Training Models")
        gbdt, gbdt_par = ml.GBDT(X_train, Y_train, score='f1_micro',comprehensive=comprehensive_search)
        logger.info("GBDT gridsearch done")
        joblib.dump(gbdt,dump_path+"/gbdt.m")
        logger.info("Dump gbdt processed at path:" + dump_path)
        rf, rf_par = ml.RF(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)
        logger.info("RF gridsearch done")
        joblib.dump(rf,dump_path+"/rf.m")
        logger.info("Dump rf processed at path:" + dump_path)
        svm, svm_par = ml.SVM(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)
        logger.info("svm gridsearch done")
        joblib.dump(svm,dump_path+"/svm.m")
        logger.info("Dump svm processed at path:" + dump_path)
        lr, lr_par = ml.LR(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)
        logger.info("lr fitting done")
        joblib.dump(lr,dump_path+"/lr.m")
        logger.info("Dump lr processed at path:" + dump_path)
        models = [("gbdt",gbdt),
                ("rf", rf),
                ("svm", svm),
                ("lr", lr)]
        #Evaluate
        logger.info("START Evaluating Models")
        # ROC and PR plots on both the training and the test split; each
        # figure is saved and closed so the next call starts clean.
        roc_plt, metrics_roc = eval_roc(models, X_train, Y_train)
        roc_plt.savefig(out_path + "/" + "roc_train.png")
        roc_plt.close()
        #roc_plt.show()
        roc_plt, metrics_roc = eval_roc(models, X_test, Y_test)
        roc_plt.savefig(out_path + "/" + "roc_test.png")
        roc_plt.close()
        #roc_plt.show()
        pr_plt, metrics_pr = eval_pr(models, X_train, Y_train)
        pr_plt.savefig(out_path + "/" + "pr_train.png")
        pr_plt.close()
        #pr_plt.show()
        pr_plt, metrics_pr = eval_pr(models, X_test, Y_test)
        pr_plt.savefig(out_path + "/" + "pr_test.png")
        #pr_plt.show()
        pr_plt.close()
        #Output Evaluation Reference
        # Per model: plot precision/recall against threshold and export
        # the first (precision, recall) pair reached at each threshold in
        # a fixed ladder.
        for i in range(len(metrics_pr)) :
            m = models[i]
            met = metrics_pr[i][2]
            #joblib.dump(m[1],dump_path+"/"+m[0]+".m")
            #logger.info("Dumping " + m[0] + " processed at path:" + dump_path)
            precision, recall, thresholds, x = met
            # precision/recall arrays are one element longer than
            # thresholds, hence the [1:,:] trim before column-stacking.
            rslt = np.concatenate((precision.reshape(precision.shape[0],1)[1:,:]
                                ,recall.reshape(recall.shape[0],1)[1:,:]
                                ,thresholds.reshape(thresholds.shape[0],1)
                                ), axis=1)
            rslt = pd.DataFrame(rslt, columns=["precision","recall","threshold"])
            plt.figure()
            plt.plot(rslt["threshold"], rslt["precision"], color='blue')
            plt.plot(rslt["threshold"], rslt["recall"], color='green')
            plt.savefig(out_path + "/" + m[0] + "_threshold.png")
            plt.close()
            pr_matrix = pd.DataFrame()
            for th in [0.1, 0.2, 0.3, 0.4,
                    0.5, 0.6, 0.7, 0.8,
                    0.9, 0.95, 0.99, 0.995,
                    0.999, 0.9999] :
                pr_matrix = pr_matrix.append( rslt.loc[rslt["threshold"] >= th, :].head(1) )
            pr_matrix.to_excel(out_path+"/" + m[0] + "_score_matrix.xlsx", index=None)
        #Feature Importance
        # NOTE(review): this block runs after the loop above, so m[0] is
        # the last looped model name ('lr') while the importances come
        # from gbdt — the output file name is therefore misleading.
        feature_importance = pd.DataFrame([X_test.columns,gbdt.feature_importances_]).T
        fi = feature_importance.rename(columns={0:"feature", 1:"importance"}).sort_values(by="importance",ascending=False)
        fi.to_excel(out_path + '/' + m[0] + '_feature_importance.xlsx', index=False, merge_cells=False)
        logger.info("Finish")
    except Exception as err:
        # NOTE(review): broad catch — the error is only logged and control
        # falls through to the return below; if the failure happened
        # before `models`/`metrics_pr` were assigned, that return raises
        # NameError. Consider re-raising here.
        logger.error(str(err))
    finally:
        logger.removeHandler(handler)
    return models, metrics_pr
if __name__ == "__main__" :
    # Example invocation using the module-level placeholder settings;
    # fill in wkdir, the two CSV filenames, the column lists and the key
    # column before running for real.
    os.chdir(wkdir)
    models = train(path_list=[wkdir, dump_path, data_path, out_path],
            filenames=["", ""],
            column_lists=[colList_float, colList_cnt, colList_days, colList_unicode],
            keys = '',
            comprehensive_search=True)
| [
"logging.getLogger",
"model.machine_learning.LR",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve",
"numpy.mean",
"model.machine_learning.RF",
"numpy.float64",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"model.machine_learni... | [((825, 852), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (842, 852), False, 'import logging\n'), ((909, 939), 'logging.FileHandler', 'logging.FileHandler', (['"""log.txt"""'], {}), "('log.txt')\n", (928, 939), False, 'import logging\n'), ((991, 1064), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1008, 1064), False, 'import logging\n'), ((1703, 1718), 'os.chdir', 'os.chdir', (['wkdir'], {}), '(wkdir)\n', (1711, 1718), False, 'import os\n'), ((15190, 15205), 'os.chdir', 'os.chdir', (['wkdir'], {}), '(wkdir)\n', (15198, 15205), False, 'import os\n'), ((2219, 2260), 'pandas.concat', 'pd.concat', (['[data_ori0, data_ori1]'], {'axis': '(0)'}), '([data_ori0, data_ori1], axis=0)\n', (2228, 2260), True, 'import pandas as pd\n'), ((11250, 11329), 'model.machine_learning.GBDT', 'ml.GBDT', (['X_train', 'Y_train'], {'score': '"""f1_micro"""', 'comprehensive': 'comprehensive_search'}), "(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)\n", (11257, 11329), True, 'from model import machine_learning as ml\n'), ((11390, 11430), 'sklearn.externals.joblib.dump', 'joblib.dump', (['gbdt', "(dump_path + '/gbdt.m')"], {}), "(gbdt, dump_path + '/gbdt.m')\n", (11401, 11430), False, 'from sklearn.externals import joblib\n'), ((11535, 11612), 'model.machine_learning.RF', 'ml.RF', (['X_train', 'Y_train'], {'score': '"""f1_micro"""', 'comprehensive': 'comprehensive_search'}), "(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)\n", (11540, 11612), True, 'from model import machine_learning as ml\n'), ((11673, 11709), 'sklearn.externals.joblib.dump', 'joblib.dump', (['rf', "(dump_path + '/rf.m')"], {}), "(rf, dump_path + '/rf.m')\n", (11684, 11709), False, 'from sklearn.externals import joblib\n'), ((11805, 11883), 'model.machine_learning.SVM', 'ml.SVM', 
(['X_train', 'Y_train'], {'score': '"""f1_micro"""', 'comprehensive': 'comprehensive_search'}), "(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)\n", (11811, 11883), True, 'from model import machine_learning as ml\n'), ((11944, 11982), 'sklearn.externals.joblib.dump', 'joblib.dump', (['svm', "(dump_path + '/svm.m')"], {}), "(svm, dump_path + '/svm.m')\n", (11955, 11982), False, 'from sklearn.externals import joblib\n'), ((12072, 12149), 'model.machine_learning.LR', 'ml.LR', (['X_train', 'Y_train'], {'score': '"""f1_micro"""', 'comprehensive': 'comprehensive_search'}), "(X_train, Y_train, score='f1_micro', comprehensive=comprehensive_search)\n", (12077, 12149), True, 'from model import machine_learning as ml\n'), ((12206, 12242), 'sklearn.externals.joblib.dump', 'joblib.dump', (['lr', "(dump_path + '/lr.m')"], {}), "(lr, dump_path + '/lr.m')\n", (12217, 12242), False, 'from sklearn.externals import joblib\n'), ((5885, 5899), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5897, 5899), True, 'import pandas as pd\n'), ((5922, 5933), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (5931, 5933), True, 'import pandas as pd\n'), ((5957, 5975), 'numpy.unique', 'np.unique', (['data[Y]'], {}), '(data[Y])\n', (5966, 5975), True, 'import numpy as np\n'), ((8977, 8997), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (8987, 8997), True, 'import matplotlib.pyplot as plt\n'), ((9010, 9033), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (9020, 9033), True, 'import matplotlib.pyplot as plt\n'), ((9046, 9067), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (9054, 9067), True, 'import matplotlib.pyplot as plt\n'), ((9080, 9103), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (9088, 9103), True, 'import matplotlib.pyplot as plt\n'), ((9116, 9159), 'matplotlib.pyplot.title', 'plt.title', (['"""2-class 
Precision-Recall curve"""'], {}), "('2-class Precision-Recall curve')\n", (9125, 9159), True, 'import matplotlib.pyplot as plt\n'), ((9984, 10018), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'handles', 'loc': '(3)'}), '(handles=handles, loc=3)\n', (9994, 10018), True, 'import matplotlib.pyplot as plt\n'), ((10202, 10219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (10212, 10219), True, 'import matplotlib.pyplot as plt\n'), ((10232, 10249), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (10242, 10249), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10283), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (10270, 10283), True, 'import matplotlib.pyplot as plt\n'), ((10296, 10319), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (10304, 10319), True, 'import matplotlib.pyplot as plt\n'), ((10332, 10362), 'matplotlib.pyplot.title', 'plt.title', (['"""2-class ROC curve"""'], {}), "('2-class ROC curve')\n", (10341, 10362), True, 'import matplotlib.pyplot as plt\n'), ((11070, 11104), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'handles', 'loc': '(4)'}), '(handles=handles, loc=4)\n', (11080, 11104), True, 'import matplotlib.pyplot as plt\n'), ((13821, 13885), 'pandas.DataFrame', 'pd.DataFrame', (['rslt'], {'columns': "['precision', 'recall', 'threshold']"}), "(rslt, columns=['precision', 'recall', 'threshold'])\n", (13833, 13885), True, 'import pandas as pd\n'), ((13896, 13908), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13906, 13908), True, 'import matplotlib.pyplot as plt\n'), ((13921, 13981), 'matplotlib.pyplot.plot', 'plt.plot', (["rslt['threshold']", "rslt['precision']"], {'color': '"""blue"""'}), "(rslt['threshold'], rslt['precision'], color='blue')\n", (13929, 13981), True, 'import matplotlib.pyplot as plt\n'), ((13994, 14052), 'matplotlib.pyplot.plot', 'plt.plot', 
(["rslt['threshold']", "rslt['recall']"], {'color': '"""green"""'}), "(rslt['threshold'], rslt['recall'], color='green')\n", (14002, 14052), True, 'import matplotlib.pyplot as plt\n'), ((14071, 14124), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + '/' + m[0] + '_threshold.png')"], {}), "(out_path + '/' + m[0] + '_threshold.png')\n", (14082, 14124), True, 'import matplotlib.pyplot as plt\n'), ((14137, 14148), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14146, 14148), True, 'import matplotlib.pyplot as plt\n'), ((14186, 14200), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14198, 14200), True, 'import pandas as pd\n'), ((1749, 1826), 'pandas.read_csv', 'pd.read_csv', (["(data_path + '/' + filename0)"], {'low_memory': '(False)', 'encoding': 'u"""utf-8"""'}), "(data_path + '/' + filename0, low_memory=False, encoding=u'utf-8')\n", (1760, 1826), True, 'import pandas as pd\n'), ((1953, 2030), 'pandas.read_csv', 'pd.read_csv', (["(data_path + '/' + filename1)"], {'low_memory': '(False)', 'encoding': 'u"""utf-8"""'}), "(data_path + '/' + filename1, low_memory=False, encoding=u'utf-8')\n", (1964, 2030), True, 'import pandas as pd\n'), ((3325, 3338), 'numpy.float64', 'np.float64', (['x'], {}), '(x)\n', (3335, 3338), True, 'import numpy as np\n'), ((9488, 9538), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['Y_test', 'pred[:, 1]'], {}), '(Y_test, pred[:, 1])\n', (9518, 9538), False, 'from sklearn import metrics\n'), ((9583, 9634), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['Y_test', 'pred[:, 1]'], {}), '(Y_test, pred[:, 1])\n', (9614, 9634), False, 'from sklearn import metrics\n'), ((10675, 10712), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Y_test', 'pred[:, 1]'], {}), '(Y_test, pred[:, 1])\n', (10692, 10712), False, 'from sklearn import metrics\n'), ((10749, 10790), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'pred[:, 1]'], {}), 
'(Y_test, pred[:, 1])\n', (10770, 10790), False, 'from sklearn import metrics\n'), ((14642, 14699), 'pandas.DataFrame', 'pd.DataFrame', (['[X_test.columns, gbdt.feature_importances_]'], {}), '([X_test.columns, gbdt.feature_importances_])\n', (14654, 14699), True, 'import pandas as pd\n'), ((3471, 3481), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3478, 3481), True, 'import numpy as np\n'), ((3483, 3492), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (3489, 3492), True, 'import numpy as np\n'), ((4241, 4251), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4248, 4251), True, 'import numpy as np\n'), ((4253, 4262), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (4259, 4262), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import math
import random
import torch
import numpy as np
class BucketIterator(object):
def __init__(self, data, batch_size, shuffle=True, sort=True):
self.shuffle = shuffle
self.sort = sort
self.batches, self.max_doc_len, self.num_batch = self.sort_and_pad(data, batch_size)
self.batch_len = len(self.batches)
def sort_and_pad(self, data, batch_size):
max_doc_len = 0
num_batch = int(math.ceil(len(data) / batch_size))
if self.sort:
sorted_data = sorted(data, key=lambda x: len(x['seq_lens']))
else:
sorted_data = data
batches = []
for i in range(num_batch):
padded_data, batch_max_doc_len = self.pad_data(sorted_data[i*batch_size : (i+1)*batch_size])
batches.append(padded_data)
if batch_max_doc_len > max_doc_len:
max_doc_len = batch_max_doc_len
return batches, max_doc_len, num_batch
@staticmethod
def pad_data(batch_data):
batch_doc_len = []
batch_text_indices = []
batch_y_emotion = []
batch_y_cause = []
batch_y_pair = []
batch_pos_matr = []
max_doc_len = max([len(t['seq_lens']) for t in batch_data])
max_seq_len = max([max(t['seq_lens']) for t in batch_data])
for item in batch_data:
seq_lens, text_indices, y_emotion, y_cause, y_pair, pos_matr = \
item['seq_lens'], item['text_indices'], item['y_emotion'], item['y_cause'], item['y_pair'], item['pos_matr']
doc_len = len(seq_lens)
batch_doc_len.append(doc_len)
padded_text_indices = []
for i, clause_indices in enumerate(text_indices):
clause_padding = [0] * (max_seq_len - seq_lens[i])
clause_indices = clause_indices + clause_padding
padded_text_indices.append(clause_indices)
text_padding = [[0] * max_seq_len] * (max_doc_len - doc_len)
padded_text_indices = padded_text_indices + text_padding
batch_text_indices.append(padded_text_indices)
y_emotion_padding = [0] * (max_doc_len - doc_len)
y_cause_padding = [0] * (max_doc_len - doc_len)
batch_y_emotion.append(y_emotion + y_emotion_padding)
batch_y_cause.append(y_cause + y_cause_padding)
batch_y_pair.append(np.pad(y_pair, \
((0, max_doc_len - doc_len), (0, max_doc_len - doc_len)), 'constant')) # default padding 0s
batch_pos_matr.append(np.pad(pos_matr, \
((0, max_doc_len - doc_len), (0, max_doc_len - doc_len)), 'constant'))
return {
'doc_len': torch.tensor(batch_doc_len),
'text_indices': torch.tensor(batch_text_indices),
'y_emotion': torch.tensor(batch_y_emotion),
'y_cause': torch.tensor(batch_y_cause),
'y_pair': torch.tensor(batch_y_pair),
'pos_matr' :torch.tensor(batch_pos_matr),
},max_doc_len
def __iter__(self):
if self.shuffle:
random.shuffle(self.batches)
for idx in range(self.batch_len):
yield self.batches[idx]
| [
"torch.tensor",
"numpy.pad",
"random.shuffle"
] | [((3191, 3219), 'random.shuffle', 'random.shuffle', (['self.batches'], {}), '(self.batches)\n', (3205, 3219), False, 'import random\n'), ((2480, 2568), 'numpy.pad', 'np.pad', (['y_pair', '((0, max_doc_len - doc_len), (0, max_doc_len - doc_len))', '"""constant"""'], {}), "(y_pair, ((0, max_doc_len - doc_len), (0, max_doc_len - doc_len)),\n 'constant')\n", (2486, 2568), True, 'import numpy as np\n'), ((2641, 2731), 'numpy.pad', 'np.pad', (['pos_matr', '((0, max_doc_len - doc_len), (0, max_doc_len - doc_len))', '"""constant"""'], {}), "(pos_matr, ((0, max_doc_len - doc_len), (0, max_doc_len - doc_len)),\n 'constant')\n", (2647, 2731), True, 'import numpy as np\n'), ((2791, 2818), 'torch.tensor', 'torch.tensor', (['batch_doc_len'], {}), '(batch_doc_len)\n', (2803, 2818), False, 'import torch\n'), ((2849, 2881), 'torch.tensor', 'torch.tensor', (['batch_text_indices'], {}), '(batch_text_indices)\n', (2861, 2881), False, 'import torch\n'), ((2910, 2939), 'torch.tensor', 'torch.tensor', (['batch_y_emotion'], {}), '(batch_y_emotion)\n', (2922, 2939), False, 'import torch\n'), ((2966, 2993), 'torch.tensor', 'torch.tensor', (['batch_y_cause'], {}), '(batch_y_cause)\n', (2978, 2993), False, 'import torch\n'), ((3019, 3045), 'torch.tensor', 'torch.tensor', (['batch_y_pair'], {}), '(batch_y_pair)\n', (3031, 3045), False, 'import torch\n'), ((3072, 3100), 'torch.tensor', 'torch.tensor', (['batch_pos_matr'], {}), '(batch_pos_matr)\n', (3084, 3100), False, 'import torch\n')] |
import numpy as np
from scipy.signal import savgol_filter
from qube.postprocess.dataset import Axis
def create_name(name, suffix=None, prefix=None):
elements = []
if prefix:
elements.append(str(prefix))
elements.append(str(name))
if suffix:
elements.append(str(suffix))
name = '_'.join(elements)
return name
def duplicate_dataset(dataset, suffix=None, prefix=None, custom_name=None):
new_ds = dataset.copy(shallow_copy=False)
if custom_name:
name = custom_name
else:
name = dataset.name
new_ds.name = create_name(name, suffix, prefix)
return new_ds
def remove_dim_in_axes(axes, dim=None):
new_axes = []
if dim is not None:
for si in axes:
ax = si.copy(shallow_copy=False)
if si.dim != dim and si.dim > dim:
ax.dim = si.dim - 1
new_axes.append(ax)
elif si.dim < dim:
new_axes.append(ax)
return new_axes
def histogram1d(dataset, bins=10, range=None, normed=None, weights=None, density=None):
ds = dataset.copy()
ds.name = f'{ds.name}_hist1d'
hist, bins = np.histogram(
ds.value,
bins=bins,
range=range,
normed=normed,
weights=weights,
density=density,
)
bins = bins[:-1] # remove 1 point for ax.plot
axis = Axis(
name=ds.name,
value=bins,
unit=ds.unit,
dim=0,
)
ds.value = hist
ds.unit = 'Counts'
ds.axes = {axis.name: axis}
return ds
def take(dataset, indices, axis=None):
ds = dataset.copy()
ds.name = f'{ds.name}_take'
ds.value = np.take(ds.value, indices=indices, axis=axis)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
if axis is not None:
for si in old_axes:
ax = si.copy(shallow_copy=False)
if si.dim == axis:
ax.value = np.take(ax.value, indices=indices)
ds.add_axis(ax)
elif si.dim < axis or si.dim > axis:
ds.add_axis(ax)
return ds
def mean(dataset, axis=None):
ds = dataset.copy()
ds.name = f'{ds.name}_mean'
ds.value = np.mean(ds.value, axis=axis)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
new_axes = remove_dim_in_axes(old_axes, axis)
for ax in new_axes:
ds.add_axis(ax)
return ds
def nanmean(dataset, axis=None):
ds = dataset.copy()
ds.name = f'{ds.name}_nanmean'
ds.value = np.nanmean(ds.value, axis=axis)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
new_axes = remove_dim_in_axes(old_axes, axis)
for ax in new_axes:
ds.add_axis(ax)
return ds
def subtract(dataset1, dataset2):
ds1 = dataset1.copy()
ds2 = dataset2
ds1.value = ds1.value - ds2.value
ds1.name = f'{ds1.name}-{ds2.name}'
return ds1
def gradient(dataset, axis=None, edge_order=1):
ds = dataset.copy()
ds.name = f'{ds.name}_grad'
ds.unit = f'd {ds.unit}'
ds.value = np.gradient(ds.value, axis=axis, edge_order=edge_order)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
if axis is not None:
for si in old_axes:
ax = si.copy(shallow_copy=False)
ds.add_axis(ax)
return ds
def smooth(dataset, window=5, order=3, axis=-1,**kwargs):
ds = dataset.copy()
ds.name = f'{ds.name}_smooth'
ds.value = savgol_filter(ds.value, window_length=window, polyorder=order, axis=axis,**kwargs)
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
if axis is not None:
for si in old_axes:
ax = si.copy(shallow_copy=False)
ds.add_axis(ax)
return ds
def fft(
dataset,
axis=-1,
as_period=False, # return "xdata" as period instead of frequency
no_dc_offset=True, # take out point at 0 frequency
only_positive=True, # get only positive frequencies
**kwargs
):
ds_amp = dataset.copy()
ds_amp.name = f'{ds_amp.name}_fftamp'
ds_amp.unit = f'{ds_amp.unit}'
ds_pha = dataset.copy()
ds_pha.name = f'{ds_pha.name}_fftpha'
ds_pha.unit = f'rad'
old_axes = dataset.get_axes(counters=False)
ds_amp.clear_axes()
ds_pha.clear_axes()
if as_period:
no_dc_offset = True
if no_dc_offset:
ind0 = 1
else:
ind0 = 0
axs = []
for old_axis in old_axes:
ax = old_axis.copy(shallow_copy=False)
if ax.dim == axis:
N = len(ax.value)
Nhalf = int(N/2)
if only_positive:
ind1 = Nhalf
else:
ind1 = N
xdata_freq = np.fft.fftfreq(len(ax.value), np.abs(ax.value[1] - ax.value[0]))[ind0:ind1]
if not as_period:
ax.name = f'{ax.name}_fftfreq'
ax.unit = f'1/{ax.unit}'
ax.value = xdata_freq
print(ax.unit)
else:
ax.name = f'{ax.name}_fftper'
ax.unit = f'{ax.unit}'
ax.value = 1.0/xdata_freq
axs.append(ax)
data2analyse = np.moveaxis(dataset.value, axis, 0)
value_complex = np.fft.fft(data2analyse,axis=0,**kwargs)
value_complex = value_complex[ind0:ind1,Ellipsis]
value_complex = np.moveaxis(value_complex, 0, axis)
ds_amp.value = np.abs(value_complex)
ds_pha.value = np.angle(value_complex)
for ax in axs:
ds_amp.add_axis(ax)
ds_pha.add_axis(ax)
return ds_amp,ds_pha
def value_mask_by_range(dataset, init, final, value, unit=None):
ds = dataset.copy()
ds.name = f'{ds.name}_vmasked'
if init <= final:
f1 = np.greater_equal
f2 = np.less_equal
else:
f1 = np.less_equal
f2 = np.greater_equal
idxs_1 = f1(ds.value, init)
idxs_2 = f2(ds.value, final)
idxs = np.logical_and(idxs_1, idxs_2)
new_value = np.array(ds.value)
new_value[idxs] = value
ds.value = new_value
ds.unit = unit
return ds
def value_mask_by_bounds(dataset, bounds, values, unit=None):
ds = dataset.copy()
ds.name = f'{ds.name}_vmasked'
" Verify length of bounds and values"
bounds = np.array(bounds)
values = np.array(values)
valid_length = len(values) == len(bounds) + 1
if not valid_length:
raise ValueError('len(values) must be len(bounds) + 1)')
" Verify that bounds increase or decrease "
comparison = [left < right for left, right in zip(bounds[0:-1], bounds[1:])]
comparison = np.array(comparison)
reduced_comp = list(set(comparison))
valid_slope = len(reduced_comp) == 1
if not valid_slope:
raise ValueError('bounds must increase or decrease')
" Sort bounds and values to increase "
if reduced_comp[0] is False:
idxs_sorted = np.argsort(bounds)
bounds = np.sort(bounds)
values = values[idxs_sorted]
" Apply mask "
n_bulk = len(values) - 2
new_values = np.zeros_like(ds.value)
for i, vi in enumerate(values):
if i == 0:
idxs = np.less(ds.value, bounds[i])
elif i < n_bulk:
idxs_left = np.greater_equal(ds.value, bounds[i - 1])
idxs_right = np.less_equal(ds.value, bounds[i])
idxs = np.logical_and(idxs_left, idxs_right)
else:
idxs = np.greater(ds.value, bounds[i - 1])
new_values[idxs] = vi
ds.value = new_values
ds.unit = unit
return ds
def boolmask(dataset, value, key='=='):
ds = dataset.copy()
ds.name = f'{ds.name}_bmasked'
if key == '==':
ds.value = ds.value == value
if key == '>=':
ds.value = ds.value >= value
if key == '<=':
ds.value = ds.value <= value
if key == '!=':
ds.value = ds.value != value
if key == '>':
ds.value = ds.value > value
if key == '<':
ds.value = ds.value < value
ds.unit = 'boolean'
return ds
def probability(dataset, value, key='==', axis=None):
ds = dataset.copy()
ds.name = f'{ds.name}_prob'
ds_bool = boolmask(ds, value, key=key)
boolv = ds_bool.value
ds.value = np.apply_along_axis(_prob, axis=axis, arr=boolv)
ds.unit = '%'
old_axes = ds.get_axes(counters=False)
ds.clear_axes()
new_axes = remove_dim_in_axes(old_axes, dim=axis)
for ax in new_axes:
ds.add_axis(ax)
return ds
def _prob(arr):
total_counts = arr.size
nonzero_counts = np.count_nonzero(arr)
if total_counts >= 0:
prob = 1. * nonzero_counts / total_counts * 100.
else:
prob = 0
return prob
if __name__ == '__main__':
pass
bonds = [-1, 0, 1, 2]
values = [0, 1, 2, 3, 4]
| [
"numpy.less_equal",
"scipy.signal.savgol_filter",
"qube.postprocess.dataset.Axis",
"numpy.nanmean",
"numpy.array",
"numpy.count_nonzero",
"numpy.argsort",
"numpy.moveaxis",
"numpy.gradient",
"numpy.greater_equal",
"numpy.mean",
"numpy.histogram",
"numpy.less",
"numpy.greater",
"numpy.sor... | [((1153, 1253), 'numpy.histogram', 'np.histogram', (['ds.value'], {'bins': 'bins', 'range': 'range', 'normed': 'normed', 'weights': 'weights', 'density': 'density'}), '(ds.value, bins=bins, range=range, normed=normed, weights=\n weights, density=density)\n', (1165, 1253), True, 'import numpy as np\n'), ((1367, 1418), 'qube.postprocess.dataset.Axis', 'Axis', ([], {'name': 'ds.name', 'value': 'bins', 'unit': 'ds.unit', 'dim': '(0)'}), '(name=ds.name, value=bins, unit=ds.unit, dim=0)\n', (1371, 1418), False, 'from qube.postprocess.dataset import Axis\n'), ((1659, 1704), 'numpy.take', 'np.take', (['ds.value'], {'indices': 'indices', 'axis': 'axis'}), '(ds.value, indices=indices, axis=axis)\n', (1666, 1704), True, 'import numpy as np\n'), ((2189, 2217), 'numpy.mean', 'np.mean', (['ds.value'], {'axis': 'axis'}), '(ds.value, axis=axis)\n', (2196, 2217), True, 'import numpy as np\n'), ((2502, 2533), 'numpy.nanmean', 'np.nanmean', (['ds.value'], {'axis': 'axis'}), '(ds.value, axis=axis)\n', (2512, 2533), True, 'import numpy as np\n'), ((3033, 3088), 'numpy.gradient', 'np.gradient', (['ds.value'], {'axis': 'axis', 'edge_order': 'edge_order'}), '(ds.value, axis=axis, edge_order=edge_order)\n', (3044, 3088), True, 'import numpy as np\n'), ((3424, 3511), 'scipy.signal.savgol_filter', 'savgol_filter', (['ds.value'], {'window_length': 'window', 'polyorder': 'order', 'axis': 'axis'}), '(ds.value, window_length=window, polyorder=order, axis=axis,\n **kwargs)\n', (3437, 3511), False, 'from scipy.signal import savgol_filter\n'), ((5190, 5225), 'numpy.moveaxis', 'np.moveaxis', (['dataset.value', 'axis', '(0)'], {}), '(dataset.value, axis, 0)\n', (5201, 5225), True, 'import numpy as np\n'), ((5246, 5288), 'numpy.fft.fft', 'np.fft.fft', (['data2analyse'], {'axis': '(0)'}), '(data2analyse, axis=0, **kwargs)\n', (5256, 5288), True, 'import numpy as np\n'), ((5361, 5396), 'numpy.moveaxis', 'np.moveaxis', (['value_complex', '(0)', 'axis'], {}), '(value_complex, 0, axis)\n', 
(5372, 5396), True, 'import numpy as np\n'), ((5421, 5442), 'numpy.abs', 'np.abs', (['value_complex'], {}), '(value_complex)\n', (5427, 5442), True, 'import numpy as np\n'), ((5462, 5485), 'numpy.angle', 'np.angle', (['value_complex'], {}), '(value_complex)\n', (5470, 5485), True, 'import numpy as np\n'), ((5947, 5977), 'numpy.logical_and', 'np.logical_and', (['idxs_1', 'idxs_2'], {}), '(idxs_1, idxs_2)\n', (5961, 5977), True, 'import numpy as np\n'), ((5995, 6013), 'numpy.array', 'np.array', (['ds.value'], {}), '(ds.value)\n', (6003, 6013), True, 'import numpy as np\n'), ((6279, 6295), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (6287, 6295), True, 'import numpy as np\n'), ((6309, 6325), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (6317, 6325), True, 'import numpy as np\n'), ((6613, 6633), 'numpy.array', 'np.array', (['comparison'], {}), '(comparison)\n', (6621, 6633), True, 'import numpy as np\n'), ((7055, 7078), 'numpy.zeros_like', 'np.zeros_like', (['ds.value'], {}), '(ds.value)\n', (7068, 7078), True, 'import numpy as np\n'), ((8223, 8271), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_prob'], {'axis': 'axis', 'arr': 'boolv'}), '(_prob, axis=axis, arr=boolv)\n', (8242, 8271), True, 'import numpy as np\n'), ((8536, 8557), 'numpy.count_nonzero', 'np.count_nonzero', (['arr'], {}), '(arr)\n', (8552, 8557), True, 'import numpy as np\n'), ((6900, 6918), 'numpy.argsort', 'np.argsort', (['bounds'], {}), '(bounds)\n', (6910, 6918), True, 'import numpy as np\n'), ((6936, 6951), 'numpy.sort', 'np.sort', (['bounds'], {}), '(bounds)\n', (6943, 6951), True, 'import numpy as np\n'), ((7153, 7181), 'numpy.less', 'np.less', (['ds.value', 'bounds[i]'], {}), '(ds.value, bounds[i])\n', (7160, 7181), True, 'import numpy as np\n'), ((1924, 1958), 'numpy.take', 'np.take', (['ax.value'], {'indices': 'indices'}), '(ax.value, indices=indices)\n', (1931, 1958), True, 'import numpy as np\n'), ((7231, 7272), 'numpy.greater_equal', 'np.greater_equal', 
(['ds.value', 'bounds[i - 1]'], {}), '(ds.value, bounds[i - 1])\n', (7247, 7272), True, 'import numpy as np\n'), ((7298, 7332), 'numpy.less_equal', 'np.less_equal', (['ds.value', 'bounds[i]'], {}), '(ds.value, bounds[i])\n', (7311, 7332), True, 'import numpy as np\n'), ((7352, 7389), 'numpy.logical_and', 'np.logical_and', (['idxs_left', 'idxs_right'], {}), '(idxs_left, idxs_right)\n', (7366, 7389), True, 'import numpy as np\n'), ((7423, 7458), 'numpy.greater', 'np.greater', (['ds.value', 'bounds[i - 1]'], {}), '(ds.value, bounds[i - 1])\n', (7433, 7458), True, 'import numpy as np\n'), ((4751, 4784), 'numpy.abs', 'np.abs', (['(ax.value[1] - ax.value[0])'], {}), '(ax.value[1] - ax.value[0])\n', (4757, 4784), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
from keras import Input, Model
from keras.callbacks import History
from keras.engine.saving import load_model
from keras.layers import Dense, regularizers, Dropout, BatchNormalization
from keras.optimizers import Optimizer
class MLP:
def __init__(self, input_size: Tuple, optimizer: Optimizer, loss, hidden_layers: Tuple = (3, 3, 1),
activation: str = 'relu', output_activation: str = 'relu',
dropout: float = 0., batch_normalization: bool = False,
weight_decay_l1: float = 0., weight_decay_l2: float = 0.):
# define model
self.hidden_layers = hidden_layers
# create the model
inputs = x_data = Input(shape=input_size)
# rest of the hidden layers if any
for neurons in hidden_layers[:-1]:
x_data = Dense(neurons, activation=activation,
kernel_regularizer=regularizers.l1_l2(l1=weight_decay_l1, l2=weight_decay_l2),
bias_regularizer=regularizers.l1_l2(l1=weight_decay_l1, l2=weight_decay_l2))(x_data)
if dropout > 0.:
x_data = Dropout(dropout)(x_data)
if batch_normalization:
x_data = BatchNormalization()(x_data)
predictions = Dense(hidden_layers[-1], activation=output_activation)(x_data)
self.model = Model(inputs=inputs, outputs=predictions)
self.model.compile(optimizer=optimizer, loss=loss)
def fit(self, inputs: np.ndarray, outputs: np.ndarray, batch_size: int = 1, epochs: int = 100) -> History:
return self.model.fit(inputs, outputs, batch_size=batch_size, epochs=epochs)
def predict(self, data, number_of_steps):
predictions = np.empty(shape=(number_of_steps,))
data_shape = data.shape
for i in range(predictions.shape[0]):
predicted_value = self.model.predict(data)
predictions[i] = predicted_value.item()
# remove first element and add the prediction
data = np.reshape(np.append(data[0][1:], predicted_value.item()), newshape=data_shape)
return predictions
def save_model(self, name):
self.model.save('ckpt/' + name)
def load_model(self, name):
self.model = load_model(name)
| [
"keras.Model",
"keras.Input",
"numpy.empty",
"keras.layers.Dense",
"keras.engine.saving.load_model",
"keras.layers.BatchNormalization",
"keras.layers.Dropout",
"keras.layers.regularizers.l1_l2"
] | [((730, 753), 'keras.Input', 'Input', ([], {'shape': 'input_size'}), '(shape=input_size)\n', (735, 753), False, 'from keras import Input, Model\n'), ((1382, 1423), 'keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'predictions'}), '(inputs=inputs, outputs=predictions)\n', (1387, 1423), False, 'from keras import Input, Model\n'), ((1749, 1783), 'numpy.empty', 'np.empty', ([], {'shape': '(number_of_steps,)'}), '(shape=(number_of_steps,))\n', (1757, 1783), True, 'import numpy as np\n'), ((2281, 2297), 'keras.engine.saving.load_model', 'load_model', (['name'], {}), '(name)\n', (2291, 2297), False, 'from keras.engine.saving import load_model\n'), ((1298, 1352), 'keras.layers.Dense', 'Dense', (['hidden_layers[-1]'], {'activation': 'output_activation'}), '(hidden_layers[-1], activation=output_activation)\n', (1303, 1352), False, 'from keras.layers import Dense, regularizers, Dropout, BatchNormalization\n'), ((1161, 1177), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1168, 1177), False, 'from keras.layers import Dense, regularizers, Dropout, BatchNormalization\n'), ((1247, 1267), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1265, 1267), False, 'from keras.layers import Dense, regularizers, Dropout, BatchNormalization\n'), ((940, 998), 'keras.layers.regularizers.l1_l2', 'regularizers.l1_l2', ([], {'l1': 'weight_decay_l1', 'l2': 'weight_decay_l2'}), '(l1=weight_decay_l1, l2=weight_decay_l2)\n', (958, 998), False, 'from keras.layers import Dense, regularizers, Dropout, BatchNormalization\n'), ((1039, 1097), 'keras.layers.regularizers.l1_l2', 'regularizers.l1_l2', ([], {'l1': 'weight_decay_l1', 'l2': 'weight_decay_l2'}), '(l1=weight_decay_l1, l2=weight_decay_l2)\n', (1057, 1097), False, 'from keras.layers import Dense, regularizers, Dropout, BatchNormalization\n')] |
from tensorflow import keras
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
import cv2
from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb
import numpy as np
from inception_embeddings import inception_embedding
import json
with open('parameters.json') as f:
data = json.load(f)
filepath = data['model_path']
model = keras.models.load_model(filepath)
TestImagePath=data['colo_images']
test = []
for file in tqdm(os.listdir(TestImagePath)):
try:
img = cv2.imread(TestImagePath+file)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (256,256))
test.append(img)
except:
pass
test = np.array(test).astype('float32') / 255.
im = gray2rgb(rgb2gray(test))
im_embed = inception_embedding(im)
im = rgb2lab(im)[:,:,:,0]
im = im.reshape(im.shape+(1,))
pred = model.predict([im, im_embed])
pred = pred * 128
decodings = np.zeros((len(pred),256, 256, 3))
for i in range(len(pred)):
pp = np.zeros((256, 256, 3))
pp[:,:,0] = im[i][:,:,0]
pp[:,:,1:] = pred[i]
decodings[i] = lab2rgb(pp)
cv2.imwrite("img_"+str(i)+".jpg", lab2rgb(pp))
# recolored
plt.figure(figsize=(40, 10))
for i in range(5):
plt.subplot(3, 10, i + 1 +10)
plt.imshow(decodings[i].reshape(256, 256,3))
plt.axis('off')
plt.tight_layout()
plt.show() | [
"skimage.color.rgb2gray",
"os.listdir",
"skimage.color.rgb2lab",
"cv2.imread",
"skimage.color.lab2rgb",
"inception_embeddings.inception_embedding",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.tight_layout",
"cv2.cvtColor",
... | [((368, 401), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['filepath'], {}), '(filepath)\n', (391, 401), False, 'from tensorflow import keras\n'), ((793, 816), 'inception_embeddings.inception_embedding', 'inception_embedding', (['im'], {}), '(im)\n', (812, 816), False, 'from inception_embeddings import inception_embedding\n'), ((1204, 1232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(40, 10)'}), '(figsize=(40, 10))\n', (1214, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1385), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1383, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1395, 1397), True, 'import matplotlib.pyplot as plt\n'), ((311, 323), 'json.load', 'json.load', (['f'], {}), '(f)\n', (320, 323), False, 'import json\n'), ((470, 495), 'os.listdir', 'os.listdir', (['TestImagePath'], {}), '(TestImagePath)\n', (480, 495), False, 'import os\n'), ((765, 779), 'skimage.color.rgb2gray', 'rgb2gray', (['test'], {}), '(test)\n', (773, 779), False, 'from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb\n'), ((823, 834), 'skimage.color.rgb2lab', 'rgb2lab', (['im'], {}), '(im)\n', (830, 834), False, 'from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb\n'), ((1024, 1047), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (1032, 1047), True, 'import numpy as np\n'), ((1124, 1135), 'skimage.color.lab2rgb', 'lab2rgb', (['pp'], {}), '(pp)\n', (1131, 1135), False, 'from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb\n'), ((1258, 1288), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(10)', '(i + 1 + 10)'], {}), '(3, 10, i + 1 + 10)\n', (1269, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1358), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1351, 1358), True, 'import matplotlib.pyplot as plt\n'), ((523, 555), 'cv2.imread', 
'cv2.imread', (['(TestImagePath + file)'], {}), '(TestImagePath + file)\n', (533, 555), False, 'import cv2\n'), ((569, 605), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (581, 605), False, 'import cv2\n'), ((620, 647), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 256))\n', (630, 647), False, 'import cv2\n'), ((1175, 1186), 'skimage.color.lab2rgb', 'lab2rgb', (['pp'], {}), '(pp)\n', (1182, 1186), False, 'from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb\n'), ((708, 722), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (716, 722), True, 'import numpy as np\n')] |
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import binascii
import numpy as np
import random
import argparse
import time
from lxml import etree
from lm_scorer import LMScorer
from utils import _load_config, _remove_outliner
from pdfalto.alto_parser import filter_text
from pdfalto.wrapper import PdfAltoWrapper
import logging
import logging.handlers
# default logging settings, will be override by config file
logging.basicConfig(filename='client.log', filemode='w', level=logging.DEBUG)
from sklearn.utils import shuffle
import xgboost as xgb
import cld3
SCORER_FILE = "scorer.json"
# to do: make this list dynamic by exploring the data/models repository
supported_languages = ['en', 'de', 'fr']
class OCRScorer(object):
config = None
config_path = None
# models is a map of language models, language (two letters ISO 639-1) as key
models = {}
# scorers is a map of regression models, language (two letters ISO 639-1) as key
scorers = {}
def __init__(self, config_path="./config.yml"):
self.config_path = config_path
self.config = _load_config(config_path)
logs_filename = "client.log"
if "log_file" in self.config:
logs_filename = self.config['log_file']
logs_level = logging.DEBUG
if "log_level" in self.config:
if self.config["log_level"] == 'INFO':
logs_level = logging.INFO
elif self.config["log_level"] == 'ERROR':
logs_level = logging.ERROR
elif self.config["log_level"] == 'WARNING':
logs_level = logging.WARNING
elif self.config["log_level"] == 'CRITICAL':
logs_level = logging.CRITICAL
else:
logs_level = logging.NOTSET
logging.basicConfig(filename=logs_filename, filemode='w', level=logs_level)
print("logs are written in " + logs_filename)
def load_lm_model(self, lang):
lm_model = LMScorer(lang, config_path=self.config_path)
lm_model.load()
self.models[lang] = lm_model
def get_lm_model(self, lang):
local_model = None
if not lang in self.models:
self.load_lm_model(lang)
if not lang in self.models:
raise Exception("No model available for the language " + lang)
local_model = self.models[lang]
if local_model == None:
raise Exception("Failed to identify the language")
return local_model
def get_scorer_model(self, lang):
if lang in self.scorers:
return self.scorers[lang]
self.load_scorer(lang)
if not lang in self.scorers:
raise Exception("No model available for the language " + lang)
return self.scorers[lang]
def score_text(self, text, lang="en"):
'''
If no language is provided, use a language detector
'''
local_model = None
try:
local_model = self.get_lm_model(lang)
except:
logging.error("Fail to load the language model for language " + lang)
if local_model is None:
raise Exception("Failed to process language model for " + lang)
text_scores = []
if len(text) < 500:
# we process the whole text segment
text_scores.append(local_model.score_text(text))
else:
# we sample random segments
for text_sample in local_model.read_text_sequence(text, max_length=600, samples=10):
local_lang = cld3.get_language(text_sample)
#print(local_lang)
#print(str(local_lang.probability))
if not local_lang.is_reliable or local_lang.language != lang or local_lang.proportion != 1.0 :
continue
local_score = local_model.score_text(text_sample)
text_scores.append(local_score)
local_text_score = np.mean(text_scores)
deviation = np.std(text_scores, dtype=np.float32)
scorer_model = None
try:
scorer_model = self.get_scorer_model(lang)
except:
logging.error("Fail to load the scorer model for language " + lang)
if scorer_model is None:
raise Exception("Failed to process scorer model for language " + lang)
X = np.zeros((len(text_scores), 1), dtype=np.float32)
for i in range(len(text_scores)):
X[i,0]= (text_scores[i])
#X[i,1]= deviation
#print(X)
x_pred = xgb.DMatrix(X)
final_text_scores = scorer_model.predict(x_pred)
#print(final_text_scores)
avg_text_score = np.mean(final_text_scores)
max_text_score = np.max(final_text_scores)
min_text_score = np.min(final_text_scores)
deviation = np.std(final_text_scores, dtype=np.float32)
boost_max = 1 / max_text_score
boost_min = 0.1 / min_text_score
# normalize
if avg_text_score > 0.5:
final_text_score = avg_text_score * boost_max
else:
final_text_score = avg_text_score * boost_min
if final_text_score > 1.0:
final_text_score = 1.0
if final_text_score < 0.0:
final_text_score = 0.0
#print(final_text_score)
return float(final_text_score)
def score_txt_file(self, txt_file, lang=None):
logging.info("processing text file: " + txt_file)
if txt_file == None or not os.path.isfile(txt_file) or not txt_file.endswith(".txt"):
print("issue")
raise ValueError('Invalid input file: ' + txt_file)
with open(txt_file, "r") as text_file:
local_text = text_file.read()
return self.score_text(local_text)
return 0.0
def score_pdf(self, pdf_file, lang=None):
'''
PDF file is parsed by external pdfalto tool. Spatial information can be used.
If no language is provided, use a language detector
'''
# convert pdf file
logging.info("processing PDF file: " + pdf_file)
pdfalto = PdfAltoWrapper('./data/pdfalto/lin64/pdfalto')
output_path = os.path.join('./data/pdfalto/tmp/', binascii.b2a_hex(os.urandom(7)).decode() + ".xml")
pdfalto.convert(pdf_file, output_path)
logging.info("pdfalto conversion: " + output_path)
local_text = filter_text(output_path)
local_score = self.score_text(local_text)
# cleaning ALTO file(s)
if os.path.isfile(output_path):
os.remove(output_path)
output_path = output_path.replace(".xml", "_metadata.xml")
if os.path.isfile(output_path):
os.remove(output_path)
return local_score
def score_xml(self, xml_file, lang=None):
'''
Processing of XML file with text body section, such as ST.36 format or TEI format
If no language is provided, use a language detector
'''
logging.info("processing XML file: " + xml_file)
if xml_file == None or not os.path.isfile(xml_file) or not xml_file.endswith(".xml"):
raise ValueError('Invalid input file: ' + txt_file)
with open(txt_file, "r") as text_file:
local_text = file.read()
root = etree.fromstring(xml_string)
text = etree.tostring(root, encoding='utf-8', method='text')
text = text.decode()
return self.score_text(text, lang)
return 0.0
def score_repository(self, repository):
'''
Score files in a repository. Supported file formats (by file extensions) are
any combination of text files (.txt), XML files (.xml) and PDF files (.pdf)
Return scores as a dict mapping file names to OCR quality score for the file.
'''
results = {}
files_scores = []
logging.info("processing repository: " + repository)
if repository == None or not os.path.isdir(repository):
raise ValueError('Invalid directory to be read: ' + target_dir)
nb_files = 0
for file in os.listdir(repository):
logging.info("processing file: " + file)
if file.endswith(".txt"):
try:
results[file] = self.score_txt_file(os.path.join(repository, file))
except:
logging.warning("Fail to score text file " + os.path.join(repository, file))
elif file.endswith(".xml"):
try:
results[file] = self.score_xml_file(os.path.join(repository, file))
except:
logging.warning("Fail to score XML file " + os.path.join(repository, file))
elif file.endswith(".pdf"):
try:
results[file] = self.score_pdf_file(os.path.join(repository, file))
except:
logging.warning("Fail to score PDF file " + os.path.join(repository, file))
if file in results:
files_scores.append(results[file])
if nb_files > 100:
break
nb_files += 1
print("\taverage score:", str(np.mean(files_scores)))
print("\tlowest score:", str(np.min(files_scores)))
print("\thighest score:", str(np.max(files_scores)))
deviation = np.std(files_scores, dtype=np.float64)
print("\tstandard deviation:", str(deviation))
return results
def train_scorer(self, lang):
'''
Train a scorer regression model, which uses the LM probability score as feature,
combined with others to produce a normalized score in [0,1]
'''
x_pos, y_pos = self.load_positive_examples(lang)
x_neg, y_neg = self.load_degraded_examples(lang)
if len(x_neg) > len(x_pos):
x_neg = x_neg[:len(x_pos)]
y_neg = y_neg[:len(x_pos)]
x_pos, y_pos = _remove_outliner(x_pos, y_pos)
x_neg, y_neg = _remove_outliner(x_neg, y_neg)
x = x_pos + x_neg
y = y_pos + y_neg
x, y = shuffle(x, y)
print(x)
print(y)
#dtrain = xgb.DMatrix(x, label=y)
#xgb_model = xgb.XGBRegressor(objective="reg:squarederror", random_state=42)
xgb_model = xgb.XGBRegressor(objective ='reg:squarederror', colsample_bytree = 0.3, learning_rate = 0.1,
max_depth = 5, alpha = 10, n_estimators = 10)
xgb_model.fit(x, y)
self.scorers[lang] = xgb_model
def save_scorer(self, lang):
# save scorer
save_path = os.path.join(self.config["models_dir"], lang, SCORER_FILE)
model_xgb = self.get_scorer_model(lang)
if model_xgb is not None:
# save to JSON
model_xgb.save_model(save_path)
def load_scorer(self, lang):
load_path = os.path.join(self.config["models_dir"], lang, SCORER_FILE)
model_xgb = xgb.Booster()
model_xgb.load_model(load_path)
self.scorers[lang] = model_xgb
def load_positive_examples(self, lang):
x = []
y = []
text_scores = []
local_model = None
try:
local_model = self.get_lm_model(lang)
except:
logging.error("Fail to load the model for language " + lang)
if local_model is None:
raise Exception("Failed to process language " + lang)
start_time = time.time()
for text in local_model.read_files_sequence(max_length=500, samples=None):
text_scores.append(local_model.score_text(text))
total_time = round(time.time() - start_time, 3)
print("\nscored", str(len(text_scores)), "text segments in {:.3f}s".format(total_time))
scores = np.array(text_scores)
print("\taverage score:", str(np.mean(scores)))
print("\tlowest score:", str(np.min(scores)))
print("\thighest score:", str(np.max(scores)))
deviation = np.std(scores, dtype=np.float64)
for i in range(len(scores)):
features = []
# LM probability of the sequence
features.append(scores[i])
# general standard deviation
#features.append(deviation)
x.append(features)
y.append(1.0)
return x, y
def load_degraded_examples(self, lang):
x = []
y = []
local_model = None
try:
local_model = self.get_lm_model(lang)
except:
logging.error("Fail to load the model for language " + lang)
if local_model is None:
raise Exception("Failed to process language " + lang)
start_time = time.time()
text_scores = []
target_dir = os.path.join(self.config['training_dir'], lang, "ocr")
nb_file = 0
for file in os.listdir(target_dir):
if file.endswith(".txt"):
print(file)
i = 0
for text in local_model.read_file_sequence(target_file=os.path.join(target_dir, file),
max_length=500, samples=None):
text_scores.append(local_model.score_text(text))
i += 1
if i>200:
break
if nb_file > 10:
break
nb_file += 1
total_time = round(time.time() - start_time, 3)
print("\nscored", str(len(text_scores)), "text segments in {:.3f}s".format(total_time))
scores = np.array(text_scores)
print("\taverage score:", str(np.mean(scores)))
print("\tlowest score:", str(np.min(scores)))
print("\thighest score:", str(np.max(scores)))
deviation = np.std(scores, dtype=np.float64)
for i in range(len(scores)):
features = []
# LM probability of the sequence
features.append(scores[i])
# general standard deviation
#features.append(deviation)
x.append(features)
y.append(0.0)
return x, y
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Simple command line OCR scorer. Use the service for more intensive/pipeline tasks.")
parser.add_argument("--config-file", type=str, required=False, help="configuration file to be used", default='./config.yml')
parser.add_argument("--debug", action="store_true", required=False, default=False,
help="activate the debug mode (override the config file logging parameter)")
parser.add_argument("--text-file", type=str, required=False, help="text file to be analyzed, expected encoding is UTF-8")
parser.add_argument("--pdf-file", type=str, required=False, help="PDF file to be analyzed")
parser.add_argument("--xml-file", type=str, required=False, help="XML file to be analyzed, with text body section")
parser.add_argument("--repository", type=str, required=False, help="a repository of text/XML/PDF files to be evaluated")
args = parser.parse_args()
debug = args.debug
text_file = args.text_file
pdf_file = args.pdf_file
xml_file = args.xml_file
config_file = args.config_file
repository = args.repository
try:
scorer = OCRScorer(config_file)
if pdf_file != None:
print(scorer.score_pdf(pdf_file))
elif text_file != None:
print(scorer.score_pdf(text_file))
elif xml_file != None:
print(scorer.score_patent_xml(patent_xml_file))
elif repository != None:
results = scorer.score_repository(repository)
print(results)
else:
print("At least one file to be evaluated must be provided\n")
parser.print_help()
exit(1)
except Exception as e:
print("Scorer failed: ", str(e))
exit(1)
| [
"utils._load_config",
"pdfalto.wrapper.PdfAltoWrapper",
"numpy.array",
"xgboost.Booster",
"lxml.etree.fromstring",
"xgboost.DMatrix",
"logging.info",
"logging.error",
"lxml.etree.tostring",
"os.remove",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"numpy.max",
"pdfalto.alto_par... | [((420, 497), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""client.log"""', 'filemode': '"""w"""', 'level': 'logging.DEBUG'}), "(filename='client.log', filemode='w', level=logging.DEBUG)\n", (439, 497), False, 'import logging\n'), ((14206, 14337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple command line OCR scorer. Use the service for more intensive/pipeline tasks."""'}), "(description=\n 'Simple command line OCR scorer. Use the service for more intensive/pipeline tasks.'\n )\n", (14229, 14337), False, 'import argparse\n'), ((1099, 1124), 'utils._load_config', '_load_config', (['config_path'], {}), '(config_path)\n', (1111, 1124), False, 'from utils import _load_config, _remove_outliner\n'), ((1794, 1869), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logs_filename', 'filemode': '"""w"""', 'level': 'logs_level'}), "(filename=logs_filename, filemode='w', level=logs_level)\n", (1813, 1869), False, 'import logging\n'), ((1979, 2023), 'lm_scorer.LMScorer', 'LMScorer', (['lang'], {'config_path': 'self.config_path'}), '(lang, config_path=self.config_path)\n', (1987, 2023), False, 'from lm_scorer import LMScorer\n'), ((3975, 3995), 'numpy.mean', 'np.mean', (['text_scores'], {}), '(text_scores)\n', (3982, 3995), True, 'import numpy as np\n'), ((4016, 4053), 'numpy.std', 'np.std', (['text_scores'], {'dtype': 'np.float32'}), '(text_scores, dtype=np.float32)\n', (4022, 4053), True, 'import numpy as np\n'), ((4584, 4598), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {}), '(X)\n', (4595, 4598), True, 'import xgboost as xgb\n'), ((4717, 4743), 'numpy.mean', 'np.mean', (['final_text_scores'], {}), '(final_text_scores)\n', (4724, 4743), True, 'import numpy as np\n'), ((4769, 4794), 'numpy.max', 'np.max', (['final_text_scores'], {}), '(final_text_scores)\n', (4775, 4794), True, 'import numpy as np\n'), ((4820, 4845), 'numpy.min', 'np.min', (['final_text_scores'], {}), 
'(final_text_scores)\n', (4826, 4845), True, 'import numpy as np\n'), ((4866, 4909), 'numpy.std', 'np.std', (['final_text_scores'], {'dtype': 'np.float32'}), '(final_text_scores, dtype=np.float32)\n', (4872, 4909), True, 'import numpy as np\n'), ((5449, 5498), 'logging.info', 'logging.info', (["('processing text file: ' + txt_file)"], {}), "('processing text file: ' + txt_file)\n", (5461, 5498), False, 'import logging\n'), ((6102, 6150), 'logging.info', 'logging.info', (["('processing PDF file: ' + pdf_file)"], {}), "('processing PDF file: ' + pdf_file)\n", (6114, 6150), False, 'import logging\n'), ((6169, 6215), 'pdfalto.wrapper.PdfAltoWrapper', 'PdfAltoWrapper', (['"""./data/pdfalto/lin64/pdfalto"""'], {}), "('./data/pdfalto/lin64/pdfalto')\n", (6183, 6215), False, 'from pdfalto.wrapper import PdfAltoWrapper\n'), ((6380, 6430), 'logging.info', 'logging.info', (["('pdfalto conversion: ' + output_path)"], {}), "('pdfalto conversion: ' + output_path)\n", (6392, 6430), False, 'import logging\n'), ((6453, 6477), 'pdfalto.alto_parser.filter_text', 'filter_text', (['output_path'], {}), '(output_path)\n', (6464, 6477), False, 'from pdfalto.alto_parser import filter_text\n'), ((6572, 6599), 'os.path.isfile', 'os.path.isfile', (['output_path'], {}), '(output_path)\n', (6586, 6599), False, 'import os\n'), ((6714, 6741), 'os.path.isfile', 'os.path.isfile', (['output_path'], {}), '(output_path)\n', (6728, 6741), False, 'import os\n'), ((7035, 7083), 'logging.info', 'logging.info', (["('processing XML file: ' + xml_file)"], {}), "('processing XML file: ' + xml_file)\n", (7047, 7083), False, 'import logging\n'), ((7930, 7982), 'logging.info', 'logging.info', (["('processing repository: ' + repository)"], {}), "('processing repository: ' + repository)\n", (7942, 7982), False, 'import logging\n'), ((8166, 8188), 'os.listdir', 'os.listdir', (['repository'], {}), '(repository)\n', (8176, 8188), False, 'import os\n'), ((9432, 9470), 'numpy.std', 'np.std', (['files_scores'], 
{'dtype': 'np.float64'}), '(files_scores, dtype=np.float64)\n', (9438, 9470), True, 'import numpy as np\n'), ((10022, 10052), 'utils._remove_outliner', '_remove_outliner', (['x_pos', 'y_pos'], {}), '(x_pos, y_pos)\n', (10038, 10052), False, 'from utils import _load_config, _remove_outliner\n'), ((10076, 10106), 'utils._remove_outliner', '_remove_outliner', (['x_neg', 'y_neg'], {}), '(x_neg, y_neg)\n', (10092, 10106), False, 'from utils import _load_config, _remove_outliner\n'), ((10176, 10189), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {}), '(x, y)\n', (10183, 10189), False, 'from sklearn.utils import shuffle\n'), ((10374, 10505), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'objective': '"""reg:squarederror"""', 'colsample_bytree': '(0.3)', 'learning_rate': '(0.1)', 'max_depth': '(5)', 'alpha': '(10)', 'n_estimators': '(10)'}), "(objective='reg:squarederror', colsample_bytree=0.3,\n learning_rate=0.1, max_depth=5, alpha=10, n_estimators=10)\n", (10390, 10505), True, 'import xgboost as xgb\n'), ((10673, 10731), 'os.path.join', 'os.path.join', (["self.config['models_dir']", 'lang', 'SCORER_FILE'], {}), "(self.config['models_dir'], lang, SCORER_FILE)\n", (10685, 10731), False, 'import os\n'), ((10941, 10999), 'os.path.join', 'os.path.join', (["self.config['models_dir']", 'lang', 'SCORER_FILE'], {}), "(self.config['models_dir'], lang, SCORER_FILE)\n", (10953, 10999), False, 'import os\n'), ((11020, 11033), 'xgboost.Booster', 'xgb.Booster', ([], {}), '()\n', (11031, 11033), True, 'import xgboost as xgb\n'), ((11514, 11525), 'time.time', 'time.time', ([], {}), '()\n', (11523, 11525), False, 'import time\n'), ((11840, 11861), 'numpy.array', 'np.array', (['text_scores'], {}), '(text_scores)\n', (11848, 11861), True, 'import numpy as np\n'), ((12047, 12079), 'numpy.std', 'np.std', (['scores'], {'dtype': 'np.float64'}), '(scores, dtype=np.float64)\n', (12053, 12079), True, 'import numpy as np\n'), ((12763, 12774), 'time.time', 'time.time', ([], {}), '()\n', 
(12772, 12774), False, 'import time\n'), ((12821, 12875), 'os.path.join', 'os.path.join', (["self.config['training_dir']", 'lang', '"""ocr"""'], {}), "(self.config['training_dir'], lang, 'ocr')\n", (12833, 12875), False, 'import os\n'), ((12916, 12938), 'os.listdir', 'os.listdir', (['target_dir'], {}), '(target_dir)\n', (12926, 12938), False, 'import os\n'), ((13617, 13638), 'numpy.array', 'np.array', (['text_scores'], {}), '(text_scores)\n', (13625, 13638), True, 'import numpy as np\n'), ((13824, 13856), 'numpy.std', 'np.std', (['scores'], {'dtype': 'np.float64'}), '(scores, dtype=np.float64)\n', (13830, 13856), True, 'import numpy as np\n'), ((6613, 6635), 'os.remove', 'os.remove', (['output_path'], {}), '(output_path)\n', (6622, 6635), False, 'import os\n'), ((6755, 6777), 'os.remove', 'os.remove', (['output_path'], {}), '(output_path)\n', (6764, 6777), False, 'import os\n'), ((7346, 7374), 'lxml.etree.fromstring', 'etree.fromstring', (['xml_string'], {}), '(xml_string)\n', (7362, 7374), False, 'from lxml import etree\n'), ((7394, 7447), 'lxml.etree.tostring', 'etree.tostring', (['root'], {'encoding': '"""utf-8"""', 'method': '"""text"""'}), "(root, encoding='utf-8', method='text')\n", (7408, 7447), False, 'from lxml import etree\n'), ((8202, 8242), 'logging.info', 'logging.info', (["('processing file: ' + file)"], {}), "('processing file: ' + file)\n", (8214, 8242), False, 'import logging\n'), ((3045, 3114), 'logging.error', 'logging.error', (["('Fail to load the language model for language ' + lang)"], {}), "('Fail to load the language model for language ' + lang)\n", (3058, 3114), False, 'import logging\n'), ((3576, 3606), 'cld3.get_language', 'cld3.get_language', (['text_sample'], {}), '(text_sample)\n', (3593, 3606), False, 'import cld3\n'), ((4179, 4246), 'logging.error', 'logging.error', (["('Fail to load the scorer model for language ' + lang)"], {}), "('Fail to load the scorer model for language ' + lang)\n", (4192, 4246), False, 'import logging\n'), 
((5534, 5558), 'os.path.isfile', 'os.path.isfile', (['txt_file'], {}), '(txt_file)\n', (5548, 5558), False, 'import os\n'), ((7119, 7143), 'os.path.isfile', 'os.path.isfile', (['xml_file'], {}), '(xml_file)\n', (7133, 7143), False, 'import os\n'), ((8021, 8046), 'os.path.isdir', 'os.path.isdir', (['repository'], {}), '(repository)\n', (8034, 8046), False, 'import os\n'), ((9267, 9288), 'numpy.mean', 'np.mean', (['files_scores'], {}), '(files_scores)\n', (9274, 9288), True, 'import numpy as np\n'), ((9328, 9348), 'numpy.min', 'np.min', (['files_scores'], {}), '(files_scores)\n', (9334, 9348), True, 'import numpy as np\n'), ((9389, 9409), 'numpy.max', 'np.max', (['files_scores'], {}), '(files_scores)\n', (9395, 9409), True, 'import numpy as np\n'), ((11332, 11392), 'logging.error', 'logging.error', (["('Fail to load the model for language ' + lang)"], {}), "('Fail to load the model for language ' + lang)\n", (11345, 11392), False, 'import logging\n'), ((11697, 11708), 'time.time', 'time.time', ([], {}), '()\n', (11706, 11708), False, 'import time\n'), ((11900, 11915), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (11907, 11915), True, 'import numpy as np\n'), ((11955, 11969), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (11961, 11969), True, 'import numpy as np\n'), ((12010, 12024), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (12016, 12024), True, 'import numpy as np\n'), ((12581, 12641), 'logging.error', 'logging.error', (["('Fail to load the model for language ' + lang)"], {}), "('Fail to load the model for language ' + lang)\n", (12594, 12641), False, 'import logging\n'), ((13474, 13485), 'time.time', 'time.time', ([], {}), '()\n', (13483, 13485), False, 'import time\n'), ((13677, 13692), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (13684, 13692), True, 'import numpy as np\n'), ((13732, 13746), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (13738, 13746), True, 'import numpy as np\n'), ((13787, 13801), 
'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (13793, 13801), True, 'import numpy as np\n'), ((8358, 8388), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (8370, 8388), False, 'import os\n'), ((13099, 13129), 'os.path.join', 'os.path.join', (['target_dir', 'file'], {}), '(target_dir, file)\n', (13111, 13129), False, 'import os\n'), ((6291, 6304), 'os.urandom', 'os.urandom', (['(7)'], {}), '(7)\n', (6301, 6304), False, 'import os\n'), ((8629, 8659), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (8641, 8659), False, 'import os\n'), ((8479, 8509), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (8491, 8509), False, 'import os\n'), ((8911, 8941), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (8923, 8941), False, 'import os\n'), ((8749, 8779), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (8761, 8779), False, 'import os\n'), ((9031, 9061), 'os.path.join', 'os.path.join', (['repository', 'file'], {}), '(repository, file)\n', (9043, 9061), False, 'import os\n')] |
# ----------------------------------------------------------------------------------------------
# CoFormer Official Code
# Copyright (c) <NAME>. All Rights Reserved
# Licensed under the Apache License 2.0 [see LICENSE for details]
# ----------------------------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved [see LICENSE for details]
# ----------------------------------------------------------------------------------------------
"""
Run an inference on a custom image
"""
import argparse
import random
import numpy as np
import torch
import datasets
import util.misc as utils
import cv2
import skimage
import skimage.transform
import nltk
import re
from util import box_ops
from PIL import Image
from torch.utils.data import DataLoader
from datasets import build_dataset
from models import build_model
from pathlib import Path
from nltk.corpus import wordnet as wn
def noun2synset(noun):
    """Convert a noun identifier to a human-readable label.

    Args:
        noun: Either a WordNet offset string such as ``"n03593526"``
            (the letter ``n`` followed only by digits) or a plain word.

    Returns:
        The WordNet synset name (e.g. ``"jug.n.01"``) for an offset string,
        otherwise the word wrapped in single quotes (e.g. ``"'blank'"``).
    """
    # fullmatch with n\d+ (rather than match with n[0-9]*) so that ordinary
    # words starting with 'n' (e.g. "nurse") or a bare "n" are not mistaken
    # for WordNet offsets, which would crash int() / the synset lookup.
    if re.fullmatch(r'n\d+', noun):
        return wn.synset_from_pos_and_offset(noun[0], int(noun[1:])).name()
    return "'{}'".format(noun)
def visualize_bbox(image_path=None, num_roles=None, noun_labels=None, pred_bbox=None, pred_bbox_conf=None, output_dir=None):
    """Render predicted role boxes and their noun labels onto the image.

    A box is drawn only for roles whose confidence logit is non-negative.
    The annotated image is saved as ``<output_dir>/<image_name>_result.jpg``.

    Args:
        image_path: path to the original input image.
        num_roles: number of semantic roles of the predicted verb (at most 6).
        noun_labels: per-role noun label strings (synset-style, ``name.pos.nn``).
        pred_bbox: per-role boxes in xyxy pixel coordinates.
        pred_bbox_conf: per-role box-existence logits (not probabilities).
        output_dir: directory that receives the rendered image.
    """
    img = cv2.imread(image_path)
    stem = image_path.split('/')[-1].split('.')[0]
    img_h, img_w = img.shape[0], img.shape[1]
    # BGR palette, one fixed color per role slot.
    palette = [
        (232, 126, 253),  # red
        (130, 234, 198),  # green
        (227, 188, 134),  # blue
        (98, 129, 240),   # orange
        (79, 99, 216),    # brown
        (197, 152, 173),  # purple
    ]
    text_color = (255, 255, 255)
    thickness = 4
    for idx in range(num_roles):
        # pred_bbox_conf holds logits; >= 0 means the box is predicted to exist.
        if pred_bbox_conf[idx] < 0:
            continue
        x1, y1 = int(pred_bbox[idx][0].item()), int(pred_bbox[idx][1].item())
        x2, y2 = int(pred_bbox[idx][2].item()), int(pred_bbox[idx][3].item())
        # Clamp so the rectangle outline stays fully inside the image.
        top_left = (max(x1, thickness), max(y1, thickness))
        bottom_right = (min(x2, img_w - thickness), min(y2, img_h - thickness))
        cv2.rectangle(img=img, pt1=top_left, pt2=bottom_right, color=palette[idx], thickness=thickness, lineType=-1)
        # Filled label background plus white text above the box corner.
        caption = noun_labels[idx].split('.')[0]
        text_size, baseline = cv2.getTextSize(caption, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
        anchor = (top_left[0], top_left[1] - text_size[1])
        cv2.rectangle(img=img, pt1=(anchor[0], anchor[1] - 2 - baseline), pt2=(anchor[0] + text_size[0], anchor[1] + text_size[1]), color=palette[idx], thickness=-1)
        cv2.putText(img, caption, (anchor[0], anchor[1] + baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.4, text_color, 1, 8)
    cv2.imwrite("{}/{}_result.jpg".format(output_dir, stem), img)
    return
def process_image(image):
    """Normalize, rescale and center-pad an RGB image for the network.

    Args:
        image: HxWx3 float array with values already scaled to [0, 1].

    Returns:
        A tuple ``(samples, info)``: ``samples`` is a NestedTensor holding a
        single 3x700x700 image; ``info`` records the resized height/width,
        the center-padding shifts, the resize scale and the canvas size (as
        float tensors), which are needed to map predicted boxes back to the
        original image.
    """
    # ImageNet channel statistics used for normalization.
    imagenet_mean = np.array([[[0.485, 0.456, 0.406]]])
    imagenet_std = np.array([[[0.229, 0.224, 0.225]]])
    image = (image.astype(np.float32) - imagenet_mean) / imagenet_std

    # Scale so the short side is 512, but never let the long side exceed 700.
    min_side, max_side = 512, 700
    orig_h, orig_w, _ = image.shape
    scale = min_side / min(orig_h, orig_w)
    if max(orig_h, orig_w) * scale > max_side:
        scale = max_side / max(orig_h, orig_w)
    image = skimage.transform.resize(
        image, (int(round(orig_h * scale)), int(round(orig_w * scale))))

    rows, cols, channels = image.shape
    resized = np.zeros((rows, cols, channels)).astype(np.float32)
    resized[:rows, :cols, :] = image.astype(np.float32)
    image = torch.from_numpy(resized)

    # Center the resized image on a fixed 700x700 canvas.
    max_height = 700
    max_width = 700
    shift_0 = int((max_height - rows) * 0.5)  # vertical offset
    shift_1 = int((max_width - cols) * 0.5)   # horizontal offset
    padded_imgs = torch.zeros(1, max_height, max_width, 3)
    padded_imgs[0, shift_0:shift_0 + rows, shift_1:shift_1 + cols, :] = image
    padded_imgs = padded_imgs.permute(0, 3, 1, 2)  # NHWC -> NCHW

    info = {
        'width': torch.tensor(cols).float(),
        'height': torch.tensor(rows).float(),
        'shift_0': torch.tensor(shift_0).float(),
        'shift_1': torch.tensor(shift_1).float(),
        'scale': torch.tensor(scale).float(),
        'max_width': torch.tensor(max_width).float(),
        'max_height': torch.tensor(max_height).float(),
    }
    return utils.nested_tensor_from_tensor_list(padded_imgs), info
def inference(model, device, image_path=None, inference=False, idx_to_verb=None, idx_to_role=None,
              vidx_ridx=None, idx_to_class=None, output_dir=None):
    """Run CoFormer on a single image and save textual + visual results.

    Writes ``<output_dir>/<image_name>_result.txt`` (predicted verb plus one
    role/noun line per role) and delegates to :func:`visualize_bbox` for the
    annotated image.

    Args:
        model: CoFormer model with trained weights already loaded.
        device: torch device the model and inputs live on.
        image_path: path to the input image.
        inference: forwarded to the model's forward pass (expected True here).
        idx_to_verb: verb index -> verb label lookup.
        idx_to_role: role index -> role label lookup.
        vidx_ridx: verb index -> list of its role indices.
        idx_to_class: noun index -> noun identifier (see ``noun2synset``).
        output_dir: directory that receives the result files.
    """
    model.eval()
    image_name = image_path.split('/')[-1].split('.')[0]
    # load image & preprocess into the padded tensor the model expects
    image = Image.open(image_path)
    image = image.convert('RGB')
    image = np.array(image)
    image = image.astype(np.float32) / 255.0
    image, info = process_image(image)
    image = image.to(device)
    info = {k: v.to(device) if type(v) is not str else v for k, v in info.items()}

    # forward pass
    output = model(image, inference=inference)
    pred_verb = output['pred_verb'][0]
    pred_noun = output['pred_noun_3'][0]
    pred_bbox = output['pred_bbox'][0]
    pred_bbox_conf = output['pred_bbox_conf'][0]

    # top-1 verb and, for each of its roles, the top-1 noun
    top1_verb = torch.topk(pred_verb, k=1, dim=0)[1].item()
    roles = vidx_ridx[top1_verb]
    num_roles = len(roles)
    verb_label = idx_to_verb[top1_verb]
    role_labels = []
    noun_labels = []
    for i in range(num_roles):
        top1_noun = torch.topk(pred_noun[i], k=1, dim=0)[1].item()
        role_labels.append(idx_to_role[roles[i]])
        noun_labels.append(noun2synset(idx_to_class[top1_noun]))

    # convert boxes from the padded-canvas cxcywh frame back to xyxy pixels
    mw, mh = info['max_width'], info['max_height']
    w, h = info['width'], info['height']
    shift_0, shift_1, scale = info['shift_0'], info['shift_1'], info['scale']
    pb_xyxy = box_ops.swig_box_cxcywh_to_xyxy(pred_bbox.clone(), mw, mh, device=device)
    for i in range(num_roles):
        # undo the center-padding shifts, clamping at zero
        pb_xyxy[i][0] = max(pb_xyxy[i][0] - shift_1, 0)
        pb_xyxy[i][1] = max(pb_xyxy[i][1] - shift_0, 0)
        pb_xyxy[i][2] = max(pb_xyxy[i][2] - shift_1, 0)
        pb_xyxy[i][3] = max(pb_xyxy[i][3] - shift_0, 0)
        # locate predicted boxes within image (processing w/ image width & height)
        pb_xyxy[i][0] = min(pb_xyxy[i][0], w)
        pb_xyxy[i][1] = min(pb_xyxy[i][1], h)
        pb_xyxy[i][2] = min(pb_xyxy[i][2], w)
        pb_xyxy[i][3] = min(pb_xyxy[i][3], h)
    pb_xyxy /= scale

    # write the verb/role/noun predictions; the `with` statement closes the
    # file, so the original explicit f.close() was redundant and removed
    with open("{}/{}_result.txt".format(output_dir, image_name), "w") as f:
        f.write("verb: {} \n".format(verb_label))
        for i in range(num_roles):
            f.write("role: {}, noun: {} \n".format(role_labels[i], noun_labels[i]))
    visualize_bbox(image_path=image_path, num_roles=num_roles, noun_labels=noun_labels, pred_bbox=pb_xyxy, pred_bbox_conf=pred_bbox_conf, output_dir=output_dir)
def get_args_parser():
    """Build the command-line parser for CoFormer inference.

    ``add_help`` is disabled so this parser can be used as a parent of the
    top-level script parser.
    """
    parser = argparse.ArgumentParser('Set CoFormer', add_help=False)
    add = parser.add_argument

    # Backbone parameters
    add('--backbone', default='resnet50', type=str,
        help="Name of the convolutional backbone to use")
    add('--position_embedding', default='learned', type=str, choices=('sine', 'learned'),
        help="Type of positional embedding to use on top of the image features")

    # Transformer parameters
    add('--num_glance_enc_layers', default=3, type=int,
        help="Number of encoding layers in Glance Transformer")
    add('--num_gaze_s1_dec_layers', default=3, type=int,
        help="Number of decoding layers in Gaze-Step1 Transformer")
    add('--num_gaze_s1_enc_layers', default=3, type=int,
        help="Number of encoding layers in Gaze-Step1 Transformer")
    add('--num_gaze_s2_dec_layers', default=3, type=int,
        help="Number of decoding layers in Gaze-Step2 Transformer")
    add('--dim_feedforward', default=2048, type=int,
        help="Intermediate size of the feedforward layers in the transformer blocks")
    add('--hidden_dim', default=512, type=int,
        help="Size of the embeddings (dimension of the transformer)")
    add('--dropout', default=0.15, type=float,
        help="Dropout applied in the transformer")
    add('--nheads', default=8, type=int,
        help="Number of attention heads inside the transformer's attentions")

    # Dataset parameters
    add('--dataset_file', default='swig')
    add('--swig_path', type=str, default="SWiG")
    add('--image_path', default='inference/image.jpg',
        help='path where the test image is')

    # Etc...
    add('--inference', default=True)
    add('--output_dir', default='CoFormer_inference',
        help='path where to save, empty for no saving')
    add('--device', default='cuda',
        help='device to use for inference')
    add('--seed', default=42, type=int)
    add('--num_workers', default=1, type=int)
    add('--saved_model', default='CoFormer_checkpoint.pth',
        help='path where saved model is')
    return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print(args)
if not args.inference:
assert False, f"Please set inference to True"
# fix the seed
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# num noun classes in train dataset
dataset_train = build_dataset(image_set='train', args=args)
args.num_noun_classes = dataset_train.num_nouns()
# build model
device = torch.device(args.device)
model, _ = build_model(args)
model.to(device)
checkpoint = torch.load(args.saved_model, map_location='cpu')
model.load_state_dict(checkpoint['model'])
inference(model, device, image_path=args.image_path, inference=args.inference,
idx_to_verb=args.idx_to_verb, idx_to_role=args.idx_to_role, vidx_ridx=args.vidx_ridx,
idx_to_class=args.idx_to_class, output_dir=args.output_dir)
return
if __name__ == '__main__':
nltk.download('wordnet')
parser = argparse.ArgumentParser('CoFormer inference script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args) | [
"cv2.rectangle",
"nltk.download",
"torch.from_numpy",
"util.misc.get_sha",
"numpy.array",
"argparse.ArgumentParser",
"pathlib.Path",
"models.build_model",
"numpy.random.seed",
"util.misc.init_distributed_mode",
"torch.topk",
"re.match",
"cv2.putText",
"util.misc.nested_tensor_from_tensor_l... | [((1315, 1337), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1325, 1337), False, 'import cv2\n'), ((3134, 3169), 'numpy.array', 'np.array', (['[[[0.485, 0.456, 0.406]]]'], {}), '([[[0.485, 0.456, 0.406]]])\n', (3142, 3169), True, 'import numpy as np\n'), ((3180, 3215), 'numpy.array', 'np.array', (['[[[0.229, 0.224, 0.225]]]'], {}), '([[[0.229, 0.224, 0.225]]])\n', (3188, 3215), True, 'import numpy as np\n'), ((3883, 3910), 'torch.from_numpy', 'torch.from_numpy', (['new_image'], {}), '(new_image)\n', (3899, 3910), False, 'import torch\n'), ((4048, 4088), 'torch.zeros', 'torch.zeros', (['(1)', 'max_height', 'max_width', '(3)'], {}), '(1, max_height, max_width, 3)\n', (4059, 4088), False, 'import torch\n'), ((5120, 5142), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5130, 5142), False, 'from PIL import Image\n'), ((5188, 5203), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (5196, 5203), True, 'import numpy as np\n'), ((7399, 7454), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Set CoFormer"""'], {'add_help': '(False)'}), "('Set CoFormer', add_help=False)\n", (7422, 7454), False, 'import argparse\n'), ((9978, 10011), 'util.misc.init_distributed_mode', 'utils.init_distributed_mode', (['args'], {}), '(args)\n', (10005, 10011), True, 'import util.misc as utils\n'), ((10224, 10247), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (10241, 10247), False, 'import torch\n'), ((10252, 10272), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10266, 10272), True, 'import numpy as np\n'), ((10277, 10294), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10288, 10294), False, 'import random\n'), ((10356, 10399), 'datasets.build_dataset', 'build_dataset', ([], {'image_set': '"""train"""', 'args': 'args'}), "(image_set='train', args=args)\n", (10369, 10399), False, 'from datasets import build_dataset\n'), ((10486, 
10511), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (10498, 10511), False, 'import torch\n'), ((10527, 10544), 'models.build_model', 'build_model', (['args'], {}), '(args)\n', (10538, 10544), False, 'from models import build_model\n'), ((10583, 10631), 'torch.load', 'torch.load', (['args.saved_model'], {'map_location': '"""cpu"""'}), "(args.saved_model, map_location='cpu')\n", (10593, 10631), False, 'import torch\n'), ((10984, 11008), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (10997, 11008), False, 'import nltk\n'), ((1125, 1150), 're.match', 're.match', (['"""n[0-9]*"""', 'noun'], {}), "('n[0-9]*', noun)\n", (1133, 1150), False, 'import re\n'), ((4571, 4620), 'util.misc.nested_tensor_from_tensor_list', 'utils.nested_tensor_from_tensor_list', (['padded_imgs'], {}), '(padded_imgs)\n', (4607, 4620), True, 'import util.misc as utils\n'), ((10203, 10219), 'util.misc.get_rank', 'utils.get_rank', ([], {}), '()\n', (10217, 10219), True, 'import util.misc as utils\n'), ((2404, 2501), 'cv2.rectangle', 'cv2.rectangle', ([], {'img': 'image', 'pt1': 'lt', 'pt2': 'rb', 'color': 'colors[i]', 'thickness': 'line_width', 'lineType': '(-1)'}), '(img=image, pt1=lt, pt2=rb, color=colors[i], thickness=\n line_width, lineType=-1)\n', (2417, 2501), False, 'import cv2\n'), ((2616, 2672), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', '(1)'], {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)\n', (2631, 2672), False, 'import cv2\n'), ((2732, 2876), 'cv2.rectangle', 'cv2.rectangle', ([], {'img': 'image', 'pt1': '(p1[0], p1[1] - 2 - baseline)', 'pt2': '(p1[0] + text_size[0], p1[1] + text_size[1])', 'color': 'colors[i]', 'thickness': '(-1)'}), '(img=image, pt1=(p1[0], p1[1] - 2 - baseline), pt2=(p1[0] +\n text_size[0], p1[1] + text_size[1]), color=colors[i], thickness=-1)\n', (2745, 2876), False, 'import cv2\n'), ((2883, 2990), 'cv2.putText', 'cv2.putText', (['image', 'label', '(p1[0], p1[1] + 
baseline)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', 'white_color', '(1)', '(8)'], {}), '(image, label, (p1[0], p1[1] + baseline), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, white_color, 1, 8)\n', (2894, 2990), False, 'import cv2\n'), ((3766, 3793), 'numpy.zeros', 'np.zeros', (['(rows, cols, cns)'], {}), '((rows, cols, cns))\n', (3774, 3793), True, 'import numpy as np\n'), ((4361, 4382), 'torch.tensor', 'torch.tensor', (['shift_0'], {}), '(shift_0)\n', (4373, 4382), False, 'import torch\n'), ((4405, 4426), 'torch.tensor', 'torch.tensor', (['shift_1'], {}), '(shift_1)\n', (4417, 4426), False, 'import torch\n'), ((4447, 4466), 'torch.tensor', 'torch.tensor', (['scale'], {}), '(scale)\n', (4459, 4466), False, 'import torch\n'), ((4484, 4507), 'torch.tensor', 'torch.tensor', (['max_width'], {}), '(max_width)\n', (4496, 4507), False, 'import torch\n'), ((4525, 4549), 'torch.tensor', 'torch.tensor', (['max_height'], {}), '(max_height)\n', (4537, 4549), False, 'import torch\n'), ((10044, 10059), 'util.misc.get_sha', 'utils.get_sha', ([], {}), '()\n', (10057, 10059), True, 'import util.misc as utils\n'), ((5633, 5666), 'torch.topk', 'torch.topk', (['pred_verb'], {'k': '(1)', 'dim': '(0)'}), '(pred_verb, k=1, dim=0)\n', (5643, 5666), False, 'import torch\n'), ((11167, 11188), 'pathlib.Path', 'Path', (['args.output_dir'], {}), '(args.output_dir)\n', (11171, 11188), False, 'from pathlib import Path\n'), ((5870, 5906), 'torch.topk', 'torch.topk', (['pred_noun[i]'], {'k': '(1)', 'dim': '(0)'}), '(pred_noun[i], k=1, dim=0)\n', (5880, 5906), False, 'import torch\n')] |
# -------------------------------------------------------
# CSCI 561, Spring 2021
# Homework 1
# The Oregon Trail
# Author: <NAME>
# This creates a Node class,
# representing the node in search space
# -------------------------------------------------------
import numpy as np
class Node:
def __init__(self, xy, h):
self.coord = xy # coordinate of node
self.height = h # height of node
self.cost = 0 # cumulated cost of node
self.parent = 0 # parent of node
self.id = 0 # unique ID of node
self.heuristic = 0 # heuristic cost of node
self.state_id = np.array2string(xy, precision=0, separator=',') | [
"numpy.array2string"
] | [((614, 661), 'numpy.array2string', 'np.array2string', (['xy'], {'precision': '(0)', 'separator': '""","""'}), "(xy, precision=0, separator=',')\n", (629, 661), True, 'import numpy as np\n')] |
"""
Internal tools needed to query the index based on rectangles
and position/radius. Based on tools in argodata:
https://github.com/ArgoCanada/argodata/blob/master/R/utils.R#L54-L165
"""
import warnings
import numpy as np
def geodist_rad(long1, lat1, long2, lat2, R=6371.010):
delta_long = long2 - long1
delta_lat = lat2 - lat1
a = np.sin(delta_lat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(delta_long / 2) ** 2
c = 2 * np.arcsin(np.minimum(1, np.sqrt(a)))
return R * c
def geodist_lnglat(xy1, xy2, R=6371.010):
return geodist_rad(
xy1['x'] * np.pi / 180, xy1['y'] * np.pi / 180,
xy2['x'] * np.pi / 180, xy2['y'] * np.pi / 180,
R=R
)
def rect_intersects(r1, r2):
limits = {
'xmin': np.maximum(r1['xmin'], r2['xmin']),
'xmax': np.minimum(r1['xmax'], r2['xmax']),
'ymin': np.maximum(r1['ymin'], r2['ymin']),
'ymax': np.minimum(r1['ymax'], r2['ymax'])
}
return (limits['xmax'] >= limits['xmin']) & (limits['ymax'] >= limits['ymin'])
def rect_contains(r, xy):
return (xy['x'] >= r['xmin']) & \
(xy['x'] <= r['xmax']) & \
(xy['y'] >= r['ymin']) & \
(xy['y'] <= r['ymax'])
def rect_split_dateline(r):
is_wrap = r['xmax'] < r['xmin']
xmin1 = np.asarray(r['xmin']).copy()
xmin1[is_wrap] = -180
xmin2 = r['xmin']
xmax1 = r['xmax']
xmax2 = np.asarray(r['xmax']).copy()
xmax2[is_wrap] = 180
return (
{'xmin': xmin1, 'ymin': r['ymin'], 'xmax': xmax1, 'ymax': r['ymax']},
{'xmin': xmin2, 'ymin': r['ymin'], 'xmax': xmax2, 'ymax': r['ymax']}
)
def normalize_lat(latitude):
# some latitude values are -99.999 instead of missing
latitude = np.asfarray(latitude).copy()
latitude[latitude == -99.999] = np.nan
return latitude
def normalize_lng(longitude):
# -999.999 is occasionally used to denote missing in the profile index
# some longitudes are greater than 180, but we handle that more robustly
# here.
with warnings.catch_warnings():
# Suppress warnings of 'invalid remainder' because we know there are
# nan values already.
warnings.simplefilter("ignore")
longitude = np.asfarray(longitude).copy()
longitude[longitude == -999.999] = np.nan
longitude_inf = np.isinf(longitude)
normalized = np.asfarray(((longitude + 180.0) % 360) - 180.0)
normalized[longitude == 180.0] = 180.0
normalized[longitude_inf] = longitude[longitude_inf]
return normalized
| [
"numpy.sqrt",
"numpy.minimum",
"numpy.sin",
"warnings.catch_warnings",
"numpy.asarray",
"numpy.asfarray",
"numpy.cos",
"warnings.simplefilter",
"numpy.maximum",
"numpy.isinf"
] | [((760, 794), 'numpy.maximum', 'np.maximum', (["r1['xmin']", "r2['xmin']"], {}), "(r1['xmin'], r2['xmin'])\n", (770, 794), True, 'import numpy as np\n'), ((812, 846), 'numpy.minimum', 'np.minimum', (["r1['xmax']", "r2['xmax']"], {}), "(r1['xmax'], r2['xmax'])\n", (822, 846), True, 'import numpy as np\n'), ((864, 898), 'numpy.maximum', 'np.maximum', (["r1['ymin']", "r2['ymin']"], {}), "(r1['ymin'], r2['ymin'])\n", (874, 898), True, 'import numpy as np\n'), ((916, 950), 'numpy.minimum', 'np.minimum', (["r1['ymax']", "r2['ymax']"], {}), "(r1['ymax'], r2['ymax'])\n", (926, 950), True, 'import numpy as np\n'), ((2026, 2051), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2049, 2051), False, 'import warnings\n'), ((2168, 2199), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2189, 2199), False, 'import warnings\n'), ((2333, 2352), 'numpy.isinf', 'np.isinf', (['longitude'], {}), '(longitude)\n', (2341, 2352), True, 'import numpy as np\n'), ((2374, 2420), 'numpy.asfarray', 'np.asfarray', (['((longitude + 180.0) % 360 - 180.0)'], {}), '((longitude + 180.0) % 360 - 180.0)\n', (2385, 2420), True, 'import numpy as np\n'), ((348, 369), 'numpy.sin', 'np.sin', (['(delta_lat / 2)'], {}), '(delta_lat / 2)\n', (354, 369), True, 'import numpy as np\n'), ((1285, 1306), 'numpy.asarray', 'np.asarray', (["r['xmin']"], {}), "(r['xmin'])\n", (1295, 1306), True, 'import numpy as np\n'), ((1396, 1417), 'numpy.asarray', 'np.asarray', (["r['xmax']"], {}), "(r['xmax'])\n", (1406, 1417), True, 'import numpy as np\n'), ((1729, 1750), 'numpy.asfarray', 'np.asfarray', (['latitude'], {}), '(latitude)\n', (1740, 1750), True, 'import numpy as np\n'), ((377, 389), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (383, 389), True, 'import numpy as np\n'), ((392, 404), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (398, 404), True, 'import numpy as np\n'), ((407, 429), 'numpy.sin', 'np.sin', (['(delta_long / 2)'], {}), 
'(delta_long / 2)\n', (413, 429), True, 'import numpy as np\n'), ((471, 481), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (478, 481), True, 'import numpy as np\n'), ((2229, 2251), 'numpy.asfarray', 'np.asfarray', (['longitude'], {}), '(longitude)\n', (2240, 2251), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import gzip
import tarfile
import numpy as np
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from astropy import wcs
from astropy.coordinates import SkyCoord
from pymoc import MOC
from pymoc.io.fits import read_moc_fits
def downloadFile(url, dest_folder, filename=None):
# Open the url
try:
f = urlopen(url)
# Open our local file for writing
if filename is None:
print("Downloading " + url)
dest_file = os.path.join(dest_folder, os.path.basename(url))
else:
dest_file = os.path.join(dest_folder, filename)
with open(dest_file, "wb") as local_file:
local_file.write(f.read())
#handle errors
except HTTPError as e:
print("HTTP Error:", e.code, url)
except URLError as e:
print("URL Error:", e.reason, url)
def untarFile(input_file, dest_folder, remove=True):
f = tarfile.open(input_file)
f.extractall(path=dest_folder)
def gunzipFile(input_file, output_file, remove=True):
"""
gunzip input_file, save to output_file. If remove is True,
input_file is deleted.
"""
inF = gzip.open(input_file, 'rb')
outF = open(output_file, 'wb')
outF.write( inF.read() )
inF.close()
outF.close()
if remove:
os.remove(input_file)
def get_moc(url, survey, folder):
"""
Get the moc of the area covered by survey from url and store it in folder.
"""
if survey is 'UKIDSS':
filenameJ = 'las-J1-DR10.fits'
filenameH = 'las-H-DR10.fits'
filenameK = 'las-K-DR10.fits'
elif survey is 'VISTA':
filenameJ = 'vhs-J-dr4.fits'
filenameH = 'vhs-H-dr4.fits'
filenameK = 'vhs-Ks-dr4.fits'
elif survey is '2MASS':
return None
elif survey is 'sdss':
downloadFile(url, folder, 'moc_{}.fits'.format(survey.lower()))
else:
raise ValueError('Invalid near-infrared survey!')
filename = os.path.join(folder, 'moc_{}.fits'.format(survey.lower()))
if not os.path.isfile(filename):
# J moc
moc_J = MOC()
downloadFile(os.path.join(url, filenameJ), folder)
read_moc_fits(moc_J, os.path.join(folder, filenameJ))
# H moc
moc_H = MOC()
downloadFile(os.path.join(url, filenameH), folder)
read_moc_fits(moc_H, os.path.join(folder, filenameH))
# K moc
moc_K = MOC()
downloadFile(os.path.join(url, filenameK), folder)
read_moc_fits(moc_K, os.path.join(folder, filenameK))
moc_JH = moc_J.intersection(moc_H)
moc_JHK = moc_JH.intersection(moc_K)
moc_JHK.write(filename, filetype="FITS", overwrite=True)
return filename
def sources_inmoc(sources, hp, moc, moc_order=15,
ra='RA', dec='Dec', units=None):
"""
Find those sources in the astropy table sources included in moc.
"""
if units is None:
coords = SkyCoord(ra=sources[ra], dec=sources[dec])
else:
coords = SkyCoord(ra=sources[ra], dec=sources[dec], unit=units)
cells_msk = np.array([moc.contains(moc_order, cell)
for cell in hp.skycoord_to_healpix(coords)])
return sources[cells_msk]
def obsid_wcs(coords):
"""
WCS object with a gnomonic projection centered in coords.
Parameters as in an EPIC XMM-Newton observation.
"""
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
# Set up an "gnomonic" projection (as in XMM EPIC images)
# Vector properties may be set with Python lists, or Numpy arrays
w.wcs.crpix = [2.98436767602103E+02, 2.98436767602103E+02]
w.wcs.cdelt = np.array([-1.20833333333333E-03, 1.20833333333333E-03])
w.wcs.crval = [coords.ra.deg, coords.dec.deg]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.set_pv([(2, 1, 0.0)])
return w | [
"tarfile.open",
"pymoc.MOC",
"gzip.open",
"os.path.join",
"astropy.coordinates.SkyCoord",
"urllib.request.urlopen",
"os.path.isfile",
"numpy.array",
"os.path.basename",
"astropy.wcs.WCS",
"os.remove"
] | [((976, 1000), 'tarfile.open', 'tarfile.open', (['input_file'], {}), '(input_file)\n', (988, 1000), False, 'import tarfile\n'), ((1209, 1236), 'gzip.open', 'gzip.open', (['input_file', '"""rb"""'], {}), "(input_file, 'rb')\n", (1218, 1236), False, 'import gzip\n'), ((3586, 3602), 'astropy.wcs.WCS', 'wcs.WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (3593, 3602), False, 'from astropy import wcs\n'), ((3817, 3870), 'numpy.array', 'np.array', (['[-0.00120833333333333, 0.00120833333333333]'], {}), '([-0.00120833333333333, 0.00120833333333333])\n', (3825, 3870), True, 'import numpy as np\n'), ((384, 396), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (391, 396), False, 'from urllib.request import urlopen\n'), ((1358, 1379), 'os.remove', 'os.remove', (['input_file'], {}), '(input_file)\n', (1367, 1379), False, 'import os\n'), ((2119, 2143), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2133, 2143), False, 'import os\n'), ((2177, 2182), 'pymoc.MOC', 'MOC', ([], {}), '()\n', (2180, 2182), False, 'from pymoc import MOC\n'), ((2343, 2348), 'pymoc.MOC', 'MOC', ([], {}), '()\n', (2346, 2348), False, 'from pymoc import MOC\n'), ((2511, 2516), 'pymoc.MOC', 'MOC', ([], {}), '()\n', (2514, 2516), False, 'from pymoc import MOC\n'), ((3053, 3095), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'sources[ra]', 'dec': 'sources[dec]'}), '(ra=sources[ra], dec=sources[dec])\n', (3061, 3095), False, 'from astropy.coordinates import SkyCoord\n'), ((3123, 3177), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'sources[ra]', 'dec': 'sources[dec]', 'unit': 'units'}), '(ra=sources[ra], dec=sources[dec], unit=units)\n', (3131, 3177), False, 'from astropy.coordinates import SkyCoord\n'), ((629, 664), 'os.path.join', 'os.path.join', (['dest_folder', 'filename'], {}), '(dest_folder, filename)\n', (641, 664), False, 'import os\n'), ((2206, 2234), 'os.path.join', 'os.path.join', (['url', 'filenameJ'], {}), '(url, filenameJ)\n', (2218, 
2234), False, 'import os\n'), ((2273, 2304), 'os.path.join', 'os.path.join', (['folder', 'filenameJ'], {}), '(folder, filenameJ)\n', (2285, 2304), False, 'import os\n'), ((2370, 2398), 'os.path.join', 'os.path.join', (['url', 'filenameH'], {}), '(url, filenameH)\n', (2382, 2398), False, 'import os\n'), ((2437, 2468), 'os.path.join', 'os.path.join', (['folder', 'filenameH'], {}), '(folder, filenameH)\n', (2449, 2468), False, 'import os\n'), ((2538, 2566), 'os.path.join', 'os.path.join', (['url', 'filenameK'], {}), '(url, filenameK)\n', (2550, 2566), False, 'import os\n'), ((2605, 2636), 'os.path.join', 'os.path.join', (['folder', 'filenameK'], {}), '(folder, filenameK)\n', (2617, 2636), False, 'import os\n'), ((568, 589), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (584, 589), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
TODO
"""
import os.path
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GL.ARB.texture_non_power_of_two import *
from core import G
from image import Image
import log
from getpath import getSysDataPath
NOTFOUND_TEXTURE = getSysDataPath('textures/texture_notfound.png')
class Texture(object):
_npot = None
_powers = None
def __new__(cls, *args, **kwargs):
self = super(Texture, cls).__new__(cls)
if cls._npot is None:
cls._npot = glInitTextureNonPowerOfTwoARB()
try:
import debugdump
debugdump.dump.appendMessage("GL.EXTENSION: GL_ARB_texture_non_power_of_two %s" % ("enabled" if cls._npot else "not available"))
except Exception as e:
log.error("Failed to write GL debug info to debug dump: %s", format(str(e)))
if cls._powers is None:
cls._powers = [2**i for i in range(20)]
self.textureId = glGenTextures(1)
self.width = 0
self.height = 0
self.modified = None
return self
def __init__(self, image = None, size = None, components = 4):
if image is not None:
self.loadImage(image)
elif size is not None:
width, height = size
self.initTexture(width, height, components)
def __del__(self):
try:
glDeleteTextures(self.textureId)
except Exception:
pass
@staticmethod
def getFormat(components):
if components == 1:
return (GL_ALPHA8, GL_ALPHA)
elif components == 3:
return (3, GL_RGB)
elif components == 4:
return (4, GL_RGBA)
else:
raise RuntimeError("Unsupported pixel format")
def initTexture(self, width, height, components = 4, pixels = None):
internalFormat, format = self.getFormat(components)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
use_mipmaps = False
if not (width in self._powers and height in self._powers) and not self._npot:
log.debug("Non-power-of-two textures not supported, building mipmaps for image with dimensions %sx%s.", width, height)
use_mipmaps = True
if use_mipmaps and pixels is None:
raise RuntimeError("Non-power-of-two textures not supported")
if pixels is None:
# Zero fill pixel data to allocate
import numpy as np
pixels = np.zeros(width*height*components, dtype=np.uint8)
if height == 1:
glBindTexture(GL_TEXTURE_1D, self.textureId)
if not use_mipmaps:
glTexImage1D(GL_PROXY_TEXTURE_1D, 0, internalFormat, width, 0, format, GL_UNSIGNED_BYTE, pixels)
if not glGetTexLevelParameteriv(GL_PROXY_TEXTURE_1D, 0, GL_TEXTURE_WIDTH):
log.notice('texture size (%d) too large, building mipmaps', width)
use_mipmaps = True
if use_mipmaps:
gluBuild1DMipmaps(GL_TEXTURE_1D, internalFormat, width, format, GL_UNSIGNED_BYTE, pixels)
# glGetTexLevelParameter is broken on X11
# width = glGetTexLevelParameteriv(GL_TEXTURE_1D, 0, GL_TEXTURE_WIDTH)
else:
glTexImage1D(GL_TEXTURE_1D, 0, internalFormat, width, 0, format, GL_UNSIGNED_BYTE, pixels)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glBindTexture(GL_TEXTURE_1D, 0)
else:
glBindTexture(GL_TEXTURE_2D, self.textureId)
if not use_mipmaps:
glTexImage2D(GL_PROXY_TEXTURE_2D, 0, internalFormat, width, height, 0, format, GL_UNSIGNED_BYTE, pixels)
if not glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH):
log.notice('texture size (%d x %d) too large, building mipmaps', width, height)
use_mipmaps = True
if use_mipmaps:
gluBuild2DMipmaps(GL_TEXTURE_2D, internalFormat, width, height, format, GL_UNSIGNED_BYTE, pixels)
# glGetTexLevelParameter is broken on X11
# width = glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH)
# height = glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT)
else:
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, GL_UNSIGNED_BYTE, pixels)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glBindTexture(GL_TEXTURE_2D, 0)
self.width, self.height = width, height
log.debug('initTexture: %s, %s, %s', width, height, use_mipmaps)
def loadImage(self, image):
if isinstance(image, str):
image = Image(image)
pixels = image.flip_vertical().data
self.initTexture(image.width, image.height, image.components, pixels)
def loadSubImage(self, image, x, y):
if not self.textureId:
raise RuntimeError("Texture is empty, cannot load a sub texture into it")
if isinstance(image, str):
image = Image(image)
internalFormat, format = self.getFormat(image.components)
pixels = image.flip_vertical().data
if image.height == 1:
glBindTexture(GL_TEXTURE_1D, self.textureId)
glTexSubImage1D(GL_TEXTURE_1D, 0, x, image.width, format, GL_UNSIGNED_BYTE, pixels)
glBindTexture(GL_TEXTURE_1D, 0)
else:
glBindTexture(GL_TEXTURE_2D, self.textureId)
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, image.width, image.height, format, GL_UNSIGNED_BYTE, pixels)
glBindTexture(GL_TEXTURE_2D, 0)
_textureCache = {}
def getTexture(path, cache=None):
texture = None
cache = cache or _textureCache
if isinstance(path, Image):
img = path
if hasattr(img, 'sourcePath'):
if img.sourcePath in cache:
texture = cache[img.sourcePath]
if not (hasattr(img, 'modified') and img.modified > texture.modified):
return texture
else:
log.warning("Image used as texture does not contain a \"sourcePath\" attribute, making it impossible to cache it. This could cause slow rendering (always creates new texture).")
return Texture(img)
import time
if img.sourcePath in cache:
log.debug("Reloading texture for dynamic image %s.", img.sourcePath)
texture = cache[img.sourcePath]
texture.loadImage(img)
else:
log.debug("Creating new texture for dynamic image %s.", img.sourcePath)
texture = Texture(img)
if hasattr(img, 'modified'):
texture.modified = img.modified
else:
texture.modified = time.time()
cache[img.sourcePath] = texture
return texture
elif not os.path.isfile(path):
log.error('Cannot get texture for file path %s, no such file.', path)
return None
if path in cache:
texture = cache[path]
if texture is False:
return texture
if os.path.getmtime(path) > texture.modified:
log.message('Reloading texture %s.', path) # TL: unicode problems unbracketed
try:
img = Image(path=path)
texture.loadImage(img)
except RuntimeError as text:
log.error("%s", text, exc_info=True)
return
else:
texture.modified = os.path.getmtime(path)
else:
try:
log.debug("Creating new texture for image %s.", path)
img = Image(path=path)
texture = Texture(img)
except RuntimeError as text:
log.error("Error loading texture %s", path, exc_info=True)
texture = False
else:
texture.modified = os.path.getmtime(path)
cache[path] = texture
return texture
def reloadTextures():
"""
Clear the entire texture cache, resulting in removing all contained textures
from the GPU memory (unless other references are kept to the texture
objects).
"""
log.message('Reloading all textures')
for path in _textureCache:
try:
_textureCache[path].loadImage(path)
except RuntimeError as _:
log.error("Error loading texture %s", path, exc_info=True)
def reloadTexture(path):
"""
Remove a texture from the texture cache. Removing a texture from cache will
result in unloading the texture from the GPU memory, unless another
reference to it is kept.
"""
log.message('Reloading texture %s', path)
if path not in _textureCache:
log.error('Cannot reload non-existing texture %s', path)
return
try:
_textureCache[path].loadImage(path)
except RuntimeError as text:
log.error("Error loading texture %s", path, exc_info=True)
| [
"log.warning",
"log.notice",
"getpath.getSysDataPath",
"log.error",
"log.debug",
"numpy.zeros",
"debugdump.dump.appendMessage",
"time.time",
"log.message",
"image.Image"
] | [((1331, 1378), 'getpath.getSysDataPath', 'getSysDataPath', (['"""textures/texture_notfound.png"""'], {}), "('textures/texture_notfound.png')\n", (1345, 1378), False, 'from getpath import getSysDataPath\n'), ((9761, 9798), 'log.message', 'log.message', (['"""Reloading all textures"""'], {}), "('Reloading all textures')\n", (9772, 9798), False, 'import log\n'), ((10223, 10264), 'log.message', 'log.message', (['"""Reloading texture %s"""', 'path'], {}), "('Reloading texture %s', path)\n", (10234, 10264), False, 'import log\n'), ((6170, 6234), 'log.debug', 'log.debug', (['"""initTexture: %s, %s, %s"""', 'width', 'height', 'use_mipmaps'], {}), "('initTexture: %s, %s, %s', width, height, use_mipmaps)\n", (6179, 6234), False, 'import log\n'), ((10307, 10363), 'log.error', 'log.error', (['"""Cannot reload non-existing texture %s"""', 'path'], {}), "('Cannot reload non-existing texture %s', path)\n", (10316, 10363), False, 'import log\n'), ((3161, 3289), 'log.debug', 'log.debug', (['"""Non-power-of-two textures not supported, building mipmaps for image with dimensions %sx%s."""', 'width', 'height'], {}), "(\n 'Non-power-of-two textures not supported, building mipmaps for image with dimensions %sx%s.'\n , width, height)\n", (3170, 3289), False, 'import log\n'), ((3555, 3608), 'numpy.zeros', 'np.zeros', (['(width * height * components)'], {'dtype': 'np.uint8'}), '(width * height * components, dtype=np.uint8)\n', (3563, 3608), True, 'import numpy as np\n'), ((6323, 6335), 'image.Image', 'Image', (['image'], {}), '(image)\n', (6328, 6335), False, 'from image import Image\n'), ((6675, 6687), 'image.Image', 'Image', (['image'], {}), '(image)\n', (6680, 6687), False, 'from image import Image\n'), ((7692, 7877), 'log.warning', 'log.warning', (['"""Image used as texture does not contain a "sourcePath" attribute, making it impossible to cache it. 
This could cause slow rendering (always creates new texture)."""'], {}), '(\n \'Image used as texture does not contain a "sourcePath" attribute, making it impossible to cache it. This could cause slow rendering (always creates new texture).\'\n )\n', (7703, 7877), False, 'import log\n'), ((7971, 8039), 'log.debug', 'log.debug', (['"""Reloading texture for dynamic image %s."""', 'img.sourcePath'], {}), "('Reloading texture for dynamic image %s.', img.sourcePath)\n", (7980, 8039), False, 'import log\n'), ((8145, 8216), 'log.debug', 'log.debug', (['"""Creating new texture for dynamic image %s."""', 'img.sourcePath'], {}), "('Creating new texture for dynamic image %s.', img.sourcePath)\n", (8154, 8216), False, 'import log\n'), ((8378, 8389), 'time.time', 'time.time', ([], {}), '()\n', (8387, 8389), False, 'import time\n'), ((8497, 8566), 'log.error', 'log.error', (['"""Cannot get texture for file path %s, no such file."""', 'path'], {}), "('Cannot get texture for file path %s, no such file.', path)\n", (8506, 8566), False, 'import log\n'), ((8763, 8805), 'log.message', 'log.message', (['"""Reloading texture %s."""', 'path'], {}), "('Reloading texture %s.', path)\n", (8774, 8805), False, 'import log\n'), ((9167, 9220), 'log.debug', 'log.debug', (['"""Creating new texture for image %s."""', 'path'], {}), "('Creating new texture for image %s.', path)\n", (9176, 9220), False, 'import log\n'), ((9239, 9255), 'image.Image', 'Image', ([], {'path': 'path'}), '(path=path)\n', (9244, 9255), False, 'from image import Image\n'), ((10473, 10531), 'log.error', 'log.error', (['"""Error loading texture %s"""', 'path'], {'exc_info': '(True)'}), "('Error loading texture %s', path, exc_info=True)\n", (10482, 10531), False, 'import log\n'), ((1680, 1818), 'debugdump.dump.appendMessage', 'debugdump.dump.appendMessage', (["('GL.EXTENSION: GL_ARB_texture_non_power_of_two %s' % ('enabled' if cls.\n _npot else 'not available'))"], {}), "(\n 'GL.EXTENSION: GL_ARB_texture_non_power_of_two %s' % 
('enabled' if cls.\n _npot else 'not available'))\n", (1708, 1818), False, 'import debugdump\n'), ((8883, 8899), 'image.Image', 'Image', ([], {'path': 'path'}), '(path=path)\n', (8888, 8899), False, 'from image import Image\n'), ((9340, 9398), 'log.error', 'log.error', (['"""Error loading texture %s"""', 'path'], {'exc_info': '(True)'}), "('Error loading texture %s', path, exc_info=True)\n", (9349, 9398), False, 'import log\n'), ((9937, 9995), 'log.error', 'log.error', (['"""Error loading texture %s"""', 'path'], {'exc_info': '(True)'}), "('Error loading texture %s', path, exc_info=True)\n", (9946, 9995), False, 'import log\n'), ((3944, 4010), 'log.notice', 'log.notice', (['"""texture size (%d) too large, building mipmaps"""', 'width'], {}), "('texture size (%d) too large, building mipmaps', width)\n", (3954, 4010), False, 'import log\n'), ((5138, 5217), 'log.notice', 'log.notice', (['"""texture size (%d x %d) too large, building mipmaps"""', 'width', 'height'], {}), "('texture size (%d x %d) too large, building mipmaps', width, height)\n", (5148, 5217), False, 'import log\n'), ((8996, 9032), 'log.error', 'log.error', (['"""%s"""', 'text'], {'exc_info': '(True)'}), "('%s', text, exc_info=True)\n", (9005, 9032), False, 'import log\n')] |
# Minimal Dask smoke test: attach to a locally running scheduler and
# evaluate a tiny distributed reduction (sum of five ones).
import dask.array as da
import dask_bigquery
import dask_ml
import numpy as np
from dask.distributed import Client

client = Client("localhost:8786")  # connect to the scheduler on this host
x = da.sum(np.ones(5))             # build the lazy task graph
x.compute()                        # force execution on the cluster
| [
"dask.distributed.Client",
"numpy.ones"
] | [((126, 150), 'dask.distributed.Client', 'Client', (['"""localhost:8786"""'], {}), "('localhost:8786')\n", (132, 150), False, 'from dask.distributed import Client\n'), ((163, 173), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (170, 173), True, 'import numpy as np\n')] |
from openmdao.api import ExplicitComponent
import numpy as np
import os
import sys
from wisdem.pymap import pyMAP
from wisdem.commonse import gravity, Enum
from wisdem.commonse.utilities import assembleI, unassembleI
# Anchor technology options recognized by the anchor cost model in compute_cost()
Anchor = Enum('DRAGEMBEDMENT SUCTIONPILE')
# Fixed first-dimension size of the per-line output arrays (max mooring lines)
NLINES_MAX = 15
# Number of points per mooring line in the plotting discretization
NPTS_PLOT  = 20
class MapMooring(ExplicitComponent):
    """
    OpenMDAO Component class for mooring system attached to sub-structure of floating offshore wind turbines.
    Should be tightly coupled with Spar class for full system representation.

    The component writes a MAP++ input file describing the mooring system,
    runs MAP++ (via pyMAP) at the neutral position, at the operational heel,
    and around 360 degrees of vessel offset, and reports loads, stiffness,
    inertia, constraints, and cost.
    """

    def setup(self):
        # Variables local to the class and not OpenMDAO (filled by set_properties)
        self.min_break_load = None
        self.wet_mass_per_length = None
        self.axial_stiffness = None
        self.area = None
        self.cost_per_length = None
        self.finput = None          # list of lines forming the MAP++ input file
        self.tlpFlag = False        # True when the line is taut (TLP-style)

        # Environment
        self.add_input('water_density', val=0.0, units='kg/m**3', desc='density of water')
        self.add_input('water_depth', val=0.0, units='m', desc='water depth')

        # Material properties

        # Inputs from SparGeometry
        self.add_input('fairlead_radius', val=0.0, units='m', desc='Outer spar radius at fairlead depth (point of mooring attachment)')

        # Design variables
        self.add_input('fairlead', val=0.0, units='m', desc='Depth below water for mooring line attachment')
        self.add_input('mooring_line_length', val=0.0, units='m',desc='Unstretched total mooring line length')
        self.add_input('anchor_radius', val=0.0, units='m', desc='radius from center of spar to mooring anchor point')
        self.add_input('mooring_diameter', val=0.0, units='m',desc='diameter of mooring line')

        # User inputs (could be design variables)
        self.add_input('number_of_mooring_connections', val=3, desc='number of mooring connections on vessel')
        self.add_input('mooring_lines_per_connection', val=1, desc='number of mooring lines per connection')
        self.add_discrete_input('mooring_type', val='CHAIN', desc='chain, nylon, polyester, fiber, or iwrc')
        self.add_discrete_input('anchor_type', val='DRAGEMBEDMENT', desc='SUCTIONPILE or DRAGEMBEDMENT')
        self.add_input('max_offset', val=0.0, units='m',desc='X offsets in discretization')
        self.add_input('operational_heel', val=0.0, units='deg',desc='Maximum angle of heel allowable during operation')
        self.add_input('max_survival_heel', val=0.0, units='deg', desc='max heel angle for turbine survival')
        self.add_input('gamma_f', val=0.0, desc='Safety factor for mooring line tension')

        # Cost rates
        self.add_input('mooring_cost_factor', val=0.0, desc='miscellaneous cost factor in percent')

        # Outputs
        self.add_output('number_of_mooring_lines', val=0, desc='total number of mooring lines')
        self.add_output('mooring_mass', val=0.0, units='kg',desc='total mass of mooring')
        self.add_output('mooring_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of mooring system about fairlead-centerline point [xx yy zz xy xz yz]')
        self.add_output('mooring_cost', val=0.0, units='USD',desc='total cost for anchor + legs + miscellaneous costs')
        self.add_output('mooring_stiffness', val=np.zeros((6,6)), units='N/m', desc='Linearized stiffness matrix of mooring system at neutral (no offset) conditions.')
        self.add_output('anchor_cost', val=0.0, units='USD',desc='total cost for anchor')
        self.add_output('mooring_neutral_load', val=np.zeros((NLINES_MAX,3)), units='N',desc='mooring vertical load in all mooring lines')
        self.add_output('max_offset_restoring_force', val=0.0, units='N',desc='sum of forces in x direction after max offset')
        self.add_output('operational_heel_restoring_force', val=np.zeros((NLINES_MAX,3)), units='N',desc='forces for all mooring lines after operational heel')
        self.add_output('mooring_plot_matrix', val=np.zeros((NLINES_MAX, NPTS_PLOT, 3)), units='m', desc='data matrix for plotting')

        # Output constraints
        self.add_output('axial_unity', val=0.0, units='m',desc='range of damaged mooring')
        self.add_output('mooring_length_max', val=0.0, desc='mooring line length ratio to nodal distance')

        # Derivatives
        self.declare_partials('*', '*', method='fd', form='central', step=1e-6)

    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        """Sets mooring line properties then writes MAP input file and executes MAP.

        INPUTS:
        ----------
        inputs   : dictionary of input parameters
        outputs  : dictionary of output parameters

        OUTPUTS  : none (all unknown dictionary values set)
        """
        # Set characteristics based on regressions / empirical data
        self.set_properties(inputs, discrete_inputs)

        # Set geometry profile
        self.set_geometry(inputs, outputs)

        # Write MAP input file and analyze the system at every angle
        self.runMAP(inputs, discrete_inputs, outputs)

        # Compute costs for the system
        self.compute_cost(inputs, discrete_inputs, outputs)

    def set_properties(self, inputs, discrete_inputs):
        """Sets mooring line properties: Minimum Breaking Load, Mass per Length,
        Axial Stiffness, Cross-Sectional Area, Cost-per-Length.

        INPUTS:
        ----------
        inputs   : dictionary of input parameters

        OUTPUTS  : Parameters are class variables and are set internally

        References:
        https://daim.idi.ntnu.no/masteroppgaver/015/15116/masteroppgave.pdf
        http://offshoremechanics.asmedigitalcollection.asme.org/article.aspx?articleid=2543338
        https://www.orcina.com/SoftwareProducts/OrcaFlex/Documentation/Help/Content/html/
        Chain.htm
        Chain,AxialandBendingStiffness.htm
        Chain,MechanicalProperties.htm
        RopeWire.htm
        RopeWire,MinimumBreakingLoads.htm
        RopeWire,Massperunitlength.htm
        RopeWire,AxialandBendingStiffness.htm
        """
        # Unpack variables
        Dmooring = inputs['mooring_diameter']
        lineType = discrete_inputs['mooring_type'].upper()

        # Set parameters based on regressions for different mooring line type
        Dmooring2 = Dmooring**2

        # TODO: Costs per unit length are not synced with new input sources
        if lineType == 'CHAIN':
            self.min_break_load      = 2.74e7 * Dmooring2 * (44.0 - 80.0*Dmooring)
            # Use a linear fit to the other fit becuase it is poorly conditioned for optimization
            #self.min_break_load      = 1e3*np.maximum(1.0, -5445.2957034820683+176972.68498888266*Dmooring)
            self.wet_mass_per_length = 19.9e3 * Dmooring2 # From Orca, 7983.34117 OC3 definiton doc
            self.axial_stiffness     = 8.54e10 * Dmooring2 # From Orca, 4.74374e10 OC3 definiton doc,
            self.area                = 2.0 * 0.25 * np.pi * Dmooring2
            self.cost_per_length     = 3.415e4 * Dmooring2 #0.58*1e-3*self.min_break_load/gravity - 87.6

        elif lineType == 'NYLON':
            self.min_break_load      = 139357e3 * Dmooring2
            self.wet_mass_per_length = 0.6476e3 * Dmooring2
            self.axial_stiffness     = 1.18e8 * Dmooring2
            self.area                = 0.25 * np.pi * Dmooring2
            self.cost_per_length     = 3.415e4 * Dmooring2 #0.42059603*1e-3*self.min_break_load/gravity + 109.5

        elif lineType == 'POLYESTER':
            self.min_break_load      = 170466e3 * Dmooring2
            self.wet_mass_per_length = 0.7978e3 * Dmooring2
            self.axial_stiffness     = 1.09e9 * Dmooring2
            self.area                = 0.25 * np.pi * Dmooring2
            self.cost_per_length     = 3.415e4 * Dmooring2 #0.42059603*1e-3*self.min_break_load/gravity + 109.5

        elif lineType == 'FIBER': # Wire rope with fiber rope
            self.min_break_load      = 584175e3 * Dmooring2
            self.wet_mass_per_length = 3.6109e3 * Dmooring2
            self.axial_stiffness     = 3.67e10 * Dmooring2
            self.area                = 0.455 * 0.25 * np.pi * Dmooring2
            self.cost_per_length     = 2.0 * 6.32e4 * Dmooring2 #0.53676471*1e-3*self.min_break_load/gravity

        elif lineType == 'IWRC': # Wire rope with steel core
            self.min_break_load      = 633358e3 * Dmooring2
            self.wet_mass_per_length = 3.9897e3 * Dmooring2
            self.axial_stiffness     = 4.04e10 * Dmooring2
            self.area                = 0.455 * 0.25 * np.pi * Dmooring2
            self.cost_per_length     = 6.32e4 * Dmooring2 #0.33*1e-3*self.min_break_load/gravity + 139.5

        else:
            raise ValueError('Available line types are: chain nylon polyester fiber iwrc')

    def set_geometry(self, inputs, outputs):
        """Set catenary vs. taut (TLP) mode and the line-length constraint."""
        # Unpack variables
        fairleadDepth = inputs['fairlead']
        R_fairlead    = inputs['fairlead_radius']
        R_anchor      = inputs['anchor_radius']
        waterDepth    = inputs['water_depth']
        L_mooring     = inputs['mooring_line_length']
        max_heel      = inputs['max_survival_heel']
        gamma         = inputs['gamma_f']

        if L_mooring > (waterDepth - fairleadDepth):
            self.tlpFlag = False

            # Create constraint that line isn't too long that there is no catenary hang
            outputs['mooring_length_max'] = L_mooring / (0.95 * (R_anchor + waterDepth - fairleadDepth) )
        else:
            self.tlpFlag = True

            # Create constraint that we don't lose line tension
            outputs['mooring_length_max'] = L_mooring / ( (waterDepth - fairleadDepth - gamma*R_fairlead*np.sin(np.deg2rad(max_heel))) )

    def write_line_dictionary(self, inputs, discrete_inputs, cable_sea_friction_coefficient=0.65):
        """Writes LINE DICTIONARY section of input.map file

        INPUTS:
        ----------
        inputs   : dictionary of input parameters
        cable_sea_friction_coefficient : coefficient of friction with sea floor (defaults to 0.65)

        OUTPUTS  : none
        """
        # Unpack variables
        rhoWater = inputs['water_density']
        lineType = discrete_inputs['mooring_type'].lower()
        Dmooring = inputs['mooring_diameter']

        self.finput.append('---------------------- LINE DICTIONARY ---------------------------------------')
        self.finput.append('LineType  Diam      MassDenInAir   EA            CB   CIntDamp  Ca   Cdn    Cdt')
        self.finput.append('(-)       (m)       (kg/m)        (N)           (-)   (Pa-s)    (-)  (-)    (-)')
        self.finput.append('%s   %.5f   %.5f   %.5f   %.5f   1.0E8   0.6   -1.0   0.05' %
                           (lineType, Dmooring, self.wet_mass_per_length, self.axial_stiffness, cable_sea_friction_coefficient) )

    def write_node_properties_header(self):
        """Writes NODE PROPERTIES section header of input.map file

        INPUTS: none
        ----------

        OUTPUTS  : none
        """
        self.finput.append('---------------------- NODE PROPERTIES ---------------------------------------')
        # Doesn't like some weird character here somewhere
        #self.finput.append('Node  Type       X       Y       Z      M     B     FX      FY      FZ')
        #self.finput.append('(-)   (-)       (m)     (m)     (m)    (kg)  (m^3)  (N)     (N)     (N)')
        self.finput.append('Node Type X     Y    Z   M     V FX FY FZ')
        self.finput.append('(-)  (-) (m)   (m)  (m) (kg) (m^3) (kN) (kN) (kN)')

    def write_node_properties(self, number, node_type, x_pos, y_pos, z_pos,
                              point_mass=0, displaced_volume=0,
                              x_force=None, y_force=None, z_force=None):
        """Writes NODE PROPERTIES data of input.map file.  Nodes are connections between mooring lines and bridles, vessels, and anchors

        INPUTS:
        ----------
        number           : The node number listing in the input file
        node_type        : fix / vessel / connect (fix=anchor)
        x_, y_, z_pos    : position of node in coordinate system (separate inputs)
        point_mass       : see MAP reference (defaults to 0)
        displaced_volume : see MAP reference (defaults to 0)
        x_, y_, z_force  : see MAP reference (defaults to None)

        OUTPUTS  : none
        """
        # Ensure this connection is something MAP understands
        nodeStr = node_type.lower()
        if not nodeStr in ['fix', 'connect', 'vessel']:
            raise ValueError('%s is not a valid node type for node %d' % (node_type, number))

        # If this is a node between two lines have to specify connection details
        if nodeStr == 'connect':
            try:
                x_force = float(x_force)
                y_force = float(y_force)
                z_force = float(z_force)
            # Narrowed from a bare `except:` so that only conversion failures
            # (None or non-numeric values) trigger the error, instead of
            # swallowing every exception including KeyboardInterrupt.
            except (TypeError, ValueError):
                raise ValueError('%s must have numerical force applied values.' % node_type)

        # Set location strings
        forceStr = '#   #   #'
        if nodeStr == 'connect':
            forceStr = '%f   %f   %f' % (x_force, y_force, z_force)
            posStr   = '#%f   #%f   #%f   ' % (x_pos, y_pos, z_pos)
        elif nodeStr == 'fix':
            posStr   = '%f   %f   depth   ' % (x_pos, y_pos)
        elif nodeStr == 'vessel':
            posStr   = '%f   %f   %f   ' % (x_pos, y_pos, z_pos)

        # Write the connection line
        line = ('%d   ' % number)
        line += ('%s   ' % node_type)
        line += (posStr)
        line += ('%f   %f   ' % (point_mass, displaced_volume) )
        line += (forceStr)
        self.finput.append(line)

    def write_line_properties(self, inputs, discrete_inputs, line_number=1, anchor_node=2, fairlead_node=1, flags=''):
        """Writes LINE PROPERTIES section of input.map file that connects multiple nodes

        INPUTS:
        ----------
        inputs        : dictionary of input parameters- only 'mooring_type' is used
        line_number   : Line ID number (defaults to 1)
        anchor_node   : Node number corresponding to anchor (defaults to 1)
        fairlead_node : Node number corresponding to fairlead (vessel) node (defaults to 2)
        flags         : see MAP reference (defaults to empty string ' ')

        OUTPUTS  : none
        """
        # Add flag for taut lines
        if self.tlpFlag:
            flags += ' LINEAR SPRING'

        # NOTE(review): write_line_dictionary registers the line type lowercased,
        # while here the raw discrete input is used; confirm MAP++ matches the
        # two case-insensitively.
        self.finput.append('---------------------- LINE PROPERTIES ---------------------------------------')
        self.finput.append('Line    LineType  UnstrLen  NodeAnch  NodeFair  Flags')
        self.finput.append('(-)      (-)       (m)       (-)       (-)       (-)')
        self.finput.append('%d   %s   %f   %d   %d   %s' %
                           (line_number, discrete_inputs['mooring_type'], inputs['mooring_line_length'], anchor_node, fairlead_node, flags) )

    def write_solver_options(self, inputs):
        """Writes SOLVER OPTIONS section of input.map file,
        which includes repeating node/line arrangement in even angular spacing around structure.

        INPUTS:
        ----------
        inputs        : dictionary of input parameters

        OUTPUTS  : none
        """
        # Unpack variables
        n_connect = max(1, int(inputs['number_of_mooring_connections']))

        self.finput.append('---------------------- SOLVER OPTIONS-----------------------------------------')
        self.finput.append('Option')
        self.finput.append('(-)')
        self.finput.append('help')
        self.finput.append(' integration_dt 0')
        self.finput.append(' kb_default 3.0e6')
        self.finput.append(' cb_default 3.0e5')
        self.finput.append(' wave_kinematics ')
        self.finput.append('inner_ftol 1e-5')
        self.finput.append('inner_gtol 1e-5')
        self.finput.append('inner_xtol 1e-5')
        self.finput.append('outer_tol 1e-3')
        self.finput.append(' pg_cooked 10000 1')
        self.finput.append(' outer_fd')
        self.finput.append(' outer_bd')
        self.finput.append(' outer_cd')
        self.finput.append(' inner_max_its 200')
        self.finput.append(' outer_max_its 600')
        # Repeat the details for the one mooring line multiple times
        # (even angular spacing around the structure)
        angles  = np.linspace(0, 360, n_connect+1)[1:-1]
        line    = 'repeat'
        for degree in angles:
            line += (' %d' % degree)
        self.finput.append(line)
        self.finput.append(' krylov_accelerator 3')
        self.finput.append(' ref_position 0.0 0.0 0.0')

    def write_input_file(self, inputs, discrete_inputs):
        """Writes SOLVER OPTIONS section of input.map file,
        which includes repeating node/line arrangement in even angular spacing around structure.

        INPUTS:
        ----------
        inputs        : dictionary of input parameters

        OUTPUTS  : none
        """
        # Unpack variables
        fairleadDepth = inputs['fairlead']
        R_fairlead    = inputs['fairlead_radius']
        R_anchor      = inputs['anchor_radius']
        n_connect     = int(inputs['number_of_mooring_connections'])
        n_lines       = int(inputs['mooring_lines_per_connection'])
        ntotal        = n_connect * n_lines

        # Open the map input file
        self.finput = []

        # Write the "Line Dictionary" section
        self.write_line_dictionary(inputs, discrete_inputs)

        # Write the "Node Properties" section
        self.write_node_properties_header()
        # One end on sea floor the other at fairlead
        self.write_node_properties(1, "VESSEL", R_fairlead, 0, -fairleadDepth)
        if n_lines > 1:
            angles  = np.linspace(0, 2*np.pi, ntotal+1)[:n_lines]
            angles -= np.mean(angles)
            anchorx = R_anchor * np.cos( angles )
            anchory = R_anchor * np.sin( angles )
            for k in range(n_lines):
                self.write_node_properties(k+2, "FIX", anchorx[k], anchory[k], None)
        else:
            self.write_node_properties(2, "FIX", R_anchor, 0, None)

        # Write the "Line Properties" section
        for k in range(n_lines):
            self.write_line_properties(inputs, discrete_inputs, line_number=k+1, anchor_node=k+2, fairlead_node=1)

        # Write the "Solve Options" section
        self.write_solver_options(inputs)

    def runMAP(self, inputs, discrete_inputs, outputs):
        """Writes MAP input file, executes, and then queries MAP to find
        maximum loading and displacement from vessel displacement around all 360 degrees

        INPUTS:
        ----------
        inputs   : dictionary of input parameters
        outputs : dictionary of output parameters

        OUTPUTS  : none (multiple unknown dictionary values set)
        """
        # Unpack variables
        rhoWater      = inputs['water_density']
        waterDepth    = inputs['water_depth']
        fairleadDepth = inputs['fairlead']
        Dmooring      = inputs['mooring_diameter']
        offset        = inputs['max_offset']
        heel          = inputs['operational_heel']
        gamma         = inputs['gamma_f']
        n_connect     = int(inputs['number_of_mooring_connections'])
        n_lines       = int(inputs['mooring_lines_per_connection'])
        ntotal        = n_connect * n_lines

        # Write the mooring system input file for this design
        self.write_input_file(inputs, discrete_inputs)

        # Initiate MAP++ for this design
        mymap = pyMAP( )
        #mymap.ierr = 0
        mymap.map_set_sea_depth(waterDepth)
        mymap.map_set_gravity(gravity)
        mymap.map_set_sea_density(rhoWater)
        mymap.read_list_input(self.finput)
        mymap.init( )

        # Get the stiffness matrix at neutral position
        mymap.displace_vessel(0, 0, 0, 0, 0, 0)
        mymap.update_states(0.0, 0)
        K = mymap.linear(1e-4) # Input finite difference epsilon
        outputs['mooring_stiffness'] = np.array( K )
        mymap.displace_vessel(0, 0, 0, 0, 0, 0)
        mymap.update_states(0.0, 0)

        # Get the vertical load on the structure and plotting data
        F_neutral = np.zeros((NLINES_MAX, 3))
        plotMat   = np.zeros((NLINES_MAX, NPTS_PLOT, 3))
        nptsMOI   = 100
        xyzpts    = np.zeros((ntotal, nptsMOI, 3)) # For MOI calculation
        for k in range(ntotal):
            (F_neutral[k,0], F_neutral[k,1], F_neutral[k,2]) = mymap.get_fairlead_force_3d(k)
            plotMat[k,:,0] = mymap.plot_x(k, NPTS_PLOT)
            plotMat[k,:,1] = mymap.plot_y(k, NPTS_PLOT)
            plotMat[k,:,2] = mymap.plot_z(k, NPTS_PLOT)
            xyzpts[k,:,0]  = mymap.plot_x(k, nptsMOI)
            xyzpts[k,:,1]  = mymap.plot_y(k, nptsMOI)
            xyzpts[k,:,2]  = mymap.plot_z(k, nptsMOI)
            if self.tlpFlag:
                # Seems to be a bug in the plot arrays from MAP++ for plotting output with taut lines
                plotMat[k,:,2] = np.linspace(-fairleadDepth, -waterDepth, NPTS_PLOT)
                xyzpts[k,:,2] = np.linspace(-fairleadDepth, -waterDepth, nptsMOI)
        outputs['mooring_neutral_load'] = F_neutral
        outputs['mooring_plot_matrix'] = plotMat

        # Fine line segment length, ds = sqrt(dx^2 + dy^2 + dz^2)
        xyzpts_dx = np.gradient(xyzpts[:,:,0], axis=1)
        xyzpts_dy = np.gradient(xyzpts[:,:,1], axis=1)
        xyzpts_dz = np.gradient(xyzpts[:,:,2], axis=1)
        xyzpts_ds = np.sqrt(xyzpts_dx**2 + xyzpts_dy**2 + xyzpts_dz**2)

        # Initialize inertia tensor integrands in https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor
        # Taking MOI relative to body centerline at fairlead depth
        r0 = np.array([0.0, 0.0, -fairleadDepth])
        R  = np.zeros((ntotal, nptsMOI, 6))
        for ii in range(nptsMOI):
            for k in range(ntotal):
                r = xyzpts[k,ii,:] - r0
                R[k,ii,:] = unassembleI( np.dot(r,r)*np.eye(3) - np.outer(r,r) )
        Imat = self.wet_mass_per_length * np.trapz(R, x=xyzpts_ds[:,:,np.newaxis], axis=1)
        outputs['mooring_moments_of_inertia'] = np.abs( Imat.sum(axis=0) )

        # Get the restoring moment at maximum angle of heel
        # Since we don't know the substucture CG, have to just get the forces of the lines now and do the cross product later
        # We also want to allow for arbitraty wind direction and yaw of rotor relative to mooring lines, so we will compare
        # pitch and roll forces as extremes
        # TODO: This still isgn't quite the same as clocking the mooring lines in different directions,
        # which is what we want to do, but that requires multiple input files and solutions
        Fh = np.zeros((NLINES_MAX,3))
        mymap.displace_vessel(0, 0, 0, 0, heel, 0)
        mymap.update_states(0.0, 0)
        for k in range(ntotal):
            Fh[k][0], Fh[k][1], Fh[k][2] = mymap.get_fairlead_force_3d(k)
        outputs['operational_heel_restoring_force'] = Fh

        # Get angles by which to find the weakest line
        dangle  = 2.0
        angles  = np.deg2rad( np.arange(0.0, 360.0, dangle) )
        nangles = len(angles)

        # Get restoring force at weakest line at maximum allowable offset
        # Will global minimum always be along mooring angle?
        max_tension   = 0.0
        max_angle     = None
        min_angle     = None
        F_max_tension = None
        F_min         = np.inf
        T = np.zeros((NLINES_MAX,))
        F = np.zeros((NLINES_MAX,))
        # Loop around all angles to find weakest point
        for a in angles:
            # Unit vector and offset in x-y components
            idir  = np.array([np.cos(a), np.sin(a)])
            surge = offset * idir[0]
            sway  = offset * idir[1]

            # Get restoring force of offset at this angle
            mymap.displace_vessel(surge, sway, 0, 0, 0, 0) # 0s for z, angles
            mymap.update_states(0.0, 0)
            for k in range(ntotal):
                # Force in x-y-z coordinates
                fx,fy,fz = mymap.get_fairlead_force_3d(k)
                T[k]     = np.sqrt(fx*fx + fy*fy + fz*fz)
                # Total restoring force
                F[k]     = np.dot([fx, fy], idir)

            # Check if this is the weakest direction (highest tension)
            tempMax = T.max()
            if tempMax > max_tension:
                max_tension   = tempMax
                F_max_tension = F.sum()
                max_angle     = a
            if F.sum() < F_min:
                F_min     = F.sum()
                min_angle = a

        # Store the weakest restoring force when the vessel is offset the maximum amount
        outputs['max_offset_restoring_force'] = F_min

        # Check for good convergence
        if (plotMat[0,-1,-1] + fairleadDepth) > 1.0:
            outputs['axial_unity'] = 1e30
        else:
            outputs['axial_unity'] = gamma * max_tension / self.min_break_load

        mymap.end()

    def compute_cost(self, inputs, discrete_inputs, outputs):
        """Computes cost, based on mass scaling, of mooring system.

        INPUTS:
        ----------
        inputs   : dictionary of input parameters
        outputs : dictionary of output parameters

        OUTPUTS  : none (mooring_cost/mass unknown dictionary values set)
        """
        # Unpack variables
        rhoWater   = inputs['water_density']
        L_mooring  = inputs['mooring_line_length']
        anchorType = discrete_inputs['anchor_type']
        costFact   = inputs['mooring_cost_factor']
        n_connect  = int(inputs['number_of_mooring_connections'])
        n_lines    = int(inputs['mooring_lines_per_connection'])
        ntotal     = n_connect * n_lines

        # Cost of anchors
        # (isinstance replaces the non-idiomatic `type(x) == type('')` check)
        if isinstance(anchorType, str): anchorType = Anchor[anchorType.upper()]
        if anchorType == Anchor['DRAGEMBEDMENT']:
            anchor_rate = 1e-3 * self.min_break_load / gravity / 20*2000
        elif anchorType == Anchor['SUCTIONPILE']:
            anchor_rate = 150000.* np.sqrt(1e-3*self.min_break_load/gravity/1250.)
        else:
            raise ValueError('Anchor Type must be DRAGEMBEDMENT or SUCTIONPILE')
        anchor_total = anchor_rate*ntotal

        # Cost of all of the mooring lines
        legs_total = ntotal * self.cost_per_length * L_mooring

        # Total summations
        outputs['anchor_cost']  = anchor_total
        outputs['mooring_cost'] = costFact*(legs_total + anchor_total)
        outputs['mooring_mass'] = self.wet_mass_per_length*L_mooring*ntotal
        outputs['number_of_mooring_lines'] = ntotal
| [
"numpy.mean",
"numpy.eye",
"numpy.trapz",
"numpy.sqrt",
"wisdem.pymap.pyMAP",
"numpy.array",
"wisdem.commonse.Enum",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.dot",
"numpy.outer",
"numpy.sin",
"numpy.deg2rad",
"numpy.gradient",
"numpy.arange"
] | [((231, 264), 'wisdem.commonse.Enum', 'Enum', (['"""DRAGEMBEDMENT SUCTIONPILE"""'], {}), "('DRAGEMBEDMENT SUCTIONPILE')\n", (235, 264), False, 'from wisdem.commonse import gravity, Enum\n'), ((19924, 19931), 'wisdem.pymap.pyMAP', 'pyMAP', ([], {}), '()\n', (19929, 19931), False, 'from wisdem.pymap import pyMAP\n'), ((20393, 20404), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (20401, 20404), True, 'import numpy as np\n'), ((20587, 20612), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, 3)'], {}), '((NLINES_MAX, 3))\n', (20595, 20612), True, 'import numpy as np\n'), ((20633, 20669), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, NPTS_PLOT, 3)'], {}), '((NLINES_MAX, NPTS_PLOT, 3))\n', (20641, 20669), True, 'import numpy as np\n'), ((20714, 20744), 'numpy.zeros', 'np.zeros', (['(ntotal, nptsMOI, 3)'], {}), '((ntotal, nptsMOI, 3))\n', (20722, 20744), True, 'import numpy as np\n'), ((21719, 21755), 'numpy.gradient', 'np.gradient', (['xyzpts[:, :, 0]'], {'axis': '(1)'}), '(xyzpts[:, :, 0], axis=1)\n', (21730, 21755), True, 'import numpy as np\n'), ((21774, 21810), 'numpy.gradient', 'np.gradient', (['xyzpts[:, :, 1]'], {'axis': '(1)'}), '(xyzpts[:, :, 1], axis=1)\n', (21785, 21810), True, 'import numpy as np\n'), ((21829, 21865), 'numpy.gradient', 'np.gradient', (['xyzpts[:, :, 2]'], {'axis': '(1)'}), '(xyzpts[:, :, 2], axis=1)\n', (21840, 21865), True, 'import numpy as np\n'), ((21884, 21941), 'numpy.sqrt', 'np.sqrt', (['(xyzpts_dx ** 2 + xyzpts_dy ** 2 + xyzpts_dz ** 2)'], {}), '(xyzpts_dx ** 2 + xyzpts_dy ** 2 + xyzpts_dz ** 2)\n', (21891, 21941), True, 'import numpy as np\n'), ((22130, 22166), 'numpy.array', 'np.array', (['[0.0, 0.0, -fairleadDepth]'], {}), '([0.0, 0.0, -fairleadDepth])\n', (22138, 22166), True, 'import numpy as np\n'), ((22180, 22210), 'numpy.zeros', 'np.zeros', (['(ntotal, nptsMOI, 6)'], {}), '((ntotal, nptsMOI, 6))\n', (22188, 22210), True, 'import numpy as np\n'), ((23130, 23155), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, 3)'], {}), '((NLINES_MAX, 
3))\n', (23138, 23155), True, 'import numpy as np\n'), ((23870, 23893), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX,)'], {}), '((NLINES_MAX,))\n', (23878, 23893), True, 'import numpy as np\n'), ((23906, 23929), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX,)'], {}), '((NLINES_MAX,))\n', (23914, 23929), True, 'import numpy as np\n'), ((16633, 16667), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', '(n_connect + 1)'], {}), '(0, 360, n_connect + 1)\n', (16644, 16667), True, 'import numpy as np\n'), ((18145, 18160), 'numpy.mean', 'np.mean', (['angles'], {}), '(angles)\n', (18152, 18160), True, 'import numpy as np\n'), ((22442, 22492), 'numpy.trapz', 'np.trapz', (['R'], {'x': 'xyzpts_ds[:, :, np.newaxis]', 'axis': '(1)'}), '(R, x=xyzpts_ds[:, :, np.newaxis], axis=1)\n', (22450, 22492), True, 'import numpy as np\n'), ((23514, 23543), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', 'dangle'], {}), '(0.0, 360.0, dangle)\n', (23523, 23543), True, 'import numpy as np\n'), ((3050, 3061), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3058, 3061), True, 'import numpy as np\n'), ((3350, 3366), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (3358, 3366), True, 'import numpy as np\n'), ((3611, 3636), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, 3)'], {}), '((NLINES_MAX, 3))\n', (3619, 3636), True, 'import numpy as np\n'), ((3889, 3914), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, 3)'], {}), '((NLINES_MAX, 3))\n', (3897, 3914), True, 'import numpy as np\n'), ((4036, 4072), 'numpy.zeros', 'np.zeros', (['(NLINES_MAX, NPTS_PLOT, 3)'], {}), '((NLINES_MAX, NPTS_PLOT, 3))\n', (4044, 4072), True, 'import numpy as np\n'), ((18079, 18116), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(ntotal + 1)'], {}), '(0, 2 * np.pi, ntotal + 1)\n', (18090, 18116), True, 'import numpy as np\n'), ((18194, 18208), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (18200, 18208), True, 'import numpy as np\n'), ((18244, 18258), 'numpy.sin', 'np.sin', (['angles'], {}), 
'(angles)\n', (18250, 18258), True, 'import numpy as np\n'), ((21387, 21438), 'numpy.linspace', 'np.linspace', (['(-fairleadDepth)', '(-waterDepth)', 'NPTS_PLOT'], {}), '(-fairleadDepth, -waterDepth, NPTS_PLOT)\n', (21398, 21438), True, 'import numpy as np\n'), ((21472, 21521), 'numpy.linspace', 'np.linspace', (['(-fairleadDepth)', '(-waterDepth)', 'nptsMOI'], {}), '(-fairleadDepth, -waterDepth, nptsMOI)\n', (21483, 21521), True, 'import numpy as np\n'), ((24535, 24571), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy + fz * fz)'], {}), '(fx * fx + fy * fy + fz * fz)\n', (24542, 24571), True, 'import numpy as np\n'), ((24633, 24655), 'numpy.dot', 'np.dot', (['[fx, fy]', 'idir'], {}), '([fx, fy], idir)\n', (24639, 24655), True, 'import numpy as np\n'), ((24095, 24104), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (24101, 24104), True, 'import numpy as np\n'), ((24106, 24115), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (24112, 24115), True, 'import numpy as np\n'), ((26554, 26609), 'numpy.sqrt', 'np.sqrt', (['(0.001 * self.min_break_load / gravity / 1250.0)'], {}), '(0.001 * self.min_break_load / gravity / 1250.0)\n', (26561, 26609), True, 'import numpy as np\n'), ((22385, 22399), 'numpy.outer', 'np.outer', (['r', 'r'], {}), '(r, r)\n', (22393, 22399), True, 'import numpy as np\n'), ((9914, 9934), 'numpy.deg2rad', 'np.deg2rad', (['max_heel'], {}), '(max_heel)\n', (9924, 9934), True, 'import numpy as np\n'), ((22361, 22373), 'numpy.dot', 'np.dot', (['r', 'r'], {}), '(r, r)\n', (22367, 22373), True, 'import numpy as np\n'), ((22373, 22382), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (22379, 22382), True, 'import numpy as np\n')] |
import json
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import csv
import time
import copy
import os
from datetime import datetime
import error_metrics
# GridLAB-D federate number; used to build ADC names such as 'M1_ADC7'
global gld_num  # NOTE(review): 'global' at module level is a no-op
gld_num = '1'
# All input/output files below are opened relative to this directory
os.chdir('/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process')
# discard_time = 3600*4
## loading cosim_manager data
lp = open('./cosim_data.json').read()
cosim_data = json.loads(lp)
## Appending all cosim data with one more entry
# Every per-key signal list gets one extra sample; presumably this pads the
# series so the final interval is represented — TODO confirm intent.
for key, value in cosim_data.items():
    for k, v in value.items():
        if k == 'Timestamp':
            # v.append(v[-1]+v[-1]-v[-2]) # adding one more timestamp
            v.append(v[-1] + v[0])  # extend the time axis by first + last value
        else:
            v.append(v[-1]) # repeating the last value again
        cosim_data[key][k] = v
# Use the timestamps of the first entry as the common time axis (integer seconds)
cosim_time = cosim_data[list(cosim_data)[0]]['Timestamp']
cosim_data['time'] = np.array([int(i) for i in cosim_time])
# create mapping of each node to its ADC
# CSV rows: node name in the first column, assigned ADC id in the last column.
adc_nodes_map=[]
adc_file = "./../../../GLD/initial_scenario/ADC_Location/ADC_Placement_by_Voltage_Drop.csv"
with open(adc_file, mode='r') as csv_file:
    for i in range(1):
        next(csv_file)  # skip the single header row
    csv_reader = csv.reader(csv_file)
    for row in csv_reader:
        adc_nodes_map.append([row[0], row[-1]])
# 2-column string array: [:,0] = node name, [:,1] = ADC id
adc_nodes_map = np.array(adc_nodes_map)
# Helper: look up which ADC a given GridLAB-D node belongs to.
def find_adc(node, adc_nodes_map=adc_nodes_map):
    """Return the ADC name (e.g. 'M1_ADC5') assigned to *node*.

    Searches the first column of *adc_nodes_map* for the node and joins the
    matching ADC id (last column) with the module-level ``gld_num``.
    Raises IndexError if the node is not present in the map.
    """
    match_rows = np.where(adc_nodes_map[:, 0] == node)[0]
    adc_id = adc_nodes_map[match_rows[0], 1]
    return 'M' + gld_num + '_ADC' + adc_id
# Loading gld_data.json
lp = open('GLD_' + gld_num + '_data.json').read()
gld_data = json.loads(lp)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc['der']['adc name']=[indexes in the gld data]
# t=time.time()
# adc_ind = {}
# der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
# for der in der_type:
#     adc_ind[der[0]] = {}
#     obj = gld_data[der[0]][der[1]]['object_name']
#     for a in obj:
#         b = a.split('_')[-2][1:]
#         # if 'l102_tm' in a:
#         if find_adc(b) not in adc_ind[der[0]]:
#             adc_ind[der[0]][find_adc(b)] = []
#         adc_ind[der[0]][find_adc(b)].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc_ind['adc name']['der']=[indexes in the gld data]
t=time.time()
adc_ind = {}
der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
for der in der_type:
    obj = gld_data[der[0]][der[1]]['object_name']
    for a in obj:
        # Extract the node name from the object name (second-to-last '_' token,
        # with its leading character dropped) — presumably a naming convention
        # like '..._nXXX_...'; TODO confirm against GLD object naming.
        b = a.split('_')[-2][1:]
        # if 'l102_tm' in a:
        if find_adc(b) == 'M1_ADCNONE':
            continue  # node not assigned to any ADC
        if find_adc(b) not in adc_ind:
            adc_ind[find_adc(b)] = {}
        if der[0] not in adc_ind[find_adc(b)]:
            adc_ind[find_adc(b)][der[0]]=[]
        # NOTE(review): obj.index(a) is an O(n) scan inside an O(n) loop;
        # enumerate(obj) would avoid the quadratic cost.
        adc_ind[find_adc(b)][der[0]].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
#Voltages
# FIX: np.float and np.cfloat were removed in NumPy 1.24 (deprecated since
# 1.20).  The builtins float / complex are exact replacements (the aliases
# pointed at the builtins), yielding the same float64 / complex128 dtypes.
voltages = np.array(gld_data['hvac']['voltages']['values']).astype(complex)
# Actuation Signals
#hrs = gld_data['battInv']['P_Out']['time']
battInv_Pout = np.array(gld_data['battInv']['P_Out']['values']).astype(float)
battInv_Qout = np.array(gld_data['battInv']['Q_Out']['values']).astype(float)
solarInv_Pout = np.array(gld_data['solarInv']['P_Out']['values']).astype(float)
solarInv_Qout = np.array(gld_data['solarInv']['Q_Out']['values']).astype(float)
hvac_seth = np.array(gld_data['hvac']['heating_setpoint']['values']).astype(float)
hvac_setc = np.array(gld_data['hvac']['cooling_setpoint']['values']).astype(float)
hvac_cooling_demand = (np.array(gld_data['hvac']['cooling_demand']['values'])).astype(float)
hvac_fan_power = (np.array(gld_data['hvac']['fan_design_power']['values'])).astype(float)/1000
hvac_rating = hvac_cooling_demand+hvac_fan_power
hvac_c_thermal_capacity = (np.array(gld_data['hvac']['design_cooling_capacity']['values'])).astype(float)
hvac_c_cop = (np.array(gld_data['hvac']['cooling_COP']['values'])).astype(float)
# Alternate HVAC kW rating from thermal capacity (Btu/h -> tons -> kW per COP)
hvac_rating1 = hvac_c_thermal_capacity/12000/hvac_c_cop*3.5168
wh_tanks = np.array(gld_data['wh']['tank_setpoint']['values']).astype(float)
hvac_c_status = np.array(gld_data['hvac']['cooling_status']['values']).astype(float)
wh_rating = np.array(gld_data['wh']['heating_element_capacity']['values']).astype(float)
battInv_rated = (np.array(gld_data['battInv']['rated_power']['values'])).astype(float)
batt_rated = (np.array(gld_data['batt']['rated_power']['values'])).astype(float)
solar_rated = (np.array(gld_data['solar']['rated_power']['values'])).astype(float)
# Device Power Outputs (complex power readings)
battInv_power = (np.array(gld_data['battInv']['power']['values'])).astype(complex)
solarInv_power = (np.array(gld_data['solarInv']['power']['values'])).astype(complex)
hvac_power = (np.array(gld_data['hvac']['power']['values'])).astype(complex)
wh_power = (np.array(gld_data['wh']['power']['values'])).astype(complex)
solar_VA = (np.array(gld_data['solar']['VA']['values'])).astype(complex)
# Aggregate device outputs per ADC into adc_agg:
#   adc_agg[<adc name>][<der type>] = sum over all devices of that type
# adc_Prating collects the corresponding summed power ratings per DER type.
t = time.time()
adc_agg = copy.deepcopy(adc_ind)
adc_Prating = {}
num_der = {}
total_num_der = 0
for adc in adc_ind:
    idx = adc_ind[adc]          # original per-type device column lists
    agg = adc_agg[adc]          # per-type aggregated time series (overwrites the copied lists)
    rating = {}
    adc_Prating[adc] = rating
    if "battInv" in agg:
        cols = idx['battInv']
        agg["battInv"] = np.sum(battInv_power[:, cols], 1) / 1000
        agg["batt_Pout"] = np.sum(battInv_Pout[:, cols], 1) / 1000
        agg["batt_Qout"] = np.sum(battInv_Qout[:, cols], 1) / 1000
        # NOTE: "total" aliases the "battInv" array here, so accumulation below
        # deliberately uses `a = a + b` (a fresh array) rather than in-place `+=`.
        agg["total"] = agg["battInv"]
        rating["battInv"] = np.sum(battInv_rated[0, cols]) / 1000
        rating["total"] = rating["battInv"]
    if "solarInv" in agg:
        cols = idx['solarInv']
        agg["solarInv"] = np.sum(solarInv_power[:, cols], 1) / 1000
        agg["solar_Pout"] = np.sum(solarInv_Pout[:, cols], 1) / 1000
        agg["solar_Qout"] = np.sum(solarInv_Qout[:, cols], 1) / 1000
        agg["total"] = agg["total"] + agg["solarInv"]
        rating["solarInv"] = np.sum(solar_rated[0, cols]) / 1000
        rating["solarVA"] = np.sum(solar_VA[:, cols], 1) / 1000
        rating["total"] = rating["total"] + rating["solarInv"]
    if "hvac" in agg:
        cols = idx['hvac']
        agg["hvac"] = np.sum(hvac_power[:, cols], 1)
        agg["total"] = agg["total"] + agg["hvac"]
        rating["hvac"] = np.sum(hvac_rating[0, cols])
        rating["total"] = rating["total"] + rating["hvac"]
    if "wh" in agg:
        cols = idx['wh']
        agg["wh"] = np.sum(wh_power[:, cols], 1)
        agg["total"] = agg["total"] + agg["wh"]
        rating["wh"] = np.sum(wh_rating[0, cols])
        rating["total"] = rating["total"] + rating["wh"]
# Score the aggregated outputs and ratings against the co-simulation set-points.
error_metrics.calculate(adc_agg, adc_Prating, cosim_data)
# Plot aggregate devices output at given adc for each der type.
# Timestamps look like '<date> HH:MM:SS'; keep the clock part and convert it
# to seconds elapsed since the first sample.
time_format = '%H:%M:%S'
time_stamp = [t.split(' ')[1] for t in gld_data['wh']['power']['time']]
# Reuse time_format here (the format literal was previously duplicated inline).
time_h = [datetime.strptime(t, time_format) for t in time_stamp]
hrs = [int((i - time_h[0]).total_seconds()) for i in time_h]
# start_time = 3600*4
adc_num = 'M1_ADC18'
# total_rating = sum(wh_rating[0, adc_ind[adc_num]['wh']]) + sum(hvac_rating[0, adc_ind[adc_num]['hvac']]) + sum(
#     battInv_rated[0, adc_ind[adc_num]['battInv']]) / 1000 + sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000
# Figure 1: aggregated DER behavior at the selected ADC.
#   top row    = per-DER aggregates (left: P in kW, right: Q in kVar)
#   bottom row = total ADC output vs. the ADC-level set-point
# Solid lines are measured aggregates over `hrs`; dashed step traces are the
# optimizer set-points sampled on cosim_data['time'].
fig1, ax1 = plt.subplots(2, 2, sharex='col')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout']), label='Battery', color='C0')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['solar_Pout']), label='Solar', color='C1')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout']), label='Solar+Battery', color='C2')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['wh']), label='WH', color='C3')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['hvac']), label='HVAC', color='C4')
#
# ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_BATT'])/1,'k', linestyle='--', color='C0', where='post', label='battery set point')
# ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_PV'])/1,'k', linestyle='--', color='C1', where='post', label='pv set point')
# NOTE(review): in the step() calls below the 'k' format string is overridden
# by the explicit color= kwarg, so the traces render in C2/C3/C4, not black.
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_PV']) + np.array(cosim_data[adc_num]['Popt_BATT']),'k', linestyle='--', color='C2', where='post', label='PV+Batt set point')
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_WH'])/1,'k', linestyle='--', color='C3', where='post', label='WH set point')
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_HVAC'])/1,'k', linestyle='--', color='C4', where='post', label='AC set point')
ax1[0,0].set_ylabel("kW")
ax1[0,0].set_title("Aggregated kW at ADC "+adc_num+" by DER")
ax1[0,0].legend(loc='best')
# plt.xlim(left=start_time)
# ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['batt_Qout']), label='Battery')
# ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['solar_Qout']), label='Solar')
ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout']), label='Solar+Battery', color='C2')
ax1[0,1].plot(hrs, np.imag(adc_agg[adc_num]['wh']), label='WH', color='C3')
ax1[0,1].plot(hrs, np.imag(adc_agg[adc_num]['hvac']), label='HVAC', color='C4')
# ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_BATT'])/1,'k', linestyle='--', color='C0', where='post', label='battery set point')
# ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_PV'])/1,'k', linestyle='--', color='C1', where='post', label='pv set point')
ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_PV']) + np.array(cosim_data[adc_num]['Qopt_BATT']),'k', linestyle='--', color='C2', where='post', label='PV+Batt set point')
ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_WH'])/1,'k', linestyle='--', color='C3', where='post', label='WH set point')
ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_HVAC'])/1,'k', linestyle='--', color='C4', where='post', label='AC set point')
ax1[0,1].set_ylabel("kVar")
ax1[0,1].set_title("Aggregated kVar at ADC "+adc_num+" by DER")
ax1[0,1].legend(loc='best')
# plt.xlim(left=start_time)
# Bottom-left: total real power vs. ADC P* and the servable portion of P*.
# NOTE(review): the ' Popt_unserved' / ' Qopt_unserved' keys below have a
# leading space — this matches whatever produced cosim_data; confirm before
# "fixing" the key names.
ax1[1,0].plot(hrs, np.real(adc_agg[adc_num]['total']), label='ADC output')
ax1[1,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt']),'k', linestyle='--', where='post', label='ADC P*')
ax1[1,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt'])-np.array(cosim_data[adc_num][' Popt_unserved']),'r', linestyle='--', where='post', label='ADC servable setpoint')
ax1[1,0].set_ylabel("kW")
ax1[1,0].set_title("Aggregated P at ADC "+adc_num)
ax1[1,0].legend(loc='best')
# plt.xlim(left=start_time)
ax1[1,1].plot(hrs, np.imag(adc_agg[adc_num]['total']), label='ADC output')
ax1[1,1].step(cosim_data['time'],np.array(cosim_data[adc_num]['Qopt'])/1,'k--', linestyle='--', where='post',label='ADC set point')
ax1[1,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt'])-np.array(cosim_data[adc_num][' Qopt_unserved']),'r', linestyle='--', where='post', label='ADC servable setpoint')
ax1[1,1].set_ylabel("kVar")
ax1[1,1].set_title("Aggregated Q at ADC "+adc_num)
ax1[1,1].legend(loc='best')
# plt.xlim(left=start_time)
plt.show()
# Column indices of HVAC units whose cooling is ON in the final sample.
cool_on_ind = np.flatnonzero(hvac_c_status[-1, :])
# fig2, ax2 = plt.subplots(2, 2, sharex='col')
# # ax2.plot(hrs, np.abs(voltages[:,adc_ind[adc_num]['hvac']])/120
# ax2[1,0].plot(np.abs(voltages[-1,cool_on_ind])/120)
# ax2[1,0].plot(np.ones(len(cool_on_ind), int), 'r--')
# ax2[1,0].set_title("Voltages at all residential meters")
# ax2[1,0].set_ylabel("pu")
# ax2[1,0].set_xlabel("individual devices")
#
# # ax3.plot(hvac_rating[-1,cool_on_ind],linestyle='None', marker = 'o', markersize=2, label='rating')
# # ax3.plot(np.real(hvac_power[-1,cool_on_ind]),linestyle='None', marker = 'o', markersize=2, label='Output')
# ax2[0,0].plot(hvac_rating[-1,cool_on_ind]- np.real(hvac_power[-1,cool_on_ind]), label='rating-output')
# ax2[0,0].set_title("Difference between HVAC Rating and Output (Rating - Output)")
# ax2[0,0].set_ylabel("kW")
# ax2[0,0].set_xlabel("individual devices")
# ax2[0,0].legend(loc='best')
#
# ax2[0,1].plot((hvac_rating[-1,cool_on_ind]- np.real(hvac_power[-1,cool_on_ind]))/hvac_rating[-1,cool_on_ind] *100, label='(rating-output)/rating*100')
# ax2[0,1].set_title("% Error between HVAC Rating and Output (Rating - Output)/Rating")
# ax2[0,1].set_ylabel("%")
# ax2[0,1].set_xlabel("individual devices")
# ax2[0,1].legend(loc='best')
#
# ax2[1,1].plot(np.imag(hvac_power[-1,cool_on_ind])/np.real(hvac_power[-1,cool_on_ind]))
# ax2[1,1].set_title("Q/P")
# ax2[1,1].set_ylabel("ratio")
# ax2[1,1].set_xlabel("individual devices")
# ax2[1,1].legend(loc='best')
# # plt.show()
# # #plot
# fig3, ax3 = plt.subplots()
# plt.figure(1)
# pv_ind = 10
# plt.plot(hrs,solarInv_Pout[:,pv_ind],label='P_set', color='C1')
# plt.plot(hrs,solarInv_Qout[:,pv_ind],label='Q_set', color='C2')
# plt.plot(hrs,np.abs(solarInv_Pout[:,pv_ind] + 1j*solarInv_Qout[:,pv_ind]),label='VA_set', color='C3')
# plt.plot(hrs,np.real(solarInv_power[:,pv_ind]),'--', label='P_actual', color='C1')
# plt.plot(hrs,np.imag(solarInv_power[:,pv_ind]),'--', label='Q_actual', color='C2')
# plt.plot(hrs,np.abs(solarInv_power[:,pv_ind]),'k--',label='VA_actual', color='C3')
# plt.plot(hrs, np.ones(len(hrs))*solar_rated[0,pv_ind], '--',label='solar rating', color='k')
# plt.legend(loc='best')
#
# fig4, ax4 = plt.subplots()
# time_ind = 400
# plt.plot(np.arange(168),solarInv_Pout[time_ind,:],label='P_set', color='C1')
# plt.plot(np.arange(168),solarInv_Qout[time_ind,:],label='Q_set', color='C2')
# plt.plot(np.arange(168),np.abs(solarInv_Pout[time_ind,:] + 1j*solarInv_Qout[time_ind,:]),label='VA_set', color='C3')
# plt.plot(np.arange(168),np.real(solarInv_power[time_ind,:]),'--', label='P_actual', color='C1')
# plt.plot(np.arange(168),np.imag(solarInv_power[time_ind,:]),'--', label='Q_actual', color='C2')
# plt.plot(np.arange(168), np.abs(solarInv_power[time_ind,:]),'k--',label='VA_actual', color='C3')
# plt.plot(np.arange(168), solar_rated[0],'--',label='solar rating', color='k')
# plt.plot(np.arange(168), np.abs(solar_VA[time_ind,:]),'--',label='PV_max')
# plt.legend(loc='best')
#
# #
# plt.figure(8)
# # plt.plot(hrs, battInv_Pout[:,3], label='Pout', color='C1')
# # plt.plot(hrs, battInv_Qout[:,3], label='Qout', color='C2')
# # plt.plot(hrs,np.abs(battInv_Pout[:,3] + 1j*battInv_Qout[:,3]),label='VA_set', color='C3')
# # plt.plot(hrs, np.real(battInv_power[:,3]),'--', label='P_actual', color='C1')
# # plt.plot(hrs, np.imag(battInv_power[:,3]),'--', label='Q_actual', color='C2')
# plt.plot(np.arange(505), batt_rated[0],'--',label='battery rating')
# plt.plot(np.arange(505), battInv_rated[0],'--',label='battery inverter rating')
# plt.plot(np.arange(505), battInv_Pout[400,:],'--',label='battery P_out')
# plt.plot(np.arange(505), np.real(battInv_power[400,:]),'--', label='P_actual')
# # plt.plot(hrs, np.abs(battInv_power[:,3]),'k--',label='VA_actual', color='C3')
# plt.legend(loc='best')
#
# plt.figure(9)
# out_temp = (np.array(gld_data['hvac']['outside_temperatrue']['values'])).astype(np.float)
# inside_temp = (np.array(gld_data['hvac']['air_temperature']['values'])).astype(np.float)
# plt.plot(hrs, inside_temp[:,adc_ind[adc_num ]['hvac']])
# plt.plot(hrs, hvac_setc[:,adc_ind[adc_num]['hvac']])
# # plt.plot(out_temp)
# *** Plotting bid curves *****
# fig2, ax2
# ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_PV']) + np.array(cosim_data[adc_num]['Popt_BATT']),'k', linestyle='--', color='C2', where='post', label='PV+Batt set point')
#----------------------------------------------------
## ------ Validation of AC Controller ---------------
#----------------------------------------------------
# fig2, ax2 = plt.subplots(2,2, sharex='col')
# out_temp = (np.array(gld_data['hvac']['outside_temperatrue']['values'])).astype(np.float)
# inside_temp = (np.array(gld_data['hvac']['air_temperature']['values'])).astype(np.float)
#
# ax2[0,0].plot(hrs, np.real(adc_agg[adc_num]['hvac']), label='HVAC', color='C4')
# ax2[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_HVAC'])/1,'k', linestyle='--', color='C4', where='post', label='AC set point')
# ax2[0,0].set_ylabel("kW")
# ax2[0,0].set_title("Aggregated kW at ADC "+adc_num+" by DER")
# ax2[0,0].legend(loc='best')
#
# ax2[1,1].plot(hrs, inside_temp[:,adc_ind[adc_num ]['hvac']])
# ax2[1,1].set_title("Inside Temperature")
# ax2[1,1].set_ylabel("degree F")
# ax2[1,1].set_xlabel("Time (Seconds")
#
# ax2[0,1].plot(hrs, hvac_setc[:,adc_ind[adc_num]['hvac']])
# ax2[0,1].set_title("Cooling setpoint")
# ax2[0,1].set_ylabel("degree F")
# ax2[0,1].set_xlabel("Time (Seconds")
# # ax2[0].plot(out_temp)
# # ax2[1].plot(hrs, hvac_c_status[:,adc_ind[adc_num]['hvac']])
# ax2[1,0].plot(hrs,np.count_nonzero(hvac_c_status[:,adc_ind[adc_num]['hvac']], 1))
# ax2[1,0].set_title("Total number of ON AC-devices")
# ax2[1,0].set_ylabel("#")
# ax2[1,0].set_xlabel("Time (Seconds")
#
# Display the figures built so far before constructing the feasibility plots.
plt.show()
## ***** Feasibility check plot for solar PV and Battery ***********
# plt.rc('text', usetex=True)
# Figure 3, top row: can the solar inverter deliver the commanded set-point?
# Available P at the commanded Q* is sqrt(S_rated^2 - Q*^2) (and symmetrically
# for Q at P*).
fig3, ax3 = plt.subplots(2, 2, sharex='col')
# NOTE(review): no clamp is applied before np.sqrt here, so a Q* that slightly
# exceeds the rating would produce NaN (cf. the element-wise clamp used for the
# battery path below).
P_max_av = np.sqrt(np.square(np.ones(len(cosim_data['time']))*adc_Prating[adc_num]['solarInv'])- np.square(abs(np.array(cosim_data[adc_num]['Qopt_PV']))))
ax3[0,0].plot(hrs, np.ones(len(hrs))*(adc_Prating[adc_num]['solarInv']), label='P_inv_max', color='C2')
ax3[0,0].plot(hrs, np.real(adc_Prating[adc_num]['solarVA']), label='PV_solar_max', color='C3')
ax3[0,0].step((cosim_data['time']), P_max_av, label='P_avail@Q*', color='C4', where='post')
ax3[0,0].step((cosim_data['time']),abs(np.array(cosim_data[adc_num]['Popt_PV'])),'k', linestyle='--', color='C1', where='post', label='P*_PV')
ax3[0,0].set_title("Solar P(kW) set-point feasibility for ADC "+adc_num)
ax3[0,0].set_ylabel("kW")
ax3[0,0].legend(loc='best')
# plt.xlim(left=start_time)
# NOTE(review): Q_min_av is computed but never used — its plot line is
# commented out further down.
Q_min_av = np.sqrt(np.square(np.ones(len(hrs))*adc_Prating[adc_num]['solarInv'])- np.square(np.real(adc_Prating[adc_num]['solarVA'])))
Q_max_av = np.sqrt(np.square(np.ones(len(cosim_data['time']))*adc_Prating[adc_num]['solarInv'])- np.square(abs(np.array(cosim_data[adc_num]['Popt_PV']))))
ax3[0,1].plot(hrs, np.ones(len(hrs))*(adc_Prating[adc_num]['solarInv']), label='Q_inv_max', color='C2')
ax3[0,1].step((cosim_data['time']), Q_max_av, label='Q_avail@P*', color='C4', where='post')
ax3[0,1].step((cosim_data['time']),abs(np.array(cosim_data[adc_num]['Qopt_PV'])),'k', linestyle='--', color='C1', where='post', label='Q*_PV')
ax3[0,1].set_title("Solar Q(kVar) set-point feasibility for ADC "+adc_num)
ax3[0,1].set_ylabel("kVar")
ax3[0,1].legend(loc='best')
# plt.xlim(left=start_time)
# Figure 3, bottom row: battery set-point feasibility.
# Headroom P^2 = S_rated^2 - Q*^2; the subtraction can leave tiny negative
# values from floating-point noise, so clamp |x| < 1e-8 to zero before sqrt
# to avoid NaNs.
temp = (np.square(np.ones(len(cosim_data['time'])) * adc_Prating[adc_num]['battInv'])
        - np.square(abs(np.array(cosim_data[adc_num]['Qopt_BATT']))))
temp[np.abs(temp) < 1e-8] = 0  # vectorized form of the original element-wise loop
Pbatt_max_av = np.sqrt(temp)
ax3[1,0].plot(hrs, np.ones(len(hrs))*(adc_Prating[adc_num]['battInv']), label='P_inv_max', color='C2')
ax3[1,0].step((cosim_data['time']), Pbatt_max_av, label='P_avail@Q*', color='C4', where='post')
ax3[1,0].step((cosim_data['time']),abs(np.array(cosim_data[adc_num]['Popt_BATT'])),'k', linestyle='--', color='C1', where='post', label='P*_BATT')
ax3[1,0].set_title("Battery P(kW) set-point feasibility for ADC "+adc_num)
ax3[1,0].set_ylabel("kW")
ax3[1,0].set_xlabel("Time (sec)")
ax3[1,0].legend(loc='best')
# plt.xlim(left=start_time)
# Q_min_av = np.sqrt(np.square(np.ones(len(hrs))*adc_Prating[adc_num]['solarInv'])- np.square(np.real(adc_Prating[adc_num]['solarVA'])))
# Apply the same noise clamp on the Q headroom: the original took sqrt
# directly, which can yield NaN (and a RuntimeWarning) when S^2 - P*^2 dips
# slightly below zero.
q_headroom = (np.square(np.ones(len(cosim_data['time'])) * adc_Prating[adc_num]['battInv'])
              - np.square(abs(np.array(cosim_data[adc_num]['Popt_BATT']))))
q_headroom[np.abs(q_headroom) < 1e-8] = 0
Qbatt_max_av = np.sqrt(q_headroom)
ax3[1,1].plot(hrs, np.ones(len(hrs))*(adc_Prating[adc_num]['battInv']), label='Q_inv_max', color='C2')
ax3[1,1].step((cosim_data['time']), Qbatt_max_av, label='Q_avail@P*', color='C4', where='post')
ax3[1,1].step((cosim_data['time']),abs(np.array(cosim_data[adc_num]['Qopt_BATT'])),'k', linestyle='--', color='C1', where='post', label='Q*_BATT')
ax3[1,1].set_title("Battery Q(kVar) set-point feasibility for ADC "+adc_num)
ax3[1,1].set_ylabel("kVar")
ax3[1,1].set_xlabel("Time (sec)")
ax3[1,1].legend(loc='best')
# plt.xlim(left=start_time)
#TODO: plot battery state of charge
## ***** Validation for Solar PV and Battery Control *************
# Figure 4: combined PV+battery set-points vs. the aggregate P/Q actually served.
fig4, ax4 = plt.subplots(2, 1)
opt_time = cosim_data['time']
opt_vals = cosim_data[adc_num]
pv_batt_Pstar = np.array(opt_vals['Popt_PV']) + np.array(opt_vals['Popt_BATT'])
pv_batt_Qstar = np.array(opt_vals['Qopt_PV']) + np.array(opt_vals['Qopt_BATT'])
pv_batt_Pset = np.real(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout'])
pv_batt_Qset = np.real(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout'])
ax4[0].step(opt_time, pv_batt_Pstar, 'k', color='C1', where='post', label='P*_PV_Batt')
ax4[0].plot(hrs, pv_batt_Pset, label='P_set_PV_Batt', color='C2', linestyle='--')
ax4[0].set_ylabel("kW")
ax4[0].set_title("Combined Performance of PV+Batt P at ADC " + adc_num)
ax4[0].legend(loc='best')
ax4[1].step(opt_time, pv_batt_Qstar, 'k', color='C1', where='post', label='Q*_PV_Batt')
ax4[1].plot(hrs, pv_batt_Qset, label='Q_set_PV_Batt', color='C2', linestyle='--')
ax4[1].set_ylabel("kVar")
ax4[1].set_title("Combined Performance of PV+Batt Q at ADC " + adc_num)
ax4[1].legend(loc='best')
# Figure 5: per-DER set-point vs. actual inverter output at this ADC.
# Top row: solar P/Q; bottom row: battery P/Q. '_set' traces come from the
# P_Out/Q_Out actuation signals; '_actual' traces from the measured complex
# power ('solarInv'/'battInv' aggregates).
fig5, ax5 = plt.subplots(2, 2, sharex='col')
ax5[0,0].plot(hrs, np.real(adc_agg[adc_num]['solar_Pout']), label='P_set_PV', color='C2')
ax5[0,0].plot(hrs, np.real(adc_agg[adc_num]['solarInv']), label='P_actual_PV', color='C6', linestyle='--')
ax5[0,0].set_ylabel("kW")
ax5[0,0].set_title("Solar P output at ADC "+adc_num)
ax5[0,0].legend(loc='best')
ax5[0,1].plot(hrs, np.real(adc_agg[adc_num]['solar_Qout']), label='Q_set_PV', color='C2')
ax5[0,1].plot(hrs, np.imag(adc_agg[adc_num]['solarInv']), label='Q_actual_PV', color='C6', linestyle='--' )
ax5[0,1].set_ylabel("kVar")
ax5[0,1].set_title("Solar Q output at ADC "+adc_num)
ax5[0,1].legend(loc='best')
ax5[1,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout']), label='P_set_BATT', color='C2')
# Fixed copy-paste bug: this legend entry previously read 'P_actual_PV'
# although the trace is the battery inverter's actual P.
ax5[1,0].plot(hrs, np.real(adc_agg[adc_num]['battInv']), label='P_actual_BATT', color='C6', linestyle='--')
ax5[1,0].set_ylabel("kW")
ax5[1,0].set_title("Battery P output at ADC "+adc_num)
ax5[1,0].legend(loc='best')
ax5[1,0].set_xlabel("Time (sec)")
ax5[1,1].plot(hrs, np.real(adc_agg[adc_num]['batt_Qout']), label='Q_set_BATT', color='C2')
ax5[1,1].plot(hrs, np.imag(adc_agg[adc_num]['battInv']), label='Q_actual_BATT', color='C6', linestyle='--')
ax5[1,1].set_ylabel("kVar")
ax5[1,1].set_title("Battery Q output at ADC "+adc_num)
ax5[1,1].legend(loc='best')
ax5[1,1].set_xlabel("Time (sec)")
plt.show() | [
"numpy.abs",
"json.loads",
"error_metrics.calculate",
"numpy.sqrt",
"numpy.imag",
"datetime.datetime.strptime",
"numpy.where",
"os.chdir",
"numpy.array",
"numpy.real",
"numpy.sum",
"csv.reader",
"copy.deepcopy",
"numpy.nonzero",
"time.time",
"matplotlib.pyplot.subplots",
"matplotlib.... | [((214, 299), 'os.chdir', 'os.chdir', (['"""/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process"""'], {}), "('/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process'\n )\n", (222, 299), False, 'import os\n'), ((400, 414), 'json.loads', 'json.loads', (['lp'], {}), '(lp)\n', (410, 414), False, 'import json\n'), ((1259, 1282), 'numpy.array', 'np.array', (['adc_nodes_map'], {}), '(adc_nodes_map)\n', (1267, 1282), True, 'import numpy as np\n'), ((1598, 1612), 'json.loads', 'json.loads', (['lp'], {}), '(lp)\n', (1608, 1612), False, 'import json\n'), ((2411, 2422), 'time.time', 'time.time', ([], {}), '()\n', (2420, 2422), False, 'import time\n'), ((5221, 5232), 'time.time', 'time.time', ([], {}), '()\n', (5230, 5232), False, 'import time\n'), ((5243, 5265), 'copy.deepcopy', 'copy.deepcopy', (['adc_ind'], {}), '(adc_ind)\n', (5256, 5265), False, 'import copy\n'), ((7489, 7546), 'error_metrics.calculate', 'error_metrics.calculate', (['adc_agg', 'adc_Prating', 'cosim_data'], {}), '(adc_agg, adc_Prating, cosim_data)\n', (7512, 7546), False, 'import error_metrics\n'), ((8118, 8150), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '"""col"""'}), "(2, 2, sharex='col')\n", (8130, 8150), True, 'import matplotlib.pyplot as plt\n'), ((11993, 12003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12001, 12003), True, 'import matplotlib.pyplot as plt\n'), ((17784, 17794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17792, 17794), True, 'import matplotlib.pyplot as plt\n'), ((17907, 17939), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '"""col"""'}), "(2, 2, sharex='col')\n", (17919, 17939), True, 'import matplotlib.pyplot as plt\n'), ((19720, 19733), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (19727, 19733), True, 'import numpy as np\n'), ((21298, 21316), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (21310, 21316), 
True, 'import matplotlib.pyplot as plt\n'), ((22197, 22229), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '"""col"""'}), "(2, 2, sharex='col')\n", (22209, 22229), True, 'import matplotlib.pyplot as plt\n'), ((23529, 23539), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23537, 23539), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1167), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (1157, 1167), False, 'import csv\n'), ((7717, 7749), 'datetime.datetime.strptime', 'datetime.strptime', (['t', '"""%H:%M:%S"""'], {}), "(t, '%H:%M:%S')\n", (7734, 7749), False, 'from datetime import datetime\n'), ((8349, 8420), 'numpy.real', 'np.real', (["(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout'])"], {}), "(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout'])\n", (8356, 8420), True, 'import numpy as np\n'), ((8476, 8507), 'numpy.real', 'np.real', (["adc_agg[adc_num]['wh']"], {}), "(adc_agg[adc_num]['wh'])\n", (8483, 8507), True, 'import numpy as np\n'), ((8552, 8585), 'numpy.real', 'np.real', (["adc_agg[adc_num]['hvac']"], {}), "(adc_agg[adc_num]['hvac'])\n", (8559, 8585), True, 'import numpy as np\n'), ((9732, 9803), 'numpy.real', 'np.real', (["(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout'])"], {}), "(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout'])\n", (9739, 9803), True, 'import numpy as np\n'), ((9859, 9890), 'numpy.imag', 'np.imag', (["adc_agg[adc_num]['wh']"], {}), "(adc_agg[adc_num]['wh'])\n", (9866, 9890), True, 'import numpy as np\n'), ((9935, 9968), 'numpy.imag', 'np.imag', (["adc_agg[adc_num]['hvac']"], {}), "(adc_agg[adc_num]['hvac'])\n", (9942, 9968), True, 'import numpy as np\n'), ((10963, 10997), 'numpy.real', 'np.real', (["adc_agg[adc_num]['total']"], {}), "(adc_agg[adc_num]['total'])\n", (10970, 10997), True, 'import numpy as np\n'), ((11054, 11091), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt']"], {}), 
"(cosim_data[adc_num]['Popt'])\n", (11062, 11091), True, 'import numpy as np\n'), ((11483, 11517), 'numpy.imag', 'np.imag', (["adc_agg[adc_num]['total']"], {}), "(adc_agg[adc_num]['total'])\n", (11490, 11517), True, 'import numpy as np\n'), ((12018, 12050), 'numpy.nonzero', 'np.nonzero', (['hvac_c_status[-1, :]'], {}), '(hvac_c_status[-1, :])\n', (12028, 12050), True, 'import numpy as np\n'), ((18218, 18258), 'numpy.real', 'np.real', (["adc_Prating[adc_num]['solarVA']"], {}), "(adc_Prating[adc_num]['solarVA'])\n", (18225, 18258), True, 'import numpy as np\n'), ((21505, 21576), 'numpy.real', 'np.real', (["(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout'])"], {}), "(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout'])\n", (21512, 21576), True, 'import numpy as np\n'), ((21938, 22009), 'numpy.real', 'np.real', (["(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout'])"], {}), "(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout'])\n", (21945, 22009), True, 'import numpy as np\n'), ((22249, 22288), 'numpy.real', 'np.real', (["adc_agg[adc_num]['solar_Pout']"], {}), "(adc_agg[adc_num]['solar_Pout'])\n", (22256, 22288), True, 'import numpy as np\n'), ((22339, 22376), 'numpy.real', 'np.real', (["adc_agg[adc_num]['solarInv']"], {}), "(adc_agg[adc_num]['solarInv'])\n", (22346, 22376), True, 'import numpy as np\n'), ((22554, 22593), 'numpy.real', 'np.real', (["adc_agg[adc_num]['solar_Qout']"], {}), "(adc_agg[adc_num]['solar_Qout'])\n", (22561, 22593), True, 'import numpy as np\n'), ((22644, 22681), 'numpy.imag', 'np.imag', (["adc_agg[adc_num]['solarInv']"], {}), "(adc_agg[adc_num]['solarInv'])\n", (22651, 22681), True, 'import numpy as np\n'), ((22862, 22900), 'numpy.real', 'np.real', (["adc_agg[adc_num]['batt_Pout']"], {}), "(adc_agg[adc_num]['batt_Pout'])\n", (22869, 22900), True, 'import numpy as np\n'), ((22953, 22989), 'numpy.real', 'np.real', (["adc_agg[adc_num]['battInv']"], {}), "(adc_agg[adc_num]['battInv'])\n", (22960, 
22989), True, 'import numpy as np\n'), ((23203, 23241), 'numpy.real', 'np.real', (["adc_agg[adc_num]['batt_Qout']"], {}), "(adc_agg[adc_num]['batt_Qout'])\n", (23210, 23241), True, 'import numpy as np\n'), ((23294, 23330), 'numpy.imag', 'np.imag', (["adc_agg[adc_num]['battInv']"], {}), "(adc_agg[adc_num]['battInv'])\n", (23301, 23330), True, 'import numpy as np\n'), ((3026, 3074), 'numpy.array', 'np.array', (["gld_data['hvac']['voltages']['values']"], {}), "(gld_data['hvac']['voltages']['values'])\n", (3034, 3074), True, 'import numpy as np\n'), ((3172, 3220), 'numpy.array', 'np.array', (["gld_data['battInv']['P_Out']['values']"], {}), "(gld_data['battInv']['P_Out']['values'])\n", (3180, 3220), True, 'import numpy as np\n'), ((3253, 3301), 'numpy.array', 'np.array', (["gld_data['battInv']['Q_Out']['values']"], {}), "(gld_data['battInv']['Q_Out']['values'])\n", (3261, 3301), True, 'import numpy as np\n'), ((3335, 3384), 'numpy.array', 'np.array', (["gld_data['solarInv']['P_Out']['values']"], {}), "(gld_data['solarInv']['P_Out']['values'])\n", (3343, 3384), True, 'import numpy as np\n'), ((3418, 3467), 'numpy.array', 'np.array', (["gld_data['solarInv']['Q_Out']['values']"], {}), "(gld_data['solarInv']['Q_Out']['values'])\n", (3426, 3467), True, 'import numpy as np\n'), ((3497, 3553), 'numpy.array', 'np.array', (["gld_data['hvac']['heating_setpoint']['values']"], {}), "(gld_data['hvac']['heating_setpoint']['values'])\n", (3505, 3553), True, 'import numpy as np\n'), ((3583, 3639), 'numpy.array', 'np.array', (["gld_data['hvac']['cooling_setpoint']['values']"], {}), "(gld_data['hvac']['cooling_setpoint']['values'])\n", (3591, 3639), True, 'import numpy as np\n'), ((3680, 3734), 'numpy.array', 'np.array', (["gld_data['hvac']['cooling_demand']['values']"], {}), "(gld_data['hvac']['cooling_demand']['values'])\n", (3688, 3734), True, 'import numpy as np\n'), ((3927, 3990), 'numpy.array', 'np.array', (["gld_data['hvac']['design_cooling_capacity']['values']"], {}), 
"(gld_data['hvac']['design_cooling_capacity']['values'])\n", (3935, 3990), True, 'import numpy as np\n'), ((4023, 4074), 'numpy.array', 'np.array', (["gld_data['hvac']['cooling_COP']['values']"], {}), "(gld_data['hvac']['cooling_COP']['values'])\n", (4031, 4074), True, 'import numpy as np\n'), ((4167, 4218), 'numpy.array', 'np.array', (["gld_data['wh']['tank_setpoint']['values']"], {}), "(gld_data['wh']['tank_setpoint']['values'])\n", (4175, 4218), True, 'import numpy as np\n'), ((4252, 4306), 'numpy.array', 'np.array', (["gld_data['hvac']['cooling_status']['values']"], {}), "(gld_data['hvac']['cooling_status']['values'])\n", (4260, 4306), True, 'import numpy as np\n'), ((4336, 4398), 'numpy.array', 'np.array', (["gld_data['wh']['heating_element_capacity']['values']"], {}), "(gld_data['wh']['heating_element_capacity']['values'])\n", (4344, 4398), True, 'import numpy as np\n'), ((4433, 4487), 'numpy.array', 'np.array', (["gld_data['battInv']['rated_power']['values']"], {}), "(gld_data['battInv']['rated_power']['values'])\n", (4441, 4487), True, 'import numpy as np\n'), ((4520, 4571), 'numpy.array', 'np.array', (["gld_data['batt']['rated_power']['values']"], {}), "(gld_data['batt']['rated_power']['values'])\n", (4528, 4571), True, 'import numpy as np\n'), ((4605, 4657), 'numpy.array', 'np.array', (["gld_data['solar']['rated_power']['values']"], {}), "(gld_data['solar']['rated_power']['values'])\n", (4613, 4657), True, 'import numpy as np\n'), ((4717, 4765), 'numpy.array', 'np.array', (["gld_data['battInv']['power']['values']"], {}), "(gld_data['battInv']['power']['values'])\n", (4725, 4765), True, 'import numpy as np\n'), ((4803, 4852), 'numpy.array', 'np.array', (["gld_data['solarInv']['power']['values']"], {}), "(gld_data['solarInv']['power']['values'])\n", (4811, 4852), True, 'import numpy as np\n'), ((4886, 4931), 'numpy.array', 'np.array', (["gld_data['hvac']['power']['values']"], {}), "(gld_data['hvac']['power']['values'])\n", (4894, 4931), True, 'import numpy 
as np\n'), ((4963, 5006), 'numpy.array', 'np.array', (["gld_data['wh']['power']['values']"], {}), "(gld_data['wh']['power']['values'])\n", (4971, 5006), True, 'import numpy as np\n'), ((5039, 5082), 'numpy.array', 'np.array', (["gld_data['solar']['VA']['values']"], {}), "(gld_data['solar']['VA']['values'])\n", (5047, 5082), True, 'import numpy as np\n'), ((6778, 6828), 'numpy.sum', 'np.sum', (["hvac_power[:, adc_ind[adc_num]['hvac']]", '(1)'], {}), "(hvac_power[:, adc_ind[adc_num]['hvac']], 1)\n", (6784, 6828), True, 'import numpy as np\n'), ((6957, 7005), 'numpy.sum', 'np.sum', (["hvac_rating[0, adc_ind[adc_num]['hvac']]"], {}), "(hvac_rating[0, adc_ind[adc_num]['hvac']])\n", (6963, 7005), True, 'import numpy as np\n'), ((7173, 7219), 'numpy.sum', 'np.sum', (["wh_power[:, adc_ind[adc_num]['wh']]", '(1)'], {}), "(wh_power[:, adc_ind[adc_num]['wh']], 1)\n", (7179, 7219), True, 'import numpy as np\n'), ((7344, 7388), 'numpy.sum', 'np.sum', (["wh_rating[0, adc_ind[adc_num]['wh']]"], {}), "(wh_rating[0, adc_ind[adc_num]['wh']])\n", (7350, 7388), True, 'import numpy as np\n'), ((8956, 8996), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_PV']"], {}), "(cosim_data[adc_num]['Popt_PV'])\n", (8964, 8996), True, 'import numpy as np\n'), ((8999, 9041), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_BATT']"], {}), "(cosim_data[adc_num]['Popt_BATT'])\n", (9007, 9041), True, 'import numpy as np\n'), ((9152, 9192), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_WH']"], {}), "(cosim_data[adc_num]['Popt_WH'])\n", (9160, 9192), True, 'import numpy as np\n'), ((9299, 9341), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_HVAC']"], {}), "(cosim_data[adc_num]['Popt_HVAC'])\n", (9307, 9341), True, 'import numpy as np\n'), ((10337, 10377), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_PV']"], {}), "(cosim_data[adc_num]['Qopt_PV'])\n", (10345, 10377), True, 'import numpy as np\n'), ((10380, 10422), 'numpy.array', 'np.array', 
(["cosim_data[adc_num]['Qopt_BATT']"], {}), "(cosim_data[adc_num]['Qopt_BATT'])\n", (10388, 10422), True, 'import numpy as np\n'), ((10533, 10573), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_WH']"], {}), "(cosim_data[adc_num]['Qopt_WH'])\n", (10541, 10573), True, 'import numpy as np\n'), ((10680, 10722), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_HVAC']"], {}), "(cosim_data[adc_num]['Qopt_HVAC'])\n", (10688, 10722), True, 'import numpy as np\n'), ((11178, 11215), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt']"], {}), "(cosim_data[adc_num]['Popt'])\n", (11186, 11215), True, 'import numpy as np\n'), ((11216, 11263), 'numpy.array', 'np.array', (["cosim_data[adc_num][' Popt_unserved']"], {}), "(cosim_data[adc_num][' Popt_unserved'])\n", (11224, 11263), True, 'import numpy as np\n'), ((11572, 11609), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt']"], {}), "(cosim_data[adc_num]['Qopt'])\n", (11580, 11609), True, 'import numpy as np\n'), ((11706, 11743), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt']"], {}), "(cosim_data[adc_num]['Qopt'])\n", (11714, 11743), True, 'import numpy as np\n'), ((11744, 11791), 'numpy.array', 'np.array', (["cosim_data[adc_num][' Qopt_unserved']"], {}), "(cosim_data[adc_num][' Qopt_unserved'])\n", (11752, 11791), True, 'import numpy as np\n'), ((18425, 18465), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_PV']"], {}), "(cosim_data[adc_num]['Popt_PV'])\n", (18433, 18465), True, 'import numpy as np\n'), ((19211, 19251), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_PV']"], {}), "(cosim_data[adc_num]['Qopt_PV'])\n", (19219, 19251), True, 'import numpy as np\n'), ((19657, 19674), 'numpy.abs', 'np.abs', (['temp[ind]'], {}), '(temp[ind])\n', (19663, 19674), True, 'import numpy as np\n'), ((19972, 20014), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_BATT']"], {}), "(cosim_data[adc_num]['Popt_BATT'])\n", (19980, 20014), True, 'import numpy as np\n'), ((20808, 
20850), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_BATT']"], {}), "(cosim_data[adc_num]['Qopt_BATT'])\n", (20816, 20850), True, 'import numpy as np\n'), ((21350, 21390), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_PV']"], {}), "(cosim_data[adc_num]['Popt_PV'])\n", (21358, 21390), True, 'import numpy as np\n'), ((21393, 21435), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_BATT']"], {}), "(cosim_data[adc_num]['Popt_BATT'])\n", (21401, 21435), True, 'import numpy as np\n'), ((21783, 21823), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_PV']"], {}), "(cosim_data[adc_num]['Qopt_PV'])\n", (21791, 21823), True, 'import numpy as np\n'), ((21826, 21868), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_BATT']"], {}), "(cosim_data[adc_num]['Qopt_BATT'])\n", (21834, 21868), True, 'import numpy as np\n'), ((1390, 1427), 'numpy.where', 'np.where', (['(adc_nodes_map[:, 0] == node)'], {}), '(adc_nodes_map[:, 0] == node)\n', (1398, 1427), True, 'import numpy as np\n'), ((3771, 3827), 'numpy.array', 'np.array', (["gld_data['hvac']['fan_design_power']['values']"], {}), "(gld_data['hvac']['fan_design_power']['values'])\n", (3779, 3827), True, 'import numpy as np\n'), ((5444, 5500), 'numpy.sum', 'np.sum', (["battInv_power[:, adc_ind[adc_num]['battInv']]", '(1)'], {}), "(battInv_power[:, adc_ind[adc_num]['battInv']], 1)\n", (5450, 5500), True, 'import numpy as np\n'), ((5546, 5601), 'numpy.sum', 'np.sum', (["battInv_Pout[:, adc_ind[adc_num]['battInv']]", '(1)'], {}), "(battInv_Pout[:, adc_ind[adc_num]['battInv']], 1)\n", (5552, 5601), True, 'import numpy as np\n'), ((5649, 5704), 'numpy.sum', 'np.sum', (["battInv_Qout[:, adc_ind[adc_num]['battInv']]", '(1)'], {}), "(battInv_Qout[:, adc_ind[adc_num]['battInv']], 1)\n", (5655, 5704), True, 'import numpy as np\n'), ((5818, 5871), 'numpy.sum', 'np.sum', (["battInv_rated[0, adc_ind[adc_num]['battInv']]"], {}), "(battInv_rated[0, adc_ind[adc_num]['battInv']])\n", (5824, 5871), True, 
'import numpy as np\n'), ((6027, 6085), 'numpy.sum', 'np.sum', (["solarInv_power[:, adc_ind[adc_num]['solarInv']]", '(1)'], {}), "(solarInv_power[:, adc_ind[adc_num]['solarInv']], 1)\n", (6033, 6085), True, 'import numpy as np\n'), ((6134, 6191), 'numpy.sum', 'np.sum', (["solarInv_Pout[:, adc_ind[adc_num]['solarInv']]", '(1)'], {}), "(solarInv_Pout[:, adc_ind[adc_num]['solarInv']], 1)\n", (6140, 6191), True, 'import numpy as np\n'), ((6240, 6297), 'numpy.sum', 'np.sum', (["solarInv_Qout[:, adc_ind[adc_num]['solarInv']]", '(1)'], {}), "(solarInv_Qout[:, adc_ind[adc_num]['solarInv']], 1)\n", (6246, 6297), True, 'import numpy as np\n'), ((6441, 6493), 'numpy.sum', 'np.sum', (["solar_rated[0, adc_ind[adc_num]['solarInv']]"], {}), "(solar_rated[0, adc_ind[adc_num]['solarInv']])\n", (6447, 6493), True, 'import numpy as np\n'), ((6543, 6595), 'numpy.sum', 'np.sum', (["solar_VA[:, adc_ind[adc_num]['solarInv']]", '(1)'], {}), "(solar_VA[:, adc_ind[adc_num]['solarInv']], 1)\n", (6549, 6595), True, 'import numpy as np\n'), ((18778, 18818), 'numpy.real', 'np.real', (["adc_Prating[adc_num]['solarVA']"], {}), "(adc_Prating[adc_num]['solarVA'])\n", (18785, 18818), True, 'import numpy as np\n'), ((19575, 19617), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_BATT']"], {}), "(cosim_data[adc_num]['Qopt_BATT'])\n", (19583, 19617), True, 'import numpy as np\n'), ((18051, 18091), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Qopt_PV']"], {}), "(cosim_data[adc_num]['Qopt_PV'])\n", (18059, 18091), True, 'import numpy as np\n'), ((18932, 18972), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_PV']"], {}), "(cosim_data[adc_num]['Popt_PV'])\n", (18940, 18972), True, 'import numpy as np\n'), ((20524, 20566), 'numpy.array', 'np.array', (["cosim_data[adc_num]['Popt_BATT']"], {}), "(cosim_data[adc_num]['Popt_BATT'])\n", (20532, 20566), True, 'import numpy as np\n')] |
from __future__ import print_function
import sys, os.path as path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from summarizer.utils.reader import read_csv
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import os
import argparse
from sets import Set
import matplotlib as mpl
# Switch matplotlib to the pgf backend so the .pgf files written by
# savefig() below can be \input into a LaTeX document.
mpl.use('pgf')
def figsize(scale):
    """Return a [width, height] figure size (inches) for a given fraction
    of the LaTeX text width, using a golden-ratio aspect."""
    text_width_pt = 455.24408             # Get this from LaTeX using \the\textwidth
    pt_to_inch = 1.0 / 72.27              # Convert pt to inch
    aspect = (np.sqrt(5.0) - 1.0) / 2.0   # Aesthetic ratio (golden mean)
    width_in = text_width_pt * pt_to_inch * scale
    return [width_in, width_in * aspect]
def savefig(filename):
    """Save the current figure under figs/ as both .pgf and .pdf."""
    for extension in ('pgf', 'pdf'):
        plt.savefig("figs/" + '{}.{}'.format(filename, extension))
class Aggregator(object):
    """Accumulates per-cluster/per-user ROUGE score trajectories read from
    CSV files and reports aggregate statistics over them.

    Each loaded CSV row is one feedback iteration; column 0 of the first
    row holds the upper-bound (UB) entry and each user occupies
    ``info_per_user`` consecutive columns (R1, R2, SU4, accepts, rejects,
    summary).  NOTE(review): the script imports ``from sets import Set``
    at module level, so this code targets Python 2 (integer division is
    relied on below).
    """
    def __init__(self):
        # One entry per loaded cluster; each entry is a per-user list.
        self.count_ub_reach = []   # 1/0 flags: did the user reach the UB score
        self.no_accepts = []       # accept counts per iteration
        self.no_rejects = []       # reject counts per iteration
        self.iterations = []       # number of iterations recorded per user
        self.R1_scores = []        # ROUGE-1 trajectories
        self.R2_scores = []        # ROUGE-2 trajectories
        self.SU4_scores = []       # ROUGE-SU4 trajectories
        self.clusters = []         # cluster ids in load order
        self.summaries = []        # summary text per iteration
    def load_data(self, filename, cluster_id, info_per_user):
        """Read one cluster's CSV and remember how many columns each user spans."""
        self.clusters.append(cluster_id)
        self.rows = read_csv(filename)
        self.info_num = info_per_user
    def plot_distribution(self, mean, sigma, array):
        """Plot a histogram of ``array`` with a fitted normal curve and
        vertical lines at mean +/- one sigma (blocks on plt.show())."""
        vlines = [mean-(1*sigma), mean, mean+(1*sigma)]
        for val in vlines:
            plt.axvline(val, color='k', linestyle='--')
        bins = np.linspace(mean-(4*sigma), mean+(4*sigma), 200)
        plt.hist(array, bins, alpha=0.5)
        y = mlab.normpdf(bins, mean, sigma)
        plt.plot(bins, y, 'r--')
        plt.subplots_adjust(left=0.15)
        plt.show()
        print(mean, sigma)
    def get_summaries(self, cluster_id, user_id):
        """Print the UB and state-of-the-art (SOA) summaries for one user,
        together with the UB / SOA / final R2 scores."""
        # Index 0 is the upper-bound entry, index 1 the SOA baseline.
        ub_summary = self.summaries[cluster_id][user_id][0]
        soa_summary = self.summaries[cluster_id][user_id][1]
        last = len(self.R2_scores[cluster_id][user_id])
        print(self.R2_scores[cluster_id][user_id][0], self.R2_scores[cluster_id][user_id][1], self.R2_scores[cluster_id][user_id][last-1] )
        print('UB summary:\n', ub_summary)
        print('SOA summary:\n', soa_summary)
    def print_min_max_avg_std(self, array, tag):
        """Print min/max/avg/std of ``array`` prefixed by ``tag``; return the mean."""
        np_array = np.array(array)
        mean = np.mean(np_array)
        sigma = np.std(np_array)
        print('%s\nmin:%4f max:%4f avg:%4f std:%4f' % (tag, np.min(np_array), np.max(np_array), mean, sigma))
        '''
        if tag == 'R2 ub_soa diff:' or tag == 'R2 ub_last diff:':
            self.plot_distribution(mean, sigma, array)
        '''
        return mean
    def print_results(self):
        """Aggregate statistics over all loaded clusters and print them.

        Returns (avg_accepts, avg_rejects, avg_r2_ub, avg_r2_sys, avg_r2_soa).
        For each user the "system" score is taken at the iteration with the
        best R2 value (excluding the UB entry at index 0).
        """
        ub_count, total_ub = 0.0, 0.0
        accepts, rejects, iterations = [], [], []
        R1_ub_last_diff, R2_ub_last_diff, SU4_ub_last_diff, other_accepts, other_rejects, other_iterations = [], [], [], [], [], []
        other_R1_ub_last_diff, other_R2_ub_last_diff = [], []
        reach_R1_ub_last_diff, reach_R2_ub_last_diff = [], []
        R1_ub_soa_diff, R2_ub_soa_diff, SU4_ub_soa_diff = [], [], []
        R1_system, R2_system, SU4_system = [], [], []
        R1_UB, R2_UB, SU4_UB = [], [], []
        R1_SOA, R2_SOA, SU4_SOA = [], [], []
        R1_soa_last_diff, R2_soa_last_diff, SU4_soa_last_diff = [], [], []
        #num_clusters = len(self.count_ub_reach)
        num_clusters = len(self.R1_scores)
        for cluster in range(num_clusters):
            no_users = len(self.count_ub_reach[cluster])
            for user in range(no_users):
                total_ub += 1
                last = len(self.R1_scores[cluster][user])
                # Best R2 iteration, skipping the UB entry at index 0.
                index = np.argmax(np.array(self.R2_scores[cluster][user][1:last]))
                index = index+1
                '''
                print(self.R2_scores[cluster][user][1:last])
                print(self.R2_scores[cluster][user][1:index+1])
                print(self.no_accepts[cluster][user][1:index+1])
                '''
                accepts.append(sum(self.no_accepts[cluster][user][1:index]))
                rejects.append(sum(self.no_rejects[cluster][user][1:index]))
                iterations.append(index)
                # Differences: upper bound vs. best iteration / vs. SOA baseline.
                R1_ub_last_diff.append(self.R1_scores[cluster][user][0]-self.R1_scores[cluster][user][index])
                R2_ub_last_diff.append(self.R2_scores[cluster][user][0]-self.R2_scores[cluster][user][index])
                SU4_ub_last_diff.append(self.SU4_scores[cluster][user][0]-self.SU4_scores[cluster][user][index])
                R1_ub_soa_diff.append(self.R1_scores[cluster][user][0]-self.R1_scores[cluster][user][1])
                R2_ub_soa_diff.append(self.R2_scores[cluster][user][0]-self.R2_scores[cluster][user][1])
                SU4_ub_soa_diff.append(self.SU4_scores[cluster][user][0]-self.SU4_scores[cluster][user][1])
                R1_system.append(self.R1_scores[cluster][user][index])
                R2_system.append(self.R2_scores[cluster][user][index])
                SU4_system.append(self.SU4_scores[cluster][user][index])
                R1_UB.append(self.R1_scores[cluster][user][0])
                R2_UB.append(self.R2_scores[cluster][user][0])
                SU4_UB.append(self.SU4_scores[cluster][user][0])
                R1_SOA.append(self.R1_scores[cluster][user][1])
                R2_SOA.append(self.R2_scores[cluster][user][1])
                SU4_SOA.append(self.SU4_scores[cluster][user][1])
                R1_soa_last_diff.append(self.R1_scores[cluster][user][1] - self.R1_scores[cluster][user][index])
                R2_soa_last_diff.append(self.R2_scores[cluster][user][1] - self.R2_scores[cluster][user][index])
                SU4_soa_last_diff.append(self.SU4_scores[cluster][user][1] - self.SU4_scores[cluster][user][index])
                if self.count_ub_reach[cluster][user] == 1:
                    ub_count += 1
                    reach_R1_ub_last_diff.append(self.R1_scores[cluster][user][0]-self.R1_scores[cluster][user][index])
                    reach_R2_ub_last_diff.append(self.R2_scores[cluster][user][0]-self.R2_scores[cluster][user][index])
                if self.count_ub_reach[cluster][user] == 0:
                    other_accepts.append(sum(self.no_accepts[cluster][user][:-1]))
                    other_rejects.append(sum(self.no_rejects[cluster][user][:-1]))
                    other_iterations.append(self.iterations[cluster][user])
                    other_R1_ub_last_diff.append(self.R1_scores[cluster][user][0]-self.R1_scores[cluster][user][index])
                    other_R2_ub_last_diff.append(self.R2_scores[cluster][user][0]-self.R2_scores[cluster][user][index])
            '''
            if self.count_ub_reach[cluster].count(1) == no_users:
                print('All One Cluster: %s' % (self.clusters[cluster]))
                print('Iterations', self.iterations[cluster])
            if self.count_ub_reach[cluster].count(0) == no_users:
                print('All Zero Cluster: %s' % (self.clusters[cluster]))
                print('Iterations', self.iterations[cluster])
            '''
        print('No. of clusters:%d' % (num_clusters))
        print('Total UB_Reach:%d/%d' % (ub_count, total_ub))
        print('Total rejects avg:%d min,max:%d,%d' % (np.mean(np.array(rejects)), min(rejects), max(rejects)))
        print('Total accepts avg:%d min,max:%d,%d' % (np.mean(np.array(accepts)), min(accepts), max(accepts)))
        print('Total iterations avg:%d min,max:%d,%d' % (np.mean(np.array(iterations)), min(iterations), max(iterations)))
        max_diff_index = R2_soa_last_diff.index(min(R2_soa_last_diff))
        # NOTE(review): integer division here is Python 2 semantics; under
        # Python 3 this would produce float indices.
        max_cluster, max_user = max_diff_index/self.users, max_diff_index%self.users
        #print('Cluster with max difference:', self.clusters[max_cluster], max_user)
        #self.get_summaries(max_cluster, max_user)
        '''
        self.print_min_max_avg_std(reach_R1_ub_last_diff, 'Reached R1 ub_last diff:\n')
        self.print_min_max_avg_std(reach_R2_ub_last_diff, 'Reached R2 ub_last diff:\n')
        self.print_min_max_avg_std(other_R1_ub_last_diff, 'Other R1 ub_last diff:\n')
        self.print_min_max_avg_std(other_R2_ub_last_diff, 'Other R2 ub_last diff:\n')
        '''
        '''
        self.print_min_max_avg_std(R1_ub_soa_diff, 'R1 ub_soa diff:')
        self.print_min_max_avg_std(R2_ub_soa_diff, 'R2 ub_soa diff:')
        self.print_min_max_avg_std(R1_ub_last_diff, 'R1 ub_last diff:')
        self.print_min_max_avg_std(R2_ub_last_diff, 'R2 ub_last diff:')
        '''
        avg_r1_ub = self.print_min_max_avg_std(R1_UB, 'R1 UB')
        avg_r2_ub = self.print_min_max_avg_std(R2_UB, 'R2 UB')
        avg_r4_ub = self.print_min_max_avg_std(SU4_UB, 'SU4 UB')
        avg_r1_sys = self.print_min_max_avg_std(R1_system, 'R1 system')
        avg_r2_sys = self.print_min_max_avg_std(R2_system, 'R2 system')
        avg_r4_ub = self.print_min_max_avg_std(SU4_system, 'SU4 system')
        avg_r1_soa = self.print_min_max_avg_std(R1_SOA, 'R1 SOA')
        avg_r2_soa = self.print_min_max_avg_std(R2_SOA, 'R2 SOA')
        avg_r4_ub = self.print_min_max_avg_std(SU4_SOA, 'SU4 SOA')
        avg_accepts = sum(accepts)*1.0/len(accepts)
        avg_rejects = sum(rejects)*1.0/len(rejects)
        return avg_accepts, avg_rejects, avg_r2_ub, avg_r2_sys, avg_r2_soa
    def aggregate_scores(self, break_iteration):
        """Convert the loaded CSV rows into per-user score/feedback
        trajectories, stopping after ``break_iteration`` iterations
        (or reading everything when it is 'last'), and append the
        results to the per-cluster accumulators."""
        try:
            ub_scores = self.rows[0]
        except:
            return
        # Python 2 integer division: columns per user determines user count.
        self.users = len(ub_scores[1:])/self.info_num
        #TODO: Change the initialization
        R1scores = [[] for _ in range(self.users)]
        R2scores = [[] for _ in range(self.users)]
        accepts = [[] for _ in range(self.users)]
        rejects = [[] for _ in range(self.users)]
        summaries = [[] for _ in range(self.users)]
        SU4scores = [[] for _ in range(self.users)]
        for iteration in range(0,len(self.rows)):
            if break_iteration !='last' and iteration == int(break_iteration)+1:
                break
            row = self.rows[iteration]
            for user in range(self.users):
                index = user*self.info_num
                val = row[1+index]
                if val != "":
                    # Column layout per user: R1, R2, SU4, accepts, rejects, summary.
                    R1scores[user].append(float(row[1+index]))
                    R2scores[user].append(float(row[2+index]))
                    SU4scores[user].append(float(row[3+index]))
                    accepts[user].append(int(row[4+index]))
                    rejects[user].append(int(row[5+index]))
                    summaries[user].append(str(row[6+index]))
        ub_reach, user_iterations = [], []
        for user in range(self.users):
            last = len(R1scores[user])
            user_iterations.append(last-1)
            # A user "reaches" the upper bound when the final score is at
            # least the UB entry stored at index 0.
            if R1scores[user][0] <= R1scores[user][last-1] and R2scores[user][0] <= R2scores[user][last-1]:
                #print('Ub_score:', R1scores[user][0], R2scores[user][0])
                #print('Break point:', R1scores[user][last-1], R2scores[user][last-1])
                ub_reach.append(1)
                continue
            if R2scores[user][0] <= R2scores[user][last-1]:
                ub_reach.append(1)
                continue
            else:
                ub_reach.append(0)
        self.iterations.append(user_iterations)
        self.count_ub_reach.append(ub_reach)
        self.no_accepts.append(accepts)
        self.no_rejects.append(rejects)
        self.R1_scores.append(R1scores)
        self.R2_scores.append(R2scores)
        self.SU4_scores.append(SU4scores)
        self.summaries.append(summaries)
        """
        plt.subplot(211)
        for user in range(2):
            plt.plot(range(len(R2scores[user][1:])), len(R2scores[user][1:]) *[R2scores[user][0]], 'k--', label='UB%s' % (str(user)))
            plt.plot(range(len(R2scores[user][1:])), R2scores[user][1:], 'r', label='User %s' % (str(user)))
        plt.legend(loc="lower right")
        plt.subplot(212)
        plt.plot(range(len(y[2][1:])), len(y[2][1:]) *[y[2][0]], 'k--', label='UB3')
        plt.plot(range(len(y[2][1:])), y[2][1:],'y', label='User 3')
        plt.plot(range(len(y[3][1:])), len(y[3][1:]) *[y[3][0]], 'k--', label='UB4')
        plt.plot(range(len(y[3][1:])), y[3][1:], 'g', label='User 4')
        plt.legend(loc="lower right")
        plt.xlabel("No. of iterations")
        plt.ylabel(rouge_type)
        plt.yscale("linear")
        plt.show()
        """
def get_args():
    ''' This function parses and return arguments passed in'''
    parser = argparse.ArgumentParser(description='Results Aggregator')
    parser.add_argument('-d', '--data_set', type=str, required=True,
                        help='Datset ex. DUC2004')
    parser.add_argument('-l', '--len_summary', type=str, required=False,
                        help='Summary Size')
    parser.add_argument('-a', '--annotation', type=str, required=False,
                        help='Annotation Type')
    parsed = parser.parse_args()
    return parsed.data_set, parsed.len_summary, parsed.annotation
def plot_aggregate(labels, accepts_rejects, rejects, ub_score, sys_score, soa, break_iteration, filename):
    # Plot the per-method ROUGE-2 trajectories (top panel) and feedback
    # counts (bottom panel) over iterations, then save via savefig().
    # NOTE(review): `rejects` and `soa` are accepted but never used here.
    # NOTE(review): this function mutates `sys_score` in place (all methods
    # are forced to share the same initial score) — callers should be aware.
    colors = ['g','b','r', 'y']
    linestyles = ['->', '-o', '-', '-x']
    f, axis = plt.subplots(2, sharex=True, sharey=False, figsize=(4, 6))
    axis[0] = plt.subplot2grid((8, 12), (0, 0), rowspan=5, colspan=12)
    axis[1] = plt.subplot2grid((8, 12), (5, 0), rowspan=3, colspan=12)
    # Upper-bound reference line (taken from the first method's UB score).
    axis[0].plot(range(len(accepts_rejects[0][0:break_iteration])), len(accepts_rejects[0][0:break_iteration]) * [ub_score[0][0]], 'k--', label = 'Upper bound', linewidth=2)
    # Align all methods at iteration 0: use the most common initial score.
    common_score = []
    for i in range(len(labels)):
        common_score.append(sys_score[i][0])
    initial_score = max(set(common_score), key=common_score.count)
    for i in range(len(labels)):
        sys_score[i][0] = initial_score
    for i in range(len(labels)):
        y = sys_score[i][0:break_iteration]
        axis[0].plot(range(len(y)), y, linestyles[i], color=colors[i], label='%s' % labels[i], linewidth=1.5)
    axis[0].set_ylabel('ROUGE 2', fontsize=15)
    axis[0].legend(loc="best", fontsize=12)
    axis[0].set_xticks(np.arange(0, break_iteration, 1))
    axis[0].set_autoscale_on(True)
    axis[0].grid(True)
    f.subplots_adjust(hspace=0.1)
    # Bottom panel: number of feedbacks (accepts) per iteration, skipping
    # the iteration-0 entry.
    for i in range(len(labels)):
        y = accepts_rejects[i][1:break_iteration+1]
        axis[1].plot(range(len(y)), y, linestyles[i], color=colors[i], label='%s' % labels[i], linewidth=1.5)
    axis[1].grid(True)
    axis[1].set_xlabel("No. of iterations", fontsize=13)
    axis[1].set_ylabel('No. of feedbacks', fontsize=13)
    axis[1].set_xticks(np.arange(0, break_iteration, 1))
    axis[1].set_xticklabels(np.arange(0, break_iteration, 1))
    plt.tight_layout()
    savefig(filename)
if __name__ == '__main__':
    data_set, len_summary, annotation = get_args()
    # Feedback strategies to compare, and their plot legend labels.
    methods = ['active_learning2', 'active_learning','ilp_feedback', 'accept_reject']
    labels = ['Active+','Active', 'Joint', 'Accept']
    total_iterations = 11
    pgf_with_latex = {                      # setup matplotlib to use latex for output
        "pgf.texsystem": "pdflatex",        # change this if using xetex or lautex
        "text.usetex": True,                # use LaTeX to write all text
        "font.family": "serif",
        "font.serif": [],                   # blank entries should cause plots to inherit fonts from the document
        "font.sans-serif": [],
        "font.monospace": [],
        "axes.labelsize": 10,               # LaTeX default is 10pt font.
        # NOTE(review): 'text.fontsize' is the legacy rcParam name
        # ('font.size' in newer matplotlib) — left as-is for the
        # matplotlib version this script targets.
        "text.fontsize": 10,
        "legend.fontsize": 10,              # Make the legend/label fonts a little smaller
        "xtick.labelsize": 12,
        "ytick.labelsize": 12,
        "figure.figsize": figsize(0.9),     # default fig size of 0.9 textwidth
        "pgf.preamble": [
            r"\usepackage[utf8x]{inputenc}",    # use utf8 fonts becasue your computer can handle it :)
            r"\usepackage[T1]{fontenc}",        # plots will be generated using this preamble
            ]
        }
    mpl.rcParams.update(pgf_with_latex)
    score_type = 'R2_score'
    # Per-method accumulators, one list per method.
    accepts_rejects, rejects = [[] for i in range(len(methods))], [[] for i in range(len(methods))]
    ub_score, sys_score, soa_score = [[] for i in range(len(methods))], [[] for i in range(len(methods))], [[] for i in range(len(methods))]
    # First pass: restrict evaluation to topics present for ALL methods.
    inter_topics = Set()
    for index, method in enumerate(methods):
        # The score directory layout depends on which optional args are set.
        if len_summary!=None and annotation == None:
            data_path = '../data/scores/%s/%s_%s_%s' % (data_set, method, data_set, len_summary)
        if annotation!=None and len_summary!=None:
            data_path = '../data/scores/%s/%s_%s_%s_%s' % (data_set, method, annotation, data_set,len_summary)
        if annotation!=None and len_summary==None:
            data_path = '../data/scores/%s/%s_%s_%s' % (data_set, method, annotation, data_set)
        if annotation==None and len_summary==None:
            data_path = '../data/scores/%s/%s_%s' % (data_set, method, data_set)
        topics = [fileid[:-4] for fileid in os.listdir(data_path)]
        if index == 0:
            inter_topics = Set(topics)
        else:
            inter_topics = inter_topics.intersection(topics)
    file_ids = {}
    # Second pass: aggregate scores for every cutoff iteration and method.
    for break_iteration in range(1, total_iterations+2):
        for index, method in enumerate(methods):
            print('Method:%s, index:%d' % (method, index))
            if len_summary!=None and annotation == None:
                data_path = '../data/scores/%s/%s_%s_%s' % (data_set, method, data_set, len_summary)
            if annotation!=None and len_summary!=None:
                data_path = '../data/scores/%s/%s_%s_%s_%s' % (data_set, method, annotation, data_set,len_summary)
            if annotation!=None and len_summary==None:
                data_path = '../data/scores/%s/%s_%s_%s' % (data_set, method, annotation, data_set)
            if annotation==None and len_summary==None:
                data_path = '../data/scores/%s/%s_%s' % (data_set, method, data_set)
            aggregate = Aggregator()
            for fileid in os.listdir(data_path):
                filename = '%s/%s' % (data_path, fileid)
                topic = fileid[:-4]
                if topic not in inter_topics:
                    continue
                data_org_path = '../data/processed/%s/%s/summaries' % (data_set, topic)
                num_users = len(os.listdir(data_org_path))
                #print(filename)
                cluster_id = fileid[:-4]
                # 6 = columns per user in the score CSVs (R1, R2, SU4,
                # accepts, rejects, summary).
                aggregate.load_data(filename, cluster_id, 6)
                aggregate.aggregate_scores(break_iteration)
            items = aggregate.print_results()
            avg_accepts, avg_rejects, avg_r2_ub, avg_r2_sys, avg_r2_soa = items
            accepts_rejects[index].append(avg_accepts)
            rejects[index].append(avg_rejects)
            ub_score[index].append(avg_r2_ub)
            sys_score[index].append(avg_r2_sys)
            soa_score[index].append(avg_r2_soa)
    filename = '%s' % (data_set)
    plot_aggregate(labels, accepts_rejects, rejects, ub_score, sys_score, soa_score, total_iterations, filename)
| [
"matplotlib.pyplot.hist",
"numpy.sqrt",
"numpy.array",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.subplot2grid",
"sets.Set",
"numpy.arange",
"numpy.mean",
"matplotlib.mlab.normpdf",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"nu... | [((348, 362), 'matplotlib.use', 'mpl.use', (['"""pgf"""'], {}), "('pgf')\n", (355, 362), True, 'import matplotlib as mpl\n'), ((12684, 12741), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Results Aggregator"""'}), "(description='Results Aggregator')\n", (12707, 12741), False, 'import argparse\n'), ((13405, 13463), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)', 'sharey': '(False)', 'figsize': '(4, 6)'}), '(2, sharex=True, sharey=False, figsize=(4, 6))\n', (13417, 13463), True, 'import matplotlib.pyplot as plt\n'), ((13478, 13534), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(8, 12)', '(0, 0)'], {'rowspan': '(5)', 'colspan': '(12)'}), '((8, 12), (0, 0), rowspan=5, colspan=12)\n', (13494, 13534), True, 'import matplotlib.pyplot as plt\n'), ((13549, 13605), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(8, 12)', '(5, 0)'], {'rowspan': '(3)', 'colspan': '(12)'}), '((8, 12), (5, 0), rowspan=3, colspan=12)\n', (13565, 13605), True, 'import matplotlib.pyplot as plt\n'), ((14909, 14927), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14925, 14927), True, 'import matplotlib.pyplot as plt\n'), ((16241, 16276), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['pgf_with_latex'], {}), '(pgf_with_latex)\n', (16260, 16276), True, 'import matplotlib as mpl\n'), ((16568, 16573), 'sets.Set', 'Set', ([], {}), '()\n', (16571, 16573), False, 'from sets import Set\n'), ((1413, 1431), 'summarizer.utils.reader.read_csv', 'read_csv', (['filename'], {}), '(filename)\n', (1421, 1431), False, 'from summarizer.utils.reader import read_csv\n'), ((1679, 1731), 'numpy.linspace', 'np.linspace', (['(mean - 4 * sigma)', '(mean + 4 * sigma)', '(200)'], {}), '(mean - 4 * sigma, mean + 4 * sigma, 200)\n', (1690, 1731), True, 'import numpy as np\n'), ((1736, 1768), 'matplotlib.pyplot.hist', 'plt.hist', (['array', 'bins'], {'alpha': '(0.5)'}), '(array, bins, alpha=0.5)\n', 
(1744, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1813), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['bins', 'mean', 'sigma'], {}), '(bins, mean, sigma)\n', (1794, 1813), True, 'import matplotlib.mlab as mlab\n'), ((1822, 1846), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', 'y', '"""r--"""'], {}), "(bins, y, 'r--')\n", (1830, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1885), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)'}), '(left=0.15)\n', (1874, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1904), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1902, 1904), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2473), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (2466, 2473), True, 'import numpy as np\n'), ((2489, 2506), 'numpy.mean', 'np.mean', (['np_array'], {}), '(np_array)\n', (2496, 2506), True, 'import numpy as np\n'), ((2523, 2539), 'numpy.std', 'np.std', (['np_array'], {}), '(np_array)\n', (2529, 2539), True, 'import numpy as np\n'), ((14327, 14359), 'numpy.arange', 'np.arange', (['(0)', 'break_iteration', '(1)'], {}), '(0, break_iteration, 1)\n', (14336, 14359), True, 'import numpy as np\n'), ((14809, 14841), 'numpy.arange', 'np.arange', (['(0)', 'break_iteration', '(1)'], {}), '(0, break_iteration, 1)\n', (14818, 14841), True, 'import numpy as np\n'), ((14871, 14903), 'numpy.arange', 'np.arange', (['(0)', 'break_iteration', '(1)'], {}), '(0, break_iteration, 1)\n', (14880, 14903), True, 'import numpy as np\n'), ((573, 585), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (580, 585), True, 'import numpy as np\n'), ((1619, 1662), 'matplotlib.pyplot.axvline', 'plt.axvline', (['val'], {'color': '"""k"""', 'linestyle': '"""--"""'}), "(val, color='k', linestyle='--')\n", (1630, 1662), True, 'import matplotlib.pyplot as plt\n'), ((17328, 17339), 'sets.Set', 'Set', (['topics'], {}), '(topics)\n', (17331, 17339), False, 'from sets import Set\n'), ((18287, 
18308), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (18297, 18308), False, 'import os\n'), ((121, 143), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (133, 143), True, 'import sys, os.path as path\n'), ((17255, 17276), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (17265, 17276), False, 'import os\n'), ((2600, 2616), 'numpy.min', 'np.min', (['np_array'], {}), '(np_array)\n', (2606, 2616), True, 'import numpy as np\n'), ((2618, 2634), 'numpy.max', 'np.max', (['np_array'], {}), '(np_array)\n', (2624, 2634), True, 'import numpy as np\n'), ((3833, 3880), 'numpy.array', 'np.array', (['self.R2_scores[cluster][user][1:last]'], {}), '(self.R2_scores[cluster][user][1:last])\n', (3841, 3880), True, 'import numpy as np\n'), ((18599, 18624), 'os.listdir', 'os.listdir', (['data_org_path'], {}), '(data_org_path)\n', (18609, 18624), False, 'import os\n'), ((7427, 7444), 'numpy.array', 'np.array', (['rejects'], {}), '(rejects)\n', (7435, 7444), True, 'import numpy as np\n'), ((7539, 7556), 'numpy.array', 'np.array', (['accepts'], {}), '(accepts)\n', (7547, 7556), True, 'import numpy as np\n'), ((7653, 7673), 'numpy.array', 'np.array', (['iterations'], {}), '(iterations)\n', (7661, 7673), True, 'import numpy as np\n')] |
from pylagrit import PyLaGriT
import numpy

# Regular hex grid: unit spacing in x and y, a single layer in z.
xcoords = numpy.arange(0, 10.1, 1)
ycoords = xcoords
zcoords = [0, 1]

session = PyLaGriT()
mesh = session.gridder(xcoords, ycoords, zcoords, elem_type='hex', connect=True)

# Rotate the mesh 25 degrees about a line along x, just outside its extent.
mesh.rotateln([mesh.xmin - 0.1, 0, 0], [mesh.xmax + 0.1, 0, 0], 25)

mesh.dump_exo('rotated.exo')
mesh.dump_ats_xml('rotated.xml', 'rotated.exo')
mesh.paraview()
| [
"pylagrit.PyLaGriT",
"numpy.arange"
] | [((48, 72), 'numpy.arange', 'numpy.arange', (['(0)', '(10.1)', '(1)'], {}), '(0, 10.1, 1)\n', (60, 72), False, 'import numpy\n'), ((93, 103), 'pylagrit.PyLaGriT', 'PyLaGriT', ([], {}), '()\n', (101, 103), False, 'from pylagrit import PyLaGriT\n')] |
## interaction / scripts / create_translation_repository.py
'''
This script will pre-calculate the translation operators for a given bounding
box, max level, and frequency steps for a multi-level fast multipole algorithm.
This can take hours to days depending on the number of threads available, size
of bounding box, number of levels etc. The output is stored in a single H5 file.
To use, run with a corresponding yaml config file for setting the input
parameters.
python create_translation_repository.py <path to config file>
Author: <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import sqlite3 as sql
import multiprocessing
from itertools import repeat
from contextlib import closing
import os
from tqdm import tqdm
from interaction3.bem.core import fma_functions as fma
from interaction3.bem.core.db_functions import get_order
# Teach sqlite3 how to bind numpy scalar types by converting them to the
# corresponding native Python types.
for numpy_type, python_type in ((np.float64, float), (np.float32, float),
                                (np.int64, int), (np.int32, int)):
    sql.register_adapter(numpy_type, python_type)
## PROCESS FUNCTIONS ##
def generate_translations(file, f, k, dims, levels, orders_db):
    '''
    Compute the translation operator for every quadtree level and every
    unique relative group coordinate at frequency f / wavenumber k, and
    write each result into the database at `file`.
    '''
    xdim, ydim = dims
    minlevel, maxlevel = levels

    for level in range(minlevel, maxlevel + 1):
        # quadrature order for this (frequency, level) pair
        order = get_order(orders_db, f, level)
        qrule = fma.fft_quadrule(order, order)

        # group (box) dimensions halve with every level
        scale = 2 ** level
        group_dims = np.array([xdim / scale, ydim / scale, 1])

        kcoordT = qrule['kcoordT']
        theta = qrule['theta']
        phi = qrule['phi']

        for coords in fma.get_unique_coords():
            # separation vector between group centers, in physical units
            r = coords * group_dims
            dist = fma.mag(r)
            cos_angle = (r / dist).dot(kcoordT)
            translation = np.ascontiguousarray(fma.mod_ff2nf_op(dist, cos_angle, k, order))

            # serialize database writes across worker processes
            with write_lock:
                with closing(sql.connect(file)) as conn:
                    update_translations_table(conn, f, k, level, order, tuple(coords), theta, phi, translation)
def init_process(_write_lock):
    # Pool initializer: runs once in each worker process and publishes the
    # shared lock as a module-level global (locks cannot be passed through
    # the imap task arguments).
    global write_lock
    write_lock = _write_lock
def process(proc_args):
    '''
    Worker entry point: build all translations for one frequency, then
    mark that frequency as complete so an interrupted run can resume.
    '''
    generate_translations(*proc_args)

    db_file, freq = proc_args[0], proc_args[1]
    with write_lock:
        with closing(sql.connect(db_file)) as conn:
            update_progress(conn, freq)
## ENTRY POINT ##
def main(**kwargs):
    '''
    Build (or resume building) the translation-operator database.

    Expected kwargs: threads, freqs (start, stop, step), levels (min, max),
    dims (x, y), sound_speed, file, orders_db. `threads`, `file`, and
    `orders_db` may be None, in which case defaults are derived here.
    If the database file already exists the user is prompted to overwrite,
    continue (resume incomplete frequencies), or abort.
    '''
    threads = kwargs['threads']
    freqs = kwargs['freqs']
    levels = kwargs['levels']
    dims = kwargs['dims']
    c = kwargs['sound_speed']
    file = kwargs['file']
    orders_db = kwargs['orders_db']

    # set default threads to logical core count
    if threads is None:
        threads = multiprocessing.cpu_count()
        kwargs['threads'] = threads

    # path to this module's directory
    module_dir = os.path.dirname(os.path.realpath(__file__))

    # set default file name for database
    if file is None:
        file = os.path.join(module_dir, 'translations_dims_{:0.4f}_{:0.4f}.db'.format(*dims))
        kwargs['file'] = file

    # set default file name of orders database to use
    if orders_db is None:
        orders_db = os.path.join(module_dir, 'orders_dims_{:0.4f}_{:0.4f}.db'.format(*dims))
        kwargs['orders_db'] = orders_db

    # check for existing file
    if os.path.isfile(file):

        response = input('Database ' + str(file) + ' already exists. \nContinue (c), Overwrite (o), or Do nothing ('
                                                    'any other key)?')
        if response.lower() in ['o', 'overwrite']:

            os.remove(file)
            fs, ks = _frequency_grid(freqs, c)
            _create_database(file, fs, ks, levels, kwargs)

        elif response.lower() in ['c', 'continue']:

            # resume: process only frequencies not yet marked complete
            with closing(sql.connect(file)) as conn:

                query = '''
                        SELECT frequency, wavenumber FROM frequencies
                        WHERE is_complete=0
                        '''
                table = pd.read_sql(query, conn)

            fs = np.array(table['frequency'])
            ks = np.array(table['wavenumber'])

        else:
            raise Exception('Database already exists')

    else:

        # make parent directories if they do not exist; dirname may be ''
        # when `file` is a bare relative filename
        file_dir = os.path.dirname(file)
        if file_dir and not os.path.exists(file_dir):
            os.makedirs(file_dir)

        fs, ks = _frequency_grid(freqs, c)
        _create_database(file, fs, ks, levels, kwargs)

    # start multiprocessing pool and run process
    pool = None  # so the finally clause is safe if Pool() itself fails
    try:

        write_lock = multiprocessing.Lock()
        pool = multiprocessing.Pool(threads, initializer=init_process, initargs=(write_lock,))
        proc_args = [(file, f, k, dims, levels, orders_db) for f, k in zip(fs, ks)]
        result = pool.imap_unordered(process, proc_args)

        for r in tqdm(result, desc='Building', total=len(fs)):
            pass

    except Exception as e:
        print(e)

    finally:

        if pool is not None:
            pool.terminate()
            pool.join()  # wait for workers to exit before returning


def _frequency_grid(freqs, c):
    '''Expand (start, stop, step) into frequency and wavenumber arrays.'''
    f_start, f_stop, f_step = freqs
    fs = np.arange(f_start, f_stop + f_step, f_step)
    ks = 2 * np.pi * fs / c
    return fs, ks


def _create_database(file, fs, ks, levels, kwargs):
    '''Create a fresh database populated with all required tables.'''
    with closing(sql.connect(file)) as conn:

        create_metadata_table(conn, **kwargs)
        create_frequencies_table(conn, fs, ks)
        create_levels_table(conn, levels)
        create_coordinates_table(conn)
        create_translations_table(conn)
## DATABASE FUNCTIONS ##
def create_metadata_table(conn, **kwargs):
    '''
    Store all run parameters in a single-row metadata table (every value
    stringified; any existing metadata table is replaced).
    '''
    names = list(kwargs.keys())
    values = [[str(v) for v in kwargs.values()]]
    frame = pd.DataFrame(values, columns=names, dtype=str)
    frame.to_sql('metadata', conn, if_exists='replace', index=False)
def create_frequencies_table(conn, fs, ks):
    '''
    Create the frequencies table and seed it with every (frequency,
    wavenumber) pair, all initially flagged as not complete.
    '''
    conn.execute('''
        CREATE TABLE frequencies (
        frequency float,
        wavenumber float,
        is_complete boolean
        )
        ''')

    # either column can serve as a unique key for other tables
    conn.execute('CREATE UNIQUE INDEX frequency_index ON frequencies (frequency)')
    conn.execute('CREATE UNIQUE INDEX wavenumber_index ON frequencies (wavenumber)')

    rows = [(f, k, False) for f, k in zip(fs, ks)]
    conn.executemany('''
        INSERT INTO frequencies (frequency, wavenumber, is_complete)
        VALUES (?, ?, ?)
        ''', rows)
    conn.commit()
def update_progress(conn, f):
    '''
    Flag frequency `f` as finished in the frequencies table.
    '''
    conn.execute('UPDATE frequencies SET is_complete=1 WHERE frequency=?', (f,))
    conn.commit()
def create_levels_table(conn, levels):
    '''
    Create the 'levels' table holding every level in the inclusive
    range given by levels = (minlevel, maxlevel).
    '''
    minlevel, maxlevel = levels
    conn.execute('''
    CREATE TABLE levels (
    level int
    )
    ''')
    # one row per level, enforced by a unique index
    conn.execute('''
    CREATE UNIQUE INDEX level_index ON levels (level)
    ''')
    rows = [(level,) for level in range(minlevel, maxlevel + 1)]
    conn.executemany('''
    INSERT INTO levels (level)
    VALUES (?)
    ''', rows)
    conn.commit()
def create_coordinates_table(conn):
    '''
    Create the 'coordinates' table and fill it with the unique
    (x, y, z) coordinates reported by the fma helper module.
    '''
    conn.execute('''
    CREATE TABLE coordinates (
    x int,
    y int,
    z int
    )
    ''')
    # each coordinate triple appears exactly once
    conn.execute('''
    CREATE UNIQUE INDEX coordinates_index ON coordinates (x, y, z)
    ''')
    conn.executemany('''
    INSERT INTO coordinates
    VALUES (?, ?, ?)
    ''', fma.get_unique_coords())
    conn.commit()
def create_translations_table(conn):
    '''
    Create the empty 'translations' table plus a lookup index.

    Each row stores one angular sample of a translation operator, keyed
    by frequency, level and (x, y, z) coordinate, with the complex value
    split into translation_real / translation_imag columns.
    '''
    conn.execute('''
    CREATE TABLE translations (
    id INTEGER PRIMARY KEY,
    frequency float,
    wavenumber float,
    level int,
    x int,
    y int,
    z int,
    theta float,
    phi float,
    ntheta int,
    nphi int,
    translation_order int,
    translation_real float,
    translation_imag float,
    FOREIGN KEY (frequency) REFERENCES frequencies (frequency),
    FOREIGN KEY (wavenumber) REFERENCES frequencies (wavenumber),
    FOREIGN KEY (level) REFERENCES levels (level),
    FOREIGN KEY (x, y, z) REFERENCES coordinates (x, y, z)
    )
    ''')
    # speed up the common lookup pattern: by frequency, level and coordinate
    conn.execute('''
    CREATE INDEX translation_index ON translations (frequency, level, x, y, z)
    ''')
    conn.commit()
def update_translations_table(conn, f, k, l, order, coord, thetas, phis, translations):
    '''
    Insert one batch of translation-operator samples for a single
    (frequency, level, coordinate) combination.

    The angular grids are expanded with meshgrid (ij indexing) so each
    row holds one (theta, phi) sample; complex translation values are
    stored as separate real and imaginary columns.
    '''
    x, y, z = coord
    theta_grid, phi_grid = np.meshgrid(thetas, phis, indexing='ij')
    query = '''
    INSERT INTO translations (frequency, wavenumber, level, x, y, z, ntheta, nphi, theta, phi,
                              translation_order, translation_real, translation_imag)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    '''
    # scalar columns are repeated across the whole batch
    values = zip(repeat(f), repeat(k), repeat(l), repeat(x), repeat(y), repeat(z),
                 repeat(len(thetas)), repeat(len(phis)),
                 theta_grid.ravel(), phi_grid.ravel(), repeat(order),
                 np.real(translations.ravel()), np.imag(translations.ravel()))
    conn.executemany(query, values)
    conn.commit()
## COMMAND LINE INTERFACE ##
if __name__ == '__main__':
    import argparse
    # default arguments
    nthreads = None                 # worker count; None lets main() pick its own default
    freqs = 50e3, 50e6, 50e3        # frequency sweep as (start, stop, step) -- presumably Hz; confirm with main()
    levels = 2, 6                   # (minimum, maximum) level, inclusive
    dims = 4e-3, 4e-3               # two lateral dimensions -- presumably meters; confirm with main()
    sound_speed = 1500              # propagation speed forwarded via --sound-speed
    file = None                     # output database path (optional positional argument)
    orders_db = None                # optional database the per-frequency orders are read from
    # define and parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('file', nargs='?', default=file)
    parser.add_argument('-t', '--threads', type=int, default=nthreads)
    parser.add_argument('-f', '--freqs', nargs=3, type=float, default=freqs)
    parser.add_argument('-l', '--levels', nargs=2, type=int, default=levels)
    parser.add_argument('-d', '--dims', nargs=2, type=float, default=dims)
    parser.add_argument('-o', '--orders-db', type=str, default=orders_db)
    parser.add_argument('--sound-speed', type=float, default=sound_speed)
    # forward the parsed options to main() as keyword arguments
    args = vars(parser.parse_args())
    main(**args)
| [
"interaction3.bem.core.db_functions.get_order",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.arange",
"itertools.repeat",
"os.remove",
"os.path.exists",
"argparse.ArgumentParser",
"pandas.DataFrame",
"numpy.meshgrid",
"interaction3.bem.core.fma_functions.fft_quadrule",
"interaction3.bem.... | [((907, 946), 'sqlite3.register_adapter', 'sql.register_adapter', (['np.float64', 'float'], {}), '(np.float64, float)\n', (927, 946), True, 'import sqlite3 as sql\n'), ((947, 986), 'sqlite3.register_adapter', 'sql.register_adapter', (['np.float32', 'float'], {}), '(np.float32, float)\n', (967, 986), True, 'import sqlite3 as sql\n'), ((987, 1022), 'sqlite3.register_adapter', 'sql.register_adapter', (['np.int64', 'int'], {}), '(np.int64, int)\n', (1007, 1022), True, 'import sqlite3 as sql\n'), ((1023, 1058), 'sqlite3.register_adapter', 'sql.register_adapter', (['np.int32', 'int'], {}), '(np.int32, int)\n', (1043, 1058), True, 'import sqlite3 as sql\n'), ((3436, 3456), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3450, 3456), False, 'import os\n'), ((8070, 8093), 'interaction3.bem.core.fma_functions.get_unique_coords', 'fma.get_unique_coords', ([], {}), '()\n', (8091, 8093), True, 'from interaction3.bem.core import fma_functions as fma\n'), ((9643, 9683), 'numpy.meshgrid', 'np.meshgrid', (['thetas', 'phis'], {'indexing': '"""ij"""'}), "(thetas, phis, indexing='ij')\n", (9654, 9683), True, 'import numpy as np\n'), ((10613, 10638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10636, 10638), False, 'import argparse\n'), ((1267, 1293), 'interaction3.bem.core.db_functions.get_order', 'get_order', (['orders_db', 'f', 'l'], {}), '(orders_db, f, l)\n', (1276, 1293), False, 'from interaction3.bem.core.db_functions import get_order\n'), ((1311, 1341), 'interaction3.bem.core.fma_functions.fft_quadrule', 'fma.fft_quadrule', (['order', 'order'], {}), '(order, order)\n', (1327, 1341), True, 'from interaction3.bem.core import fma_functions as fma\n'), ((1527, 1550), 'interaction3.bem.core.fma_functions.get_unique_coords', 'fma.get_unique_coords', ([], {}), '()\n', (1548, 1550), True, 'from interaction3.bem.core import fma_functions as fma\n'), ((2700, 2727), 'multiprocessing.cpu_count', 
'multiprocessing.cpu_count', ([], {}), '()\n', (2725, 2727), False, 'import multiprocessing\n'), ((2836, 2862), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2852, 2862), False, 'import os\n'), ((4907, 4928), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (4922, 4928), False, 'import os\n'), ((5106, 5149), 'numpy.arange', 'np.arange', (['f_start', '(f_stop + f_step)', 'f_step'], {}), '(f_start, f_stop + f_step, f_step)\n', (5115, 5149), True, 'import numpy as np\n'), ((5616, 5638), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (5636, 5638), False, 'import multiprocessing\n'), ((5654, 5733), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {'initializer': 'init_process', 'initargs': '(write_lock,)'}), '(threads, initializer=init_process, initargs=(write_lock,))\n', (5674, 5733), False, 'import multiprocessing\n'), ((3746, 3761), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (3755, 3761), False, 'import os\n'), ((3876, 3919), 'numpy.arange', 'np.arange', (['f_start', '(f_stop + f_step)', 'f_step'], {}), '(f_start, f_stop + f_step, f_step)\n', (3885, 3919), True, 'import numpy as np\n'), ((4944, 4968), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (4958, 4968), False, 'import os\n'), ((4982, 5003), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (4993, 5003), False, 'import os\n'), ((6242, 6289), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {'columns': 'columns', 'dtype': 'str'}), '(table, columns=columns, dtype=str)\n', (6254, 6289), True, 'import pandas as pd\n'), ((7182, 7195), 'itertools.repeat', 'repeat', (['(False)'], {}), '(False)\n', (7188, 7195), False, 'from itertools import repeat\n'), ((10025, 10034), 'itertools.repeat', 'repeat', (['f'], {}), '(f)\n', (10031, 10034), False, 'from itertools import repeat\n'), ((10036, 10045), 'itertools.repeat', 'repeat', (['k'], {}), '(k)\n', (10042, 10045), False, 'from itertools 
import repeat\n'), ((10047, 10056), 'itertools.repeat', 'repeat', (['l'], {}), '(l)\n', (10053, 10056), False, 'from itertools import repeat\n'), ((10058, 10067), 'itertools.repeat', 'repeat', (['x'], {}), '(x)\n', (10064, 10067), False, 'from itertools import repeat\n'), ((10069, 10078), 'itertools.repeat', 'repeat', (['y'], {}), '(y)\n', (10075, 10078), False, 'from itertools import repeat\n'), ((10080, 10089), 'itertools.repeat', 'repeat', (['z'], {}), '(z)\n', (10086, 10089), False, 'from itertools import repeat\n'), ((10091, 10105), 'itertools.repeat', 'repeat', (['ntheta'], {}), '(ntheta)\n', (10097, 10105), False, 'from itertools import repeat\n'), ((10139, 10151), 'itertools.repeat', 'repeat', (['nphi'], {}), '(nphi)\n', (10145, 10151), False, 'from itertools import repeat\n'), ((10183, 10196), 'itertools.repeat', 'repeat', (['order'], {}), '(order)\n', (10189, 10196), False, 'from itertools import repeat\n'), ((1615, 1652), 'numpy.array', 'np.array', (['[group_xdim, group_ydim, 1]'], {}), '([group_xdim, group_ydim, 1])\n', (1623, 1652), True, 'import numpy as np\n'), ((1676, 1686), 'interaction3.bem.core.fma_functions.mag', 'fma.mag', (['r'], {}), '(r)\n', (1683, 1686), True, 'from interaction3.bem.core import fma_functions as fma\n'), ((2293, 2310), 'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (2304, 2310), True, 'import sqlite3 as sql\n'), ((4682, 4710), 'numpy.array', 'np.array', (["table['frequency']"], {}), "(table['frequency'])\n", (4690, 4710), True, 'import numpy as np\n'), ((4728, 4757), 'numpy.array', 'np.array', (["table['wavenumber']"], {}), "(table['wavenumber'])\n", (4736, 4757), True, 'import numpy as np\n'), ((5230, 5247), 'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (5241, 5247), True, 'import sqlite3 as sql\n'), ((1794, 1804), 'interaction3.bem.core.fma_functions.mag', 'fma.mag', (['r'], {}), '(r)\n', (1801, 1804), True, 'from interaction3.bem.core import fma_functions as fma\n'), ((4012, 4029), 
'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (4023, 4029), True, 'import sqlite3 as sql\n'), ((4639, 4663), 'pandas.read_sql', 'pd.read_sql', (['query', 'conn'], {}), '(query, conn)\n', (4650, 4663), True, 'import pandas as pd\n'), ((1887, 1904), 'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (1898, 1904), True, 'import sqlite3 as sql\n'), ((4415, 4432), 'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (4426, 4432), True, 'import sqlite3 as sql\n')] |
import pandas as pd
import numpy as np
import umap
import sklearn.cluster as cluster
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
import spacy
import unicodedata
import matplotlib.pyplot as plt
import logging
# Emit timestamped INFO-level progress messages for the long-running loops below.
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# Hand-labeled Julia variables; read with columns "variable", "KMeansLabels"
# and "DBSCANLabels" by the experiment/accuracy functions below.
JULIA_VARIABLE_CSV_PATH = "ExperimentData/JuliaVariableData.csv"
# Per-node cluster labels written by writeUMAP_DBSCAN_CSV (column "umapLabels" etc.).
CLUSTER_LABEL_CSV_PATH = "clusteringLabels.csv"
# KMeans labels for the training nodes, written by runKMeansExp/runCombinationExp.
KMEANS_CLUSTER_LABEL_CSV_PATH = "ExperimentData/KmeansCluster.csv"
# Hand-labeled truth for the KMeans clusters; read by graphCombinationExp.
KMEANS_CLUSTER_TRUTH_CSV_PATH = "ExperimentData/KmeanClusterTruths.csv"
# KMeans cluster predictions for the variables.
KMEANS_PREDICTED_CSV_PATH = "ExperimentData/KmeansPredicted.csv"
# Similarity-based cluster predictions for the variables.
PREDICTED_UMAP_CSV_PATH = "ExperimentData/simPredictedUmapClusters.csv"
def createWord2Vec(data):
    '''
    Embed each phrase in data with the spaCy medium English model and
    return the vectors stacked into a single numpy array.
    '''
    nlp = spacy.load('en_core_web_md')
    vectors = [nlp(phrase).vector for phrase in data]
    return np.asarray(vectors)
def useUMAP(tokenList):
    '''
    Cluster the token vectors with DBSCAN twice: once on the raw
    vectors and once after a UMAP dimensionality reduction.

    Returns (raw DBSCAN labels, DBSCAN-on-UMAP labels) as numpy arrays.
    '''
    vectors = np.asarray(tokenList)
    raw_clusters = DBSCAN(eps=0.3, min_samples=2).fit(vectors)
    reducer = umap.UMAP(random_state=42).fit(vectors)
    embedding = reducer.transform(tokenList)
    reduced_clusters = DBSCAN(eps=0.3, min_samples=2).fit(embedding)
    return np.asarray(raw_clusters.labels_), np.asarray(reduced_clusters.labels_)
def writeUMAP_DBSCAN_CSV(subj_array, labels, umapLabels, labelsSimArray,
                         uMapLabelsSimArray, OutSampleLabelsSimArray,
                         OutSampleUMAPSimArray):
    '''
    Dump per-node cluster labels and similarity scores to
    CLUSTER_LABEL_CSV_PATH as a CSV with a fixed seven-column header.
    '''
    logging.info("Writing CSV")
    lines = ["node,labels,umapLabels,dbscanSim,UMAPsim,out_sampleDBSCAN,out_sampleUMAP\n"]
    for i in range(len(labels)):
        fields = [str(subj_array[i]), str(labels[i]), str(umapLabels[i]),
                  str(labelsSimArray[i]), str(uMapLabelsSimArray[i]),
                  str(OutSampleLabelsSimArray[i]), str(OutSampleUMAPSimArray[i])]
        lines.append(",".join(fields) + "\n")
    with open(CLUSTER_LABEL_CSV_PATH, 'w') as out_file:
        out_file.write("".join(lines))
def generatePairs(labels, umapLabels, data):
    '''
    Score every word against every other word's cluster assignment.

    For each word in data, accumulate pairwise spaCy similarity to all
    other words and, once the inner loop finishes, record four means:
    the in-cluster mean under the DBSCAN labels, the in-cluster mean
    under the UMAP labels, and the corresponding out-of-cluster means.

    Returns four parallel lists, one entry per word:
    (labelsSimArray, uMapLabelsSimArray,
     OutSampleLabelsSimArray, OutSampleUMAPSimArray).
    '''
    nlp = spacy.load('en_core_web_md')
    labelsSimArray = []
    uMapLabelsSimArray = []
    OutSampleLabelsSimArray = []
    OutSampleUMAPSimArray = []
    # Running similarity totals for the current word i; reset at the end
    # of each inner loop.
    labels_sim = 0;
    umapLabels_sim = 0;
    outsample_labels_sim = 0;
    outsample_umap_sim = 0;
    for i in range(len(data)):
        logging.info("Iterating Word " + str(i))
        for j in range(len(data)):
            if i != j:
                token1 = nlp(data[i])
                token2 = nlp(data[j])
                # same cluster -> in-sample totals
                if(labels[i] == labels[j]):
                    labels_sim += token1.similarity(token2)
                if(umapLabels[i] == umapLabels[j]):
                    umapLabels_sim += token1.similarity(token2)
                # different cluster -> out-of-sample totals
                if(labels [i] != labels[j]):
                    outsample_labels_sim += token1.similarity(token2)
                if(umapLabels[i] != umapLabels[j]):
                    outsample_umap_sim += token1.similarity(token2)
            if j == len(data)-1:
                # Inner loop done: convert totals into means. In-sample
                # denominators are (cluster size - 1), the number of
                # same-cluster partners of word i.
                labelsSimArray.append(float(labels_sim/(list(labels).count(labels[i])-1)))
                uMapLabelsSimArray.append(float(umapLabels_sim/(list(umapLabels).count(umapLabels[i])-1)))
                # NOTE(review): the guard tests len - count == 0 but the
                # denominator below is len - 1 - count, while the number of
                # different-cluster partners is len - count. Confirm whether
                # the extra -1 is intentional.
                if len(labels)-list(labels).count(labels[i]) == 0:
                    OutSampleLabelsSimArray.append(1)
                else:
                    OutSampleLabelsSimArray.append(float(outsample_labels_sim/(len(labels)-1-list(labels).count(labels[i]))))
                if len(umapLabels)-list(umapLabels).count(umapLabels[i]) == 0:
                    OutSampleUMAPSimArray.append(1)
                else:
                    OutSampleUMAPSimArray.append(float(outsample_umap_sim/(len(umapLabels)-1-list(umapLabels).count(umapLabels[i]))))
                # reset the running totals for the next word
                labels_sim = 0;
                umapLabels_sim = 0;
                outsample_labels_sim = 0;
                outsample_umap_sim = 0;
    return labelsSimArray, uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray
def createCluster(svoFile):
    '''
    End-to-end clustering experiment: read SVO triples from svoFile,
    embed all subject and object phrases, cluster them with DBSCAN and
    UMAP+DBSCAN, score in/out-of-cluster similarity, and write the
    combined results CSV.
    '''
    svo_data = pd.read_csv(svoFile)
    nodes = list(svo_data["subject"]) + list(svo_data["object"])
    vectors = createWord2Vec(nodes)
    labels, umap_labels = useUMAP(vectors)
    in_dbscan, in_umap, out_dbscan, out_umap = \
        generatePairs(labels, umap_labels, nodes)
    writeUMAP_DBSCAN_CSV(nodes, labels, umap_labels, in_dbscan,
                         in_umap, out_dbscan, out_umap)
def cleanVariables(variableArray):
    '''
    Normalize variable names in place: commas and underscores become
    spaces and Greek characters are spelled out via containsGreek().
    Returns the same (mutated) list.
    '''
    for index, name in enumerate(variableArray):
        normalized = str(name).replace(",", " ").replace("_", " ")
        variableArray[index] = containsGreek(normalized)
    return variableArray
def containsGreek(inputString):
    '''
    Replace every Greek character in inputString with its spelled-out
    letter name followed by a space (e.g. 'α' -> 'Alpha ').

    Robustness fixes over the original:
    - unicodedata.name() raises ValueError for characters without a
      name (e.g. most control characters such as '\\n'); those are now
      skipped instead of crashing.
    - Greek characters whose Unicode name does not follow the
      'GREEK <CASE> LETTER <NAME>' four-word pattern (e.g.
      'GREEK QUESTION MARK') previously caused an IndexError on
      split(" ")[3]; they are now left untouched.
    '''
    greekLetters = []
    for s in inputString:
        try:
            name = unicodedata.name(s)
        except ValueError:
            # unnamed character -- cannot be a Greek letter
            continue
        if "GREEK" in name:
            greekLetters.append(s)
    for letter in greekLetters:
        parts = unicodedata.name(letter).split(" ")
        if len(parts) < 4:
            # e.g. 'GREEK QUESTION MARK' -- no letter name to substitute
            continue
        name = parts[3].lower().capitalize()
        inputString = inputString.replace(letter, str(name) + str(" "))
    return inputString
def useKmeans(trainTokenList, K_size, variableTokenList):
    '''
    Fit KMeans on a UMAP embedding of the training vectors and predict
    cluster assignments for the variable vectors in the same embedding.

    Args:
        trainTokenList: embedding vectors used to fit UMAP and KMeans.
        K_size: number of KMeans clusters.
        variableTokenList: vectors to assign to the fitted clusters.

    Returns:
        (training labels, predicted labels) from the fitted KMeans.
    '''
    # Fit the reducer on the training vectors so both sets share one embedding
    # space. (A leftover debug print of the argument types was removed here.)
    umapModel = umap.UMAP(random_state=42).fit(np.asarray(trainTokenList))
    trainEmbedding = umapModel.transform(trainTokenList)
    predictEmbedding = umapModel.transform(variableTokenList)
    kmeans = KMeans(n_clusters=K_size, random_state=0).fit(trainEmbedding)
    return kmeans.labels_, kmeans.predict(predictEmbedding)
def writeCSV(variable_array, predictedLabels, fileName):
    '''
    Write (variable, cluster) pairs to fileName as a two-column CSV.

    Commas inside a variable name are replaced with spaces so every row
    stays exactly two columns wide.
    '''
    logging.info("generating CSV " + fileName)
    rows = ["variable,cluster\n"]
    for i in range(len(variable_array)):
        rows.append(str(variable_array[i].replace(",", " ")) + "," + str(predictedLabels[i]) + "\n")
    with open(fileName, 'w') as out_file:
        out_file.write("".join(rows))
def groupNodesByCluster(umapData):
    '''
    Group node names by their UMAP cluster label.

    Args:
        umapData: DataFrame with columns "umapLabels" and "node".

    Returns:
        List of lists: element i holds, in row order, the nodes whose
        "umapLabels" value equals i, for i in 0..max(label). Negative
        (noise) labels are excluded, matching the original behavior.
    '''
    labels = list(umapData["umapLabels"])
    nodes = list(umapData["node"])
    clusteredNodes = [[] for _ in range(max(labels) + 1)]
    # Single pass over the rows instead of rescanning both columns once
    # per cluster (the original was O(rows * clusters)).
    for label, node in zip(labels, nodes):
        if 0 <= label < len(clusteredNodes):
            clusteredNodes[label].append(node)
    return clusteredNodes
def groupNodesByKMeansCluster(kMeansData):
    '''
    Group variable names by their KMeans cluster label.

    Args:
        kMeansData: DataFrame with columns "cluster" and "variable".

    Returns:
        List of lists: element i holds, in row order, the "variable"
        values whose "cluster" value equals i, for i in 0..max(cluster).
        Negative labels are excluded, matching the original behavior.
    '''
    labels = list(kMeansData["cluster"])
    variables = list(kMeansData["variable"])
    clusteredNodes = [[] for _ in range(max(labels) + 1)]
    # Single pass over the rows instead of rescanning both columns once
    # per cluster (the original was O(rows * clusters)).
    for label, variable in zip(labels, variables):
        if 0 <= label < len(clusteredNodes):
            clusteredNodes[label].append(variable)
    return clusteredNodes
def getSimilarityLabels(clusteredNodes, variable_array):
    '''
    Assign each variable to the cluster whose member nodes have the
    highest mean spaCy similarity to the variable.

    Returns one cluster index per variable, in order.
    '''
    nlp = spacy.load('en_core_web_md')
    labels = []
    for count, variable in enumerate(variable_array):
        logging.info("Comparing Variable No: " + str(count))
        variableToken = nlp(variable)
        best_label = 0
        best_average = -9000
        for clusterNo, members in enumerate(clusteredNodes):
            total = 0
            for node in members:
                total += variableToken.similarity(nlp(node))
            average = total / len(members)
            if average > best_average:
                best_average = average
                best_label = clusterNo
        labels.append(best_label)
    return labels
def calculateKMeansAccuracy():
    '''
    Compare the predicted KMeans clusters against the hand-labeled
    truth column and log the fraction of matching assignments.
    '''
    labeled = list(pd.read_csv(JULIA_VARIABLE_CSV_PATH)["KMeansLabels"])
    predicted = list(pd.read_csv(KMEANS_PREDICTED_CSV_PATH)["cluster"])
    matches = sum(1 for i in range(len(predicted)) if labeled[i] == predicted[i])
    logging.info("KMeans Accuracy is : " + str(float(matches/len(predicted))))
def calculateSimAccuracy():
    '''
    Compare the similarity-based cluster predictions against the
    hand-labeled DBSCAN truth column and log the fraction that match.
    '''
    labeled = list(pd.read_csv(JULIA_VARIABLE_CSV_PATH)["DBSCANLabels"])
    predicted = list(pd.read_csv(PREDICTED_UMAP_CSV_PATH)["cluster"])
    matches = sum(1 for i in range(len(predicted)) if labeled[i] == predicted[i])
    logging.info("Similar Cluster Assignment Accuracy is : " + str(float(matches/len(predicted))))
def runKMeansExp():
    '''
    KMeans experiment: train on the previously clustered (non-noise)
    nodes, predict clusters for the Julia variables, write both CSVs,
    and log accuracy against the hand labels.

    A leftover debug print of the training-set size was removed.
    '''
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # Drop DBSCAN noise points (label -1) before training.
    umapData = umapData[umapData.umapLabels != -1]
    kmeansTrainData = list(umapData["node"])
    variable_array = cleanVariables(list(variableData["variable"]))
    variableTokenList = createWord2Vec(variable_array)
    trainTokenList = createWord2Vec(kmeansTrainData)
    K_size = max(list(umapData["umapLabels"]))
    trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
    writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
    writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
    calculateKMeansAccuracy()
def runUMapSimilarityExp():
    '''
    Similarity-assignment experiment: place each Julia variable into
    the existing UMAP cluster whose member nodes it is most similar to,
    write the predictions, and log accuracy.
    '''
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # drop DBSCAN noise points (label -1)
    umapData = umapData[umapData.umapLabels != -1]
    variable_array = cleanVariables(list(variableData["variable"]))
    clusteredNodes = groupNodesByCluster(umapData)
    labels = getSimilarityLabels(clusteredNodes, variable_array)
    writeCSV(variable_array, labels, PREDICTED_UMAP_CSV_PATH)
    calculateSimAccuracy()
def getAverageSimilarity(variable_array, clusteredNodes, predictedLabels):
    '''
    For each variable, compute the mean spaCy similarity between the
    variable and every node in its predicted cluster.

    Args:
        variable_array: variable names, parallel to predictedLabels.
        clusteredNodes: list of node-name lists, indexed by cluster.
        predictedLabels: predicted cluster index per variable.

    Returns:
        One mean similarity per variable, in order.
    '''
    nlp = spacy.load('en_core_web_md')
    averageSimArray = []
    for i in range(len(variable_array)):
        # Parse the variable once per cluster instead of once per member
        # (the original re-ran nlp() on it inside the inner loop).
        variable_token = nlp(variable_array[i])
        members = clusteredNodes[predictedLabels[i]]
        total = 0
        for word in members:
            total += nlp(word).similarity(variable_token)
        averageSimArray.append(float(total / len(members)))
    return averageSimArray
def runCombinationExp():
    '''
    Combined experiment: train KMeans on the clustered nodes, predict
    clusters for the Julia variables, score each prediction by its mean
    similarity to the cluster's members, plot accuracy vs. a similarity
    threshold, and return the per-variable mean similarities.
    '''
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # drop DBSCAN noise points (label -1) before training
    umapData = umapData[umapData.umapLabels != -1]
    kmeansTrainData = list(umapData["node"])
    variable_array = list(variableData["variable"])
    variable_array = cleanVariables(variable_array)
    variableTokenList = createWord2Vec(variable_array)
    trainTokenList = createWord2Vec(kmeansTrainData)
    K_size = max(list(umapData["umapLabels"]))
    trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
    writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
    # re-read the freshly written training labels to group nodes by cluster
    clusteredNodes = groupNodesByKMeansCluster(pd.read_csv(KMEANS_CLUSTER_LABEL_CSV_PATH))
    averageSimArray = getAverageSimilarity(variable_array, clusteredNodes, predictedLabels)
    writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
    graphCombinationExp(averageSimArray)
    return averageSimArray
def graphCombinationExp(averageSimArray):
    '''
    Plot accuracy and the (normalized) number of accepted assignments
    as a function of the similarity threshold, and mark where the two
    curves intersect.

    Robustness fix: the original did int(idx) on the array of curve
    crossings, which raises unless there is exactly one crossing; we
    now take the first crossing and skip the marker when there is none.
    '''
    labeledData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    predictedData = pd.read_csv(KMEANS_CLUSTER_TRUTH_CSV_PATH)
    labeled = list(labeledData["KMeansLabels"])
    predicted = list(predictedData["cluster"])
    thresholdArray = []
    accuracy = []
    numberOfAssignments = []
    threshold = .01
    while threshold < .95:
        assignmentCount = 0
        denominatorCount = 0
        for i in range(len(predicted)):
            if averageSimArray[i] > threshold:
                denominatorCount += 1
            if labeled[i] == predicted[i] and averageSimArray[i] > threshold:
                assignmentCount += 1
        # accuracy over accepted assignments only; 1.0 when nothing accepted
        if denominatorCount != 0:
            accuracy.append(float(assignmentCount/denominatorCount))
        else:
            accuracy.append(1.0)
        numberOfAssignments.append(float(assignmentCount/len(predicted)))
        thresholdArray.append(threshold)
        threshold += .02
    # normalize the assignment counts by the zero-ish-threshold value
    numberOfAssignments = np.divide(np.asarray(numberOfAssignments), numberOfAssignments[0])
    plt.figure(0)
    plt.title("Accuracy vs Normalized True Assignments")
    plt.plot(thresholdArray, accuracy, color="blue", label="Accuracy")
    plt.plot(thresholdArray, numberOfAssignments, color="orange", label="Normalized True Assigns" )
    plt.legend(loc="upper right")
    plt.xticks(np.arange(0, 1, step=0.1))
    plt.xlabel("Similarity Threshold")
    plt.ylabel("Normalized Values")
    # locate sign changes of the difference, i.e. curve crossings
    crossings = np.argwhere(np.diff(np.sign(numberOfAssignments - accuracy))).flatten()
    if crossings.size > 0:
        idx = int(crossings[0])
        plt.plot(thresholdArray[idx], numberOfAssignments[idx], 'ro')
        logging.info("Intersection Threshold is: " + str(thresholdArray[idx]))
    else:
        logging.info("Intersection Threshold is: not found")
| [
"logging.basicConfig",
"logging.getLogger",
"sklearn.cluster.KMeans",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"spacy.load",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"sklearn.cluster.DBSCAN",
"matplotlib.pyplot.figure",
"numpy.sign",
"uma... | [((234, 307), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(message)s', level=logging.INFO)\n", (253, 307), False, 'import logging\n'), ((784, 812), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (794, 812), False, 'import spacy\n'), ((935, 956), 'numpy.asarray', 'np.asarray', (['tokenList'], {}), '(tokenList)\n', (945, 956), True, 'import numpy as np\n'), ((1483, 1510), 'logging.info', 'logging.info', (['"""Writing CSV"""'], {}), "('Writing CSV')\n", (1495, 1510), False, 'import logging\n'), ((2126, 2154), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (2136, 2154), False, 'import spacy\n'), ((4131, 4151), 'pandas.read_csv', 'pd.read_csv', (['svoFile'], {}), '(svoFile)\n', (4142, 4151), True, 'import pandas as pd\n'), ((6028, 6070), 'logging.info', 'logging.info', (["('generating CSV ' + fileName)"], {}), "('generating CSV ' + fileName)\n", (6040, 6070), False, 'import logging\n'), ((7298, 7326), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (7308, 7326), False, 'import spacy\n'), ((8048, 8084), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (8059, 8084), True, 'import pandas as pd\n'), ((8105, 8143), 'pandas.read_csv', 'pd.read_csv', (['KMEANS_PREDICTED_CSV_PATH'], {}), '(KMEANS_PREDICTED_CSV_PATH)\n', (8116, 8143), True, 'import pandas as pd\n'), ((8479, 8515), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (8490, 8515), True, 'import pandas as pd\n'), ((8536, 8572), 'pandas.read_csv', 'pd.read_csv', (['PREDICTED_UMAP_CSV_PATH'], {}), '(PREDICTED_UMAP_CSV_PATH)\n', (8547, 8572), True, 'import pandas as pd\n'), ((8921, 8957), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (8932, 8957), 
True, 'import pandas as pd\n'), ((8973, 9008), 'pandas.read_csv', 'pd.read_csv', (['CLUSTER_LABEL_CSV_PATH'], {}), '(CLUSTER_LABEL_CSV_PATH)\n', (8984, 9008), True, 'import pandas as pd\n'), ((9716, 9752), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (9727, 9752), True, 'import pandas as pd\n'), ((9768, 9803), 'pandas.read_csv', 'pd.read_csv', (['CLUSTER_LABEL_CSV_PATH'], {}), '(CLUSTER_LABEL_CSV_PATH)\n', (9779, 9803), True, 'import pandas as pd\n'), ((10253, 10281), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (10263, 10281), False, 'import spacy\n'), ((10722, 10758), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (10733, 10758), True, 'import pandas as pd\n'), ((10774, 10809), 'pandas.read_csv', 'pd.read_csv', (['CLUSTER_LABEL_CSV_PATH'], {}), '(CLUSTER_LABEL_CSV_PATH)\n', (10785, 10809), True, 'import pandas as pd\n'), ((11723, 11759), 'pandas.read_csv', 'pd.read_csv', (['JULIA_VARIABLE_CSV_PATH'], {}), '(JULIA_VARIABLE_CSV_PATH)\n', (11734, 11759), True, 'import pandas as pd\n'), ((11780, 11822), 'pandas.read_csv', 'pd.read_csv', (['KMEANS_CLUSTER_TRUTH_CSV_PATH'], {}), '(KMEANS_CLUSTER_TRUTH_CSV_PATH)\n', (11791, 11822), True, 'import pandas as pd\n'), ((12727, 12740), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (12737, 12740), True, 'import matplotlib.pyplot as plt\n'), ((12745, 12797), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy vs Normalized True Assignments"""'], {}), "('Accuracy vs Normalized True Assignments')\n", (12754, 12797), True, 'import matplotlib.pyplot as plt\n'), ((12802, 12868), 'matplotlib.pyplot.plot', 'plt.plot', (['thresholdArray', 'accuracy'], {'color': '"""blue"""', 'label': '"""Accuracy"""'}), "(thresholdArray, accuracy, color='blue', label='Accuracy')\n", (12810, 12868), True, 'import matplotlib.pyplot as plt\n'), ((12873, 12972), 'matplotlib.pyplot.plot', 
'plt.plot', (['thresholdArray', 'numberOfAssignments'], {'color': '"""orange"""', 'label': '"""Normalized True Assigns"""'}), "(thresholdArray, numberOfAssignments, color='orange', label=\n 'Normalized True Assigns')\n", (12881, 12972), True, 'import matplotlib.pyplot as plt\n'), ((12973, 13002), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (12983, 13002), True, 'import matplotlib.pyplot as plt\n'), ((13049, 13083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Similarity Threshold"""'], {}), "('Similarity Threshold')\n", (13059, 13083), True, 'import matplotlib.pyplot as plt\n'), ((13088, 13119), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized Values"""'], {}), "('Normalized Values')\n", (13098, 13119), True, 'import matplotlib.pyplot as plt\n'), ((308, 327), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (325, 327), False, 'import logging\n'), ((1033, 1054), 'numpy.asarray', 'np.asarray', (['tokenList'], {}), '(tokenList)\n', (1043, 1054), True, 'import numpy as np\n'), ((1104, 1125), 'numpy.asarray', 'np.asarray', (['tokenList'], {}), '(tokenList)\n', (1114, 1125), True, 'import numpy as np\n'), ((1264, 1286), 'numpy.asarray', 'np.asarray', (['db.labels_'], {}), '(db.labels_)\n', (1274, 1286), True, 'import numpy as np\n'), ((1288, 1315), 'numpy.asarray', 'np.asarray', (['db_umap.labels_'], {}), '(db_umap.labels_)\n', (1298, 1315), True, 'import numpy as np\n'), ((5678, 5704), 'numpy.asarray', 'np.asarray', (['trainTokenList'], {}), '(trainTokenList)\n', (5688, 5704), True, 'import numpy as np\n'), ((11380, 11422), 'pandas.read_csv', 'pd.read_csv', (['KMEANS_CLUSTER_LABEL_CSV_PATH'], {}), '(KMEANS_CLUSTER_LABEL_CSV_PATH)\n', (11391, 11422), True, 'import pandas as pd\n'), ((12666, 12697), 'numpy.asarray', 'np.asarray', (['numberOfAssignments'], {}), '(numberOfAssignments)\n', (12676, 12697), True, 'import numpy as np\n'), ((13018, 13043), 'numpy.arange', 'np.arange', (['(0)', 
'(1)'], {'step': '(0.1)'}), '(0, 1, step=0.1)\n', (13027, 13043), True, 'import numpy as np\n'), ((998, 1028), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.3)', 'min_samples': '(2)'}), '(eps=0.3, min_samples=2)\n', (1004, 1028), False, 'from sklearn.cluster import DBSCAN\n'), ((1073, 1099), 'umap.UMAP', 'umap.UMAP', ([], {'random_state': '(42)'}), '(random_state=42)\n', (1082, 1099), False, 'import umap\n'), ((1198, 1228), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.3)', 'min_samples': '(2)'}), '(eps=0.3, min_samples=2)\n', (1204, 1228), False, 'from sklearn.cluster import DBSCAN\n'), ((5647, 5673), 'umap.UMAP', 'umap.UMAP', ([], {'random_state': '(42)'}), '(random_state=42)\n', (5656, 5673), False, 'import umap\n'), ((5840, 5881), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'K_size', 'random_state': '(0)'}), '(n_clusters=K_size, random_state=0)\n', (5846, 5881), False, 'from sklearn.cluster import KMeans\n'), ((13150, 13189), 'numpy.sign', 'np.sign', (['(numberOfAssignments - accuracy)'], {}), '(numberOfAssignments - accuracy)\n', (13157, 13189), True, 'import numpy as np\n')] |
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to construct DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from PIL import Image
from delf import feature_extractor
# Minimum dimensions below which DELF features are not extracted (empty
# features are returned). This applies after any resizing is performed.
_MIN_HEIGHT = 10
_MIN_WIDTH = 10
def ResizeImage(image, config):
  """Resizes image according to config.

  Args:
    image: Uint8 array with shape (height, width, 3).
    config: DelfConfig proto containing the model configuration.

  Returns:
    resized_image: Uint8 array with resized image.
    scale_factor: Float with factor used for resizing (If upscaling, larger than
      1; if downscaling, smaller than 1).

  Raises:
    ValueError: If `image` has incorrect number of dimensions/channels.
  """
  if image.ndim != 3:
    # Bug fix: the message previously interpolated `image.ndims`, which does
    # not exist on numpy arrays, so this path raised AttributeError instead of
    # the documented ValueError.
    raise ValueError('image has incorrect number of dimensions: %d' %
                     image.ndim)
  height, width, channels = image.shape

  if channels != 3:
    raise ValueError('image has incorrect number of channels: %d' % channels)

  # Downscale when either dimension exceeds the maximum; upscale only when
  # both dimensions are below the minimum. -1 disables the respective bound.
  if config.max_image_size != -1 and (width > config.max_image_size or
                                      height > config.max_image_size):
    scale_factor = config.max_image_size / max(width, height)
  elif config.min_image_size != -1 and (width < config.min_image_size and
                                        height < config.min_image_size):
    scale_factor = config.min_image_size / max(width, height)
  else:
    # No resizing needed, early return.
    return image, 1.0

  # PIL expects the target size as (width, height).
  new_shape = (int(width * scale_factor), int(height * scale_factor))
  pil_image = Image.fromarray(image)
  resized_image = np.array(pil_image.resize(new_shape, resample=Image.BILINEAR))
  return resized_image, scale_factor
def MakeExtractor(sess, config, import_scope=None):
  """Creates a function to extract features from an image.

  Loads the DELF SavedModel referenced by config.model_path into `sess`,
  resolves the graph's input placeholders and output tensors by name, and
  returns a closure that runs the graph on a single image.

  Args:
    sess: TensorFlow session to use.
    config: DelfConfig proto containing the model configuration.
    import_scope: Optional scope to use for model.

  Returns:
    Function that receives an image and returns features.
  """
  # Import the exported SavedModel graph into the session.
  tf.saved_model.loader.load(
      sess, [tf.saved_model.tag_constants.SERVING],
      config.model_path,
      import_scope=import_scope)
  # Tensor names gain a "<scope>/" prefix when imported under a scope.
  import_scope_prefix = import_scope + '/' if import_scope is not None else ''
  # Input placeholders of the imported graph.
  input_image = sess.graph.get_tensor_by_name('%sinput_image:0' %
                                              import_scope_prefix)
  input_score_threshold = sess.graph.get_tensor_by_name('%sinput_abs_thres:0' %
                                                        import_scope_prefix)
  input_image_scales = sess.graph.get_tensor_by_name('%sinput_scales:0' %
                                                     import_scope_prefix)
  input_max_feature_num = sess.graph.get_tensor_by_name(
      '%sinput_max_feature_num:0' % import_scope_prefix)
  # Output tensors: raw boxes, descriptors, per-feature scales and scores.
  boxes = sess.graph.get_tensor_by_name('%sboxes:0' % import_scope_prefix)
  raw_descriptors = sess.graph.get_tensor_by_name('%sfeatures:0' %
                                                  import_scope_prefix)
  feature_scales = sess.graph.get_tensor_by_name('%sscales:0' %
                                                 import_scope_prefix)
  attention_with_extra_dim = sess.graph.get_tensor_by_name('%sscores:0' %
                                                           import_scope_prefix)
  # Collapse the scores tensor's extra dimension into a 1-D attention vector.
  attention = tf.reshape(attention_with_extra_dim,
                         [tf.shape(attention_with_extra_dim)[0]])
  # Convert raw boxes/descriptors into final feature locations/descriptors.
  locations, descriptors = feature_extractor.DelfFeaturePostProcessing(
      boxes, raw_descriptors, config)
  def ExtractorFn(image):
    """Receives an image and returns DELF features.

    If image is too small, returns empty set of features.

    Args:
      image: Uint8 array with shape (height, width, 3) containing the RGB image.

    Returns:
      Tuple (locations, descriptors, feature_scales, attention)
    """
    resized_image, scale_factor = ResizeImage(image, config)
    # If the image is too small, returns empty features.
    if resized_image.shape[0] < _MIN_HEIGHT or resized_image.shape[
        1] < _MIN_WIDTH:
      return np.array([]), np.array([]), np.array([]), np.array([])
    (locations_out, descriptors_out, feature_scales_out,
     attention_out) = sess.run(
         [locations, descriptors, feature_scales, attention],
         feed_dict={
             input_image: resized_image,
             input_score_threshold: config.delf_local_config.score_threshold,
             input_image_scales: list(config.image_scales),
             input_max_feature_num: config.delf_local_config.max_feature_num
         })
    # Map locations back to the original (pre-resize) image coordinates.
    rescaled_locations_out = locations_out / scale_factor
    return (rescaled_locations_out, descriptors_out, feature_scales_out,
            attention_out)
  return ExtractorFn
| [
"PIL.Image.fromarray",
"tensorflow.shape",
"tensorflow.saved_model.loader.load",
"numpy.array",
"delf.feature_extractor.DelfFeaturePostProcessing"
] | [((2438, 2460), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2453, 2460), False, 'from PIL import Image\n'), ((2933, 3055), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['sess', '[tf.saved_model.tag_constants.SERVING]', 'config.model_path'], {'import_scope': 'import_scope'}), '(sess, [tf.saved_model.tag_constants.SERVING],\n config.model_path, import_scope=import_scope)\n', (2959, 3055), True, 'import tensorflow as tf\n'), ((4348, 4423), 'delf.feature_extractor.DelfFeaturePostProcessing', 'feature_extractor.DelfFeaturePostProcessing', (['boxes', 'raw_descriptors', 'config'], {}), '(boxes, raw_descriptors, config)\n', (4391, 4423), False, 'from delf import feature_extractor\n'), ((4280, 4314), 'tensorflow.shape', 'tf.shape', (['attention_with_extra_dim'], {}), '(attention_with_extra_dim)\n', (4288, 4314), True, 'import tensorflow as tf\n'), ((4972, 4984), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4980, 4984), True, 'import numpy as np\n'), ((4986, 4998), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4994, 4998), True, 'import numpy as np\n'), ((5000, 5012), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5008, 5012), True, 'import numpy as np\n'), ((5014, 5026), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5022, 5026), True, 'import numpy as np\n')] |
"""This module contains functions that visualise solar agent control."""
from __future__ import annotations
from typing import Tuple, Dict, List
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from solara.plot.constants import COLORS, LABELS, MARKERS
def default_setup(figsize=None) -> None:
"""Setup default matplotlib settings."""
if figsize is None:
figsize = (6, 3)
plt.figure(figsize=figsize, dpi=100, tight_layout=True)
sns.set_style("ticks", {"dashes": False})
sns.set_context("paper")
def plot_episode(
data: Dict[str, np.array],
colors: Dict[str, str] = None,
labels: Dict[str, str] = None,
markers: Dict[str, str] = None,
selected_keys: List[str] = None,
num_timesteps: int = 25,
iteration: int = None,
title: str = "Episode Trajectory",
y_max: float = 4,
y_min: float = -2.5,
show_grid: bool = True,
figsize: Tuple = (4.62, 3),
rewards_key: str = "rewards",
dpi: int = 100,
include_episode_stats: bool = True,
):
"""Plot a single episode of battery control problem."""
# default_setup()
matplotlib.rc("text", usetex=True)
if colors is None:
colors = COLORS
if labels is None:
labels = LABELS
if markers is None:
markers = MARKERS
x = np.arange(0, num_timesteps)
if rewards_key in data.keys():
episode_reward = sum(data[rewards_key])
else:
episode_reward = None
# Setting up the figure
_, ax = plt.subplots(figsize=figsize, dpi=dpi)
ax.set_xticks([0, 5, 10, 15, 20, 23], minor=False)
ax.set_xticks(x, minor=True)
ax.set_xticklabels([0, 5, 10, 15, 20, 23], minor=False)
if show_grid:
ax.yaxis.grid(True, which="major")
ax.xaxis.grid(True, which="major")
ax.xaxis.grid(True, which="minor")
# ax.set_prop_cycle("color", colors)
# Plotting the data
for name, values in data.items():
if selected_keys is None or name in selected_keys:
if name in colors.keys():
color = colors[name]
else:
color = None
if name in labels.keys():
label = labels[name]
else:
label = name
if name in markers.keys():
marker = markers[name]
else:
marker = "."
label = label.replace("$", "\\$")
ax.plot(values, label=label, marker=marker, color=color)
if title is not None:
if iteration is not None:
iteration_str = "Iteration {:2.0f}, ".format(iteration)
else:
iteration_str = ""
if episode_reward is not None:
title += " ({}Overall reward: {:.3f})".format(
iteration_str, episode_reward
)
plt.title(title)
plt.ylabel("kW / kWh / other")
plt.xlabel("Time step")
# Adding overall data
if "power_diff" in data:
power_diff_sum = float(sum(data["power_diff"]))
else:
power_diff_sum = 0
handles, _ = ax.get_legend_handles_labels()
if include_episode_stats:
ep_summary_stats = (
# "\\rule{{67pt}}{{0.25pt}}"
"\n \\textbf{{Episode statistics}}"
"\n Sum of rewards: {:>8.3f} \\\\"
"\n Sum of costs: {:>15.3f} \\\\"
"\n Sum of penalties: {:>11.3f}"
).format(
float(sum(data["rewards"])),
float(sum(data["cost"])),
power_diff_sum,
)
handles.append(matplotlib.patches.Patch(color="none", label=ep_summary_stats))
plt.legend(
bbox_to_anchor=(1.02, 1.025),
loc="upper left",
edgecolor="grey",
handles=handles,
# title="\\textbf{{Legend}}",
)
plt.ylim(ymin=y_min, ymax=y_max)
# plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"seaborn.set_context",
"seaborn.set_style",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.patches.Patch",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"numpy.ara... | [((439, 494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(100)', 'tight_layout': '(True)'}), '(figsize=figsize, dpi=100, tight_layout=True)\n', (449, 494), True, 'import matplotlib.pyplot as plt\n'), ((499, 540), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""', "{'dashes': False}"], {}), "('ticks', {'dashes': False})\n", (512, 540), True, 'import seaborn as sns\n'), ((545, 569), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (560, 569), True, 'import seaborn as sns\n'), ((1151, 1185), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1164, 1185), False, 'import matplotlib\n'), ((1342, 1369), 'numpy.arange', 'np.arange', (['(0)', 'num_timesteps'], {}), '(0, num_timesteps)\n', (1351, 1369), True, 'import numpy as np\n'), ((1535, 1573), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (1547, 1573), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""kW / kWh / other"""'], {}), "('kW / kWh / other')\n", (2896, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2944), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time step"""'], {}), "('Time step')\n", (2931, 2944), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3756), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.02, 1.025)', 'loc': '"""upper left"""', 'edgecolor': '"""grey"""', 'handles': 'handles'}), "(bbox_to_anchor=(1.02, 1.025), loc='upper left', edgecolor='grey',\n handles=handles)\n", (3669, 3756), True, 'import matplotlib.pyplot as plt\n'), ((3835, 3867), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': 'y_min', 'ymax': 'y_max'}), '(ymin=y_min, ymax=y_max)\n', (3843, 3867), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2881), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', 
(2874, 2881), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3652), 'matplotlib.patches.Patch', 'matplotlib.patches.Patch', ([], {'color': '"""none"""', 'label': 'ep_summary_stats'}), "(color='none', label=ep_summary_stats)\n", (3614, 3652), False, 'import matplotlib\n')] |
"""Tests for plotting."""
import contextlib
import io
import warnings
import matplotlib.axes
import matplotlib.collections
import matplotlib.figure
import matplotlib.legend
import matplotlib.lines
import matplotlib.pyplot as plt
import numpy as np
import os
import unittest
import aspecd.exceptions
from aspecd import plotting, utils, dataset
class TestPlotter(unittest.TestCase):
    """Test suite for the generic :class:`aspecd.plotting.Plotter`."""

    def setUp(self):
        self.plotter = plotting.Plotter()
        self.filename = 'Testfile.png'

    def tearDown(self):
        # Remove any file written during a test and close the figure so
        # matplotlib does not accumulate open figures across tests.
        if os.path.isfile(self.filename):
            os.remove(self.filename)
        if self.plotter.fig:
            plt.close(self.plotter.fig)

    def test_instantiate_class(self):
        pass

    def test_has_plot_method(self):
        plot = getattr(self.plotter, 'plot', None)
        self.assertIsNotNone(plot)
        self.assertTrue(callable(plot))

    def test_name_property_equals_full_class_name(self):
        self.assertEqual(utils.full_class_name(self.plotter),
                         self.plotter.name)

    def test_has_parameters_property(self):
        self.assertTrue(hasattr(self.plotter, 'parameters'))

    def test_parameters_property_is_dict(self):
        self.assertIsInstance(self.plotter.parameters, dict)

    def test_has_properties_property(self):
        self.assertTrue(hasattr(self.plotter, 'properties'))

    def test_has_description_property(self):
        self.assertTrue(hasattr(self.plotter, 'description'))

    def test_description_property_is_string(self):
        self.assertIsInstance(self.plotter.description, str)

    def test_has_figure_property(self):
        self.assertTrue(hasattr(self.plotter, 'figure'))

    def test_has_fig_property(self):
        self.assertTrue(hasattr(self.plotter, 'fig'))

    def test_fig_property_and_figure_property_are_identical(self):
        self.assertIs(self.plotter.fig, self.plotter.figure)

    def test_has_axes_property(self):
        self.assertTrue(hasattr(self.plotter, 'axes'))

    def test_has_ax_property(self):
        self.assertTrue(hasattr(self.plotter, 'axes'))

    def test_ax_property_and_axes_property_are_identical(self):
        self.assertIs(self.plotter.ax, self.plotter.axes)

    def test_has_filename_property(self):
        self.assertTrue(hasattr(self.plotter, 'filename'))

    def test_has_caption_property(self):
        self.assertTrue(hasattr(self.plotter, 'caption'))

    def test_has_style_property(self):
        self.assertTrue(hasattr(self.plotter, 'style'))

    def test_has_save_method(self):
        save = getattr(self.plotter, 'save', None)
        self.assertIsNotNone(save)
        self.assertTrue(callable(save))

    def test_plot_sets_figure_property(self):
        self.plotter.plot()
        self.assertIsInstance(self.plotter.figure, matplotlib.figure.Figure)
        plt.close(self.plotter.figure)

    def test_plot_sets_fig_property(self):
        self.plotter.plot()
        self.assertIsInstance(self.plotter.fig, matplotlib.figure.Figure)
        plt.close(self.plotter.fig)

    def test_plot_sets_axes_property(self):
        self.plotter.plot()
        self.assertIsInstance(self.plotter.axes, matplotlib.axes.Axes)
        plt.close(self.plotter.figure)

    def test_plot_sets_ax_property(self):
        self.plotter.plot()
        self.assertIsInstance(self.plotter.ax, matplotlib.axes.Axes)
        plt.close(self.plotter.figure)

    def test_plot_sets_no_new_figure_property_if_existing(self):
        figure, axes = plt.subplots()
        self.plotter.figure = figure
        self.plotter.axes = axes
        self.plotter.plot()
        self.assertIs(figure, self.plotter.figure)

    def test_plot_sets_no_new_axes_property_if_existing(self):
        figure, axes = plt.subplots()
        self.plotter.figure = figure
        self.plotter.axes = axes
        self.plotter.plot()
        self.assertIs(axes, self.plotter.axes)

    def test_save_without_saver_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingSaverError):
            self.plotter.save()

    def test_save_returns_saver(self):
        saver = plotting.Saver()
        saver.filename = self.filename
        self.plotter.plot()
        self.assertIsInstance(self.plotter.save(saver), plotting.Saver)

    def test_save_sets_plot_in_saver(self):
        saver = plotting.Saver()
        saver.filename = self.filename
        self.plotter.plot()
        self.assertEqual(self.plotter.save(saver).plotter, self.plotter)

    def test_save_sets_filename(self):
        saver = plotting.Saver()
        saver.filename = self.filename
        self.plotter.plot()
        self.plotter.save(saver)
        self.assertEqual(self.filename, self.plotter.filename)

    def test_plot_applies_properties(self):
        self.plotter.properties.figure.dpi = 300.0
        self.plotter.plot()
        self.assertEqual(self.plotter.properties.figure.dpi,
                         self.plotter.figure.dpi)

    def test_plot_with_unknown_style_raises(self):
        self.plotter.style = 'foo'
        with self.assertRaises(aspecd.exceptions.StyleNotFoundError):
            self.plotter.plot()

    def test_plot_adds_zero_lines(self):
        self.plotter.parameters['show_zero_lines'] = True
        self.plotter.plot()
        # One zero line per axis is expected.
        self.assertEqual(2, len(self.plotter.ax.get_lines()))

    def test_plot_without_zero_lines_does_not_add_zero_lines(self):
        self.plotter.parameters['show_zero_lines'] = False
        self.plotter.plot()
        self.assertEqual(0, len(self.plotter.ax.get_lines()))

    def test_plot_applies_properties_to_zero_lines(self):
        self.plotter.parameters['show_zero_lines'] = True
        self.plotter.properties.zero_lines.color = '#999'
        self.plotter.plot()
        # Read the private Line2D._color attribute, as the public accessor
        # normalises colour values.
        self.assertEqual(self.plotter.properties.zero_lines.color,
                         self.plotter.ax.get_lines()[0]._color)
class TestSinglePlotter(unittest.TestCase):
    """Test suite for :class:`aspecd.plotting.SinglePlotter`."""

    def setUp(self):
        self.plotter = plotting.SinglePlotter()

    def tearDown(self):
        # Close the figure so matplotlib does not accumulate open figures.
        if self.plotter.fig:
            plt.close(self.plotter.fig)

    def test_instantiate_class(self):
        pass

    def test_has_drawing_property(self):
        self.assertTrue(hasattr(self.plotter, 'drawing'))

    def test_plot_without_dataset_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDatasetError):
            self.plotter.plot()

    def test_plot_with_preset_dataset(self):
        self.plotter.dataset = dataset.Dataset()
        self.plotter.plot()

    def test_plot_from_dataset_sets_dataset(self):
        ds = dataset.Dataset()
        result = ds.plot(self.plotter)
        self.assertIsInstance(result.dataset, dataset.Dataset)

    def test_plot_with_dataset(self):
        ds = dataset.Dataset()
        self.plotter.plot(dataset=ds)
        self.assertGreater(len(ds.representations), 0)

    def test_plot_with_dataset_sets_axes_labels(self):
        ds = dataset.Dataset()
        for axis in ds.data.axes[0:2]:
            axis.quantity = 'foo'
            axis.unit = 'bar'
        # Labels follow the "$quantity$ / unit" convention.
        xlabel = f'${ds.data.axes[0].quantity}$ / {ds.data.axes[0].unit}'
        ylabel = f'${ds.data.axes[1].quantity}$ / {ds.data.axes[1].unit}'
        result = ds.plot(self.plotter)
        self.assertEqual(xlabel, result.axes.get_xlabel())
        self.assertEqual(ylabel, result.axes.get_ylabel())

    def test_axes_labels_with_empty_unit_without_slash(self):
        ds = dataset.Dataset()
        for axis in ds.data.axes[0:2]:
            axis.quantity = 'foo'
            axis.unit = ''
        # Without a unit, the label must not contain the slash separator.
        xlabel = f'${ds.data.axes[0].quantity}$'
        ylabel = f'${ds.data.axes[1].quantity}$'
        result = ds.plot(self.plotter)
        self.assertEqual(xlabel, result.axes.get_xlabel())
        self.assertEqual(ylabel, result.axes.get_ylabel())

    def test_plot_returns_dataset(self):
        returned = self.plotter.plot(dataset=dataset.Dataset())
        self.assertIsInstance(returned, dataset.Dataset)

    def test_plot_checks_applicability(self):
        class MyPlotter(aspecd.plotting.SinglePlotter):
            @staticmethod
            def applicable(dataset):
                return False

        ds = aspecd.dataset.Dataset()
        with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
            ds.plot(MyPlotter())

    def test_plot_check_applicability_prints_helpful_message(self):
        class MyPlotter(aspecd.plotting.SinglePlotter):
            @staticmethod
            def applicable(dataset):
                return False

        ds = aspecd.dataset.Dataset()
        ds.id = "foo"
        message = "MyPlotter not applicable to dataset with id foo"
        with self.assertRaisesRegex(
                aspecd.exceptions.NotApplicableToDatasetError, message):
            ds.plot(MyPlotter())
class TestSinglePlotter1D(unittest.TestCase):
    """Tests for :class:`aspecd.plotting.SinglePlotter1D` (1D line plots)."""
    def setUp(self):
        self.plotter = plotting.SinglePlotter1D()
    def tearDown(self):
        # Close the figure so matplotlib does not accumulate open figures.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_has_type_property(self):
        self.assertTrue(hasattr(self.plotter, 'type'))
    def test_set_type(self):
        plot_type = 'scatter'
        self.plotter.type = plot_type
        self.assertEqual(self.plotter.type, plot_type)
    def test_setting_wrong_type_raises(self):
        # Unknown plot types are rejected by the type setter.
        with self.assertRaises(TypeError):
            self.plotter.type = 'foo'
    def test_plot_sets_drawing(self):
        self.plotter.plot(dataset=dataset.Dataset())
        self.assertTrue(self.plotter.drawing)
    def test_plot_with_2D_data_raises(self):
        # A 1D plotter must refuse two-dimensional data.
        dataset_ = dataset.Dataset()
        dataset_.data.data = np.random.rand(3, 2)
        with self.assertRaises(
                aspecd.exceptions.NotApplicableToDatasetError):
            self.plotter.plot(dataset_)
    def test_set_line_colour_from_dict(self):
        line_colour = '#cccccc'
        properties = {'drawing': {'color': line_colour}}
        self.plotter.properties.from_dict(properties)
        self.assertEqual(line_colour, self.plotter.properties.drawing.color)
    def test_plot_sets_correct_line_color(self):
        color = '#cccccc'
        dict_ = {'drawing': {'color': color}}
        self.plotter.properties.from_dict(dict_)
        self.plotter.plot(dataset=dataset.Dataset())
        self.assertEqual(color, self.plotter.drawing.get_color())
    def test_plot_sets_axes_xlabel(self):
        label = 'foo bar'
        dict_ = {'axes': {'xlabel': label}}
        self.plotter.properties.from_dict(dict_)
        self.plotter.plot(dataset=dataset.Dataset())
        self.assertEqual(label, self.plotter.axes.get_xlabel())
    def test_plot_adds_no_x_zero_line_if_out_of_range(self):
        self.plotter.parameters['show_zero_lines'] = True
        dataset_ = aspecd.dataset.CalculatedDataset()
        # Data offset well above zero: the test expects line index 1 to be
        # the vertical zero line at x == 0 (the data line is index 0).
        dataset_.data.data = np.random.random([10])+5
        plotter = dataset_.plot(self.plotter)
        self.assertEqual([0., 0.], plotter.ax.get_lines()[1].get_xdata())
    def test_plot_adds_no_y_zero_line_if_out_of_range(self):
        self.plotter.parameters['show_zero_lines'] = True
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([10])-0.5
        # x axis from 4 to 5 excludes x == 0: the test expects line index 1
        # to be the horizontal zero line at y == 0.
        dataset_.data.axes[0].values = np.linspace(4, 5, 10)
        plotter = dataset_.plot(self.plotter)
        self.assertEqual([0., 0.], plotter.ax.get_lines()[1].get_ydata())
    def test_plot_with_show_legend_sets_legend_label(self):
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([10])-0.5
        dataset_.data.axes[0].values = np.linspace(4, 5, 10)
        dataset_.label = 'foo'
        self.plotter.parameters['show_legend'] = True
        plotter = dataset_.plot(self.plotter)
        self.assertEqual(dataset_.label,
                         plotter.legend.get_texts()[0].get_text())
    def test_axes_tight_x_sets_xlim_to_data_limits(self):
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([100])
        dataset_.data.axes[0].values = np.linspace(np.pi, 2*np.pi, 100)
        self.plotter.parameters['tight'] = 'x'
        plotter = dataset_.plot(self.plotter)
        self.assertEqual(dataset_.data.axes[0].values[0],
                         plotter.axes.get_xlim()[0])
    def test_axes_tight_y_sets_xlim_to_data_limits(self):
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([100])
        dataset_.data.axes[0].values = np.linspace(np.pi, 2*np.pi, 100)
        self.plotter.parameters['tight'] = 'y'
        plotter = dataset_.plot(self.plotter)
        self.assertEqual(dataset_.data.data.min(),
                         plotter.axes.get_ylim()[0])
    def test_axes_tight_both_sets_xlim_and_ylim_to_data_limits(self):
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([100])
        dataset_.data.axes[0].values = np.linspace(np.pi, 2*np.pi, 100)
        self.plotter.parameters['tight'] = 'both'
        plotter = dataset_.plot(self.plotter)
        self.assertEqual(dataset_.data.axes[0].values[0],
                         plotter.axes.get_xlim()[0])
        self.assertEqual(dataset_.data.data.min(),
                         plotter.axes.get_ylim()[0])
class TestSinglePlotter2D(unittest.TestCase):
    """Tests for :class:`aspecd.plotting.SinglePlotter2D` (2D plots)."""
    def setUp(self):
        self.plotter = plotting.SinglePlotter2D()
    def tearDown(self):
        # Close the figure so matplotlib does not accumulate open figures.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_plot_with_1D_dataset_raises(self):
        # A 2D plotter must refuse one-dimensional data.
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5])
        with self.assertRaises(
                aspecd.exceptions.NotApplicableToDatasetError):
            dataset_.plot(self.plotter)
    def test_has_type_property(self):
        self.assertTrue(hasattr(self.plotter, 'type'))
    def test_set_type(self):
        plot_type = 'contour'
        self.plotter.type = plot_type
        self.assertEqual(self.plotter.type, plot_type)
    def test_setting_wrong_type_raises(self):
        with self.assertRaises(TypeError):
            self.plotter.type = 'foo'
    def test_plot_sets_drawing(self):
        dataset_ = dataset.Dataset()
        dataset_.data.data = np.random.rand(3, 2)
        self.plotter.plot(dataset=dataset_)
        self.assertTrue(self.plotter.drawing)
    def test_plot_with_dataset_sets_axes_labels(self):
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.data.axes[0].quantity = 'zero'
        test_dataset.data.axes[0].unit = 'foo'
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        # Axis labels follow the "$quantity$ / unit" convention.
        xlabel = '$' + test_dataset.data.axes[0].quantity + '$' + ' / ' + \
                 test_dataset.data.axes[0].unit
        ylabel = '$' + test_dataset.data.axes[1].quantity + '$' + ' / ' + \
                 test_dataset.data.axes[1].unit
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(xlabel, plotter.axes.get_xlabel())
        self.assertEqual(ylabel, plotter.axes.get_ylabel())
    def test_plot_with_dataset_sets_axes_limits(self):
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.data.axes[0].quantity = 'zero'
        test_dataset.data.axes[0].unit = 'foo'
        test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        test_dataset.data.axes[1].values = np.linspace(50, 100, 5)
        # Expected limits are the first and last value of each axis.
        xlimits = tuple(test_dataset.data.axes[0].values[[0, -1]])
        ylimits = tuple(test_dataset.data.axes[1].values[[0, -1]])
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(xlimits, plotter.axes.get_xlim())
        self.assertEqual(ylimits, plotter.axes.get_ylim())
    def test_plot_contour(self):
        self.plotter.type = 'contour'
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.plot(self.plotter)
    def test_plot_with_switched_axes(self):
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.data.axes[0].quantity = 'zero'
        test_dataset.data.axes[0].unit = 'foo'
        test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        test_dataset.data.axes[1].values = np.linspace(50, 100, 5)
        # With switched axes, the second data axis appears on the abscissa.
        xlimits = tuple(test_dataset.data.axes[1].values[[0, -1]])
        ylimits = tuple(test_dataset.data.axes[0].values[[0, -1]])
        self.plotter.parameters['switch_axes'] = True
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(xlimits, plotter.axes.get_xlim())
        self.assertEqual(ylimits, plotter.axes.get_ylim())
    def test_plot_contour_with_levels(self):
        self.plotter.type = 'contour'
        self.plotter.parameters['levels'] = 40
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        # Allow a slack of 5, as the actual number of contour levels may
        # deviate slightly from the requested value.
        self.assertGreaterEqual(len(plotter.drawing.levels),
                                self.plotter.parameters['levels'] - 5)
    def test_set_cmap_from_dict(self):
        cmap = 'RdGy'
        properties = {'drawing': {'cmap': cmap}}
        self.plotter.properties.from_dict(properties)
        self.assertEqual(cmap, self.plotter.properties.drawing.cmap)
    def test_plot_sets_correct_cmap(self):
        cmap = 'RdGy'
        dict_ = {'drawing': {'cmap': cmap}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        self.plotter.plot(dataset=test_dataset)
        self.assertEqual(cmap, self.plotter.drawing.cmap.name)
    def test_plot_imshow_with_levels_ignores_levels(self):
        # "levels" is a contour(f) parameter; imshow must not choke on it.
        self.plotter.parameters['levels'] = 40
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.plot(self.plotter)
    def test_plot_imshow_sets_aspect_to_auto(self):
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        test_dataset.plot(self.plotter)
        # NOTE(review): reads the private Axes._aspect attribute; this may
        # break with future matplotlib versions.
        self.assertEqual('auto', self.plotter.ax._aspect)
    def test_show_contour_lines_plots_contour_lines_in_contourf(self):
        self.plotter.type = 'contourf'
        self.plotter.parameters['show_contour_lines'] = True
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        # Contour lines show up as a LineCollection among the Axes children.
        line_collection = [isinstance(x, matplotlib.collections.LineCollection)
                           for x in plotter.ax.get_children()]
        self.assertTrue(any(line_collection))
    def test_contour_plot_sets_correct_linewidths(self):
        self.plotter.type = 'contour'
        dict_ = {'drawing': {'linewidths': 2}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        self.assertEqual(dict_['drawing']['linewidths'],
                         line_collection[0].get_linewidths()[0])
    def test_contour_plot_sets_correct_linestyles(self):
        self.plotter.type = 'contour'
        dict_ = {'drawing': {'linestyles': ':', 'linewidths': 1}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        # linestyle ':' => (0.0, [1.0, 1.65]) for linewidth = 1
        self.assertEqual((0.0, [1.0, 1.65]),
                         line_collection[0].get_linestyles()[0])
    def test_contour_plot_sets_correct_colors(self):
        self.plotter.type = 'contour'
        dict_ = {'drawing': {'colors': 'k'}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        # 'k' resolves to opaque black, i.e. RGBA [0, 0, 0, 1].
        self.assertListEqual([0., 0., 0., 1.],
                             list(line_collection[0].get_colors()[0]))
    def test_contourf_plot_with_contour_lines_sets_correct_linewidths(self):
        self.plotter.type = 'contourf'
        self.plotter.parameters['show_contour_lines'] = True
        dict_ = {'drawing': {'linewidths': 2}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        self.assertEqual(dict_['drawing']['linewidths'],
                         line_collection[0].get_linewidths()[0])
    def test_contourf_plot_with_contour_lines_sets_correct_linestyles(self):
        self.plotter.type = 'contourf'
        self.plotter.parameters['show_contour_lines'] = True
        dict_ = {'drawing': {'linestyles': ':', 'linewidths': 1}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        # linestyle ':' => (0.0, [1.0, 1.65]) for linewidth = 1
        self.assertEqual((0.0, [1.0, 1.65]),
                         line_collection[0].get_linestyles()[0])
    def test_contourf_plot_with_contour_lines_sets_correct_colors(self):
        self.plotter.type = 'contourf'
        self.plotter.parameters['show_contour_lines'] = True
        dict_ = {'drawing': {'colors': 'k'}}
        self.plotter.properties.from_dict(dict_)
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.random([5, 5])
        plotter = test_dataset.plot(self.plotter)
        line_collection = [
            x for x in plotter.ax.get_children()
            if isinstance(x, matplotlib.collections.LineCollection)
        ]
        # 'k' resolves to opaque black, i.e. RGBA [0, 0, 0, 1].
        self.assertListEqual([0., 0., 0., 1.],
                             list(line_collection[0].get_colors()[0]))
class TestSinglePlotter2DStacked(unittest.TestCase):
def setUp(self):
self.plotter = plotting.SinglePlotter2DStacked()
self.filename = 'foo.pdf'
def tearDown(self):
if self.plotter.fig:
plt.close(self.plotter.fig)
if os.path.exists(self.filename):
os.remove(self.filename)
def test_instantiate_class(self):
pass
def test_class_has_sensible_description(self):
self.assertIn('stack', self.plotter.description)
def test_plot_with_1D_dataset_raises(self):
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5])
with self.assertRaises(
aspecd.exceptions.NotApplicableToDatasetError):
dataset_.plot(self.plotter)
def test_parameters_have_stacking_dimension_key(self):
self.assertIn('stacking_dimension', self.plotter.parameters)
def test_plot_consists_of_correct_number_of_lines(self):
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5, 10]) - 0.5
plotter = dataset_.plot(self.plotter)
self.assertGreaterEqual(10, len(plotter.axes.get_lines()))
def test_plot_along_zero_dim_consists_of_correct_number_of_lines(self):
self.plotter.parameters['stacking_dimension'] = 0
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5, 10]) - 0.5
plotter = dataset_.plot(self.plotter)
self.assertGreaterEqual(5, len(plotter.axes.get_lines()))
def test_plot_stacks_plots(self):
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5, 10]) - 0.5
plotter = dataset_.plot(self.plotter)
self.assertGreater(max(plotter.axes.get_lines()[5].get_ydata()),
max(plotter.axes.get_lines()[0].get_ydata())*3)
def test_plot_with_zero_offset_preserves_offset(self):
self.plotter.parameters['offset'] = 0
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5, 10]) - 0.5
plotter = dataset_.plot(self.plotter)
self.assertEqual(0, plotter.parameters['offset'])
def test_plot_along_zero_dim_stacks_plots(self):
self.plotter.parameters['stacking_dimension'] = 0
dataset_ = aspecd.dataset.CalculatedDataset()
dataset_.data.data = np.random.random([5, 10]) - 0.5
plotter = dataset_.plot(self.plotter)
self.assertGreater(max(plotter.axes.get_lines()[4].get_ydata()),
max(plotter.axes.get_lines()[0].get_ydata())*3)
def test_plot_along_zero_dim_sets_correct_axes_labels(self):
self.plotter.parameters['stacking_dimension'] = 0
test_dataset = aspecd.dataset.CalculatedDataset()
test_dataset.data.data = np.random.random([5, 10]) - 0.5
test_dataset.data.axes[0].quantity = 'zero'
test_dataset.data.axes[0].unit = 'foo'
test_dataset.data.axes[1].quantity = 'one'
test_dataset.data.axes[1].unit = 'bar'
plotter = test_dataset.plot(self.plotter)
self.assertIn(test_dataset.data.axes[1].unit,
plotter.axes.get_xlabel())
def test_plot_sets_correct_axes_limits(self):
test_dataset = aspecd.dataset.CalculatedDataset()
test_dataset.data.data = np.random.random([5, 10]) - 0.5
test_dataset.data.axes[0].quantity = 'zero'
test_dataset.data.axes[0].unit = 'foo'
test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
test_dataset.data.axes[1].quantity = 'one'
test_dataset.data.axes[1].unit = 'bar'
test_dataset.data.axes[1].values = np.linspace(50, 100, 10)
plotter = test_dataset.plot(self.plotter)
xlimits = tuple(test_dataset.data.axes[0].values[[0, -1]])
self.assertLessEqual(plotter.axes.get_xlim()[0], xlimits[0])
self.assertGreaterEqual(plotter.axes.get_xlim()[1], xlimits[1])
def test_plot_along_zero_dim_sets_correct_axes_limits(self):
self.plotter.parameters['stacking_dimension'] = 0
test_dataset = aspecd.dataset.CalculatedDataset()
test_dataset.data.data = np.random.random([5, 10]) - 0.5
test_dataset.data.axes[0].quantity = 'zero'
test_dataset.data.axes[0].unit = 'foo'
test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
test_dataset.data.axes[1].quantity = 'one'
test_dataset.data.axes[1].unit = 'bar'
test_dataset.data.axes[1].values = np.linspace(50, 100, 10)
plotter = test_dataset.plot(self.plotter)
xlimits = tuple(test_dataset.data.axes[1].values[[0, -1]])
self.assertLessEqual(plotter.axes.get_xlim()[0], xlimits[0])
self.assertGreaterEqual(plotter.axes.get_xlim()[1], xlimits[1])
    def test_plot_with_offset_stacks_plots_accordingly(self):
        """An explicit offset parameter spreads the stacked traces wider."""
        self.plotter.parameters['offset'] = 2
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5, 10]) - 0.5
        plotter = dataset_.plot(self.plotter)
        # Data lie in [-0.5, 0.5); a later trace exceeding 10x the first
        # trace's maximum proves the vertical offset was applied.
        self.assertGreater(max(plotter.axes.get_lines()[5].get_ydata()),
                           max(plotter.axes.get_lines()[0].get_ydata())*10)
    def test_plot_sets_drawings(self):
        """One drawing per stacked trace (second data dimension = 10)."""
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5, 10]) - 0.5
        dataset_.plot(self.plotter)
        self.assertEqual(10, len(self.plotter.drawing))
    def test_plot_applies_drawing_properties_to_all_drawings(self):
        """A single drawing colour must be applied to every stacked trace."""
        self.plotter.properties.drawing.color = '#aaccee'
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5, 10]) - 0.5
        plotter = dataset_.plot(self.plotter)
        # NOTE(review): relies on the private Line2D._color attribute;
        # get_color() would be the public accessor.
        self.assertEqual(self.plotter.properties.drawing.color,
                         plotter.axes.get_lines()[0]._color)
        self.assertEqual(self.plotter.properties.drawing.color,
                         plotter.axes.get_lines()[4]._color)
    def test_set_color_from_dict(self):
        """Nested property dicts set the drawing colour via from_dict."""
        color = '#aaccee'
        properties = {'drawing': {'color': color}}
        self.plotter.properties.from_dict(properties)
        self.assertEqual(color, self.plotter.properties.drawing.color)
    def test_save_plot_with_set_color_does_not_raise(self):
        """Saving a plot with an explicit drawing colour must succeed."""
        self.plotter.properties.drawing.color = '#aaccee'
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5, 10]) - 0.5
        plotter = dataset_.plot(self.plotter)
        saver_ = aspecd.plotting.Saver()
        saver_.filename = self.filename
        plotter.save(saver_)
        # File existence proves saving did not raise and wrote output.
        self.assertTrue(os.path.exists(self.filename))
    def test_plot_sets_correct_yticks(self):
        """One y tick per stacked trace (10 traces along dim 1)."""
        test_dataset = aspecd.dataset.CalculatedDataset()
        test_dataset.data.data = np.random.random([5, 10]) - 0.5
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        test_dataset.data.axes[1].values = np.linspace(50, 100, 10)
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(10, len(plotter.axes.get_yticks()))
    def test_plot_along_zero_dim_sets_correct_yticks(self):
        """Stacking along dim 0 yields one y tick per row (5 traces)."""
        self.plotter.parameters['stacking_dimension'] = 0
        test_dataset = aspecd.dataset.CalculatedDataset()
        test_dataset.data.data = np.random.random([5, 10]) - 0.5
        test_dataset.data.axes[0].quantity = 'zero'
        test_dataset.data.axes[0].unit = 'foo'
        test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(5, len(plotter.axes.get_yticks()))
    def test_plot_sets_correct_yticklabels(self):
        """y tick labels show the stringified values of the stacked axis."""
        test_dataset = aspecd.dataset.CalculatedDataset()
        test_dataset.data.data = np.random.random([5, 10]) - 0.5
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        test_dataset.data.axes[1].values = np.linspace(50, 100, 10)
        plotter = test_dataset.plot(self.plotter)
        # astype(str) mirrors how the plotter presumably renders the
        # numeric axis values as labels - TODO confirm.
        self.assertEqual(test_dataset.data.axes[1].values[0].astype(str),
                         plotter.axes.get_yticklabels()[0].get_text())
    def test_plot_along_zero_dim_sets_correct_yticklabels(self):
        """With stacking along dim 0, y tick labels come from axis 0."""
        self.plotter.parameters['stacking_dimension'] = 0
        test_dataset = aspecd.dataset.CalculatedDataset()
        test_dataset.data.data = np.random.random([5, 10]) - 0.5
        test_dataset.data.axes[0].quantity = 'zero'
        test_dataset.data.axes[0].unit = 'foo'
        test_dataset.data.axes[0].values = np.linspace(5, 10, 5)
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual(test_dataset.data.axes[0].values[0].astype(str),
                         plotter.axes.get_yticklabels()[0].get_text())
    def test_plot_with_ytick_format_sets_correct_yticklabels(self):
        """The yticklabelformat parameter formats the y tick labels."""
        test_dataset = aspecd.dataset.CalculatedDataset()
        test_dataset.data.data = np.random.random([5, 10]) - 0.5
        test_dataset.data.axes[1].quantity = 'one'
        test_dataset.data.axes[1].unit = 'bar'
        test_dataset.data.axes[1].values = np.linspace(50, 100, 10)
        self.plotter.parameters["yticklabelformat"] = '%.2f'
        plotter = test_dataset.plot(self.plotter)
        self.assertEqual('%.2f' % test_dataset.data.axes[1].values[2],
                         plotter.axes.get_yticklabels()[2].get_text())
    def test_plot_zero_lines_for_each_trace(self):
        """With show_zero_lines, at most one extra line per trace appears."""
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.random.random([5, 10]) - 0.5
        self.plotter.parameters['show_zero_lines'] = True
        plotter = dataset_.plot(self.plotter)
        # 10 traces + up to 10 zero lines = at most 20 lines in total.
        self.assertGreaterEqual(20, len(plotter.axes.get_lines()))
class TestMultiPlotter(unittest.TestCase):
    """Tests for the base class for plots spanning multiple datasets."""
    def setUp(self):
        self.plotter = plotting.MultiPlotter()
    def tearDown(self):
        # Close the figure to release matplotlib resources between tests.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_has_datasets_property(self):
        self.assertTrue(hasattr(self.plotter, 'datasets'))
    def test_datasets_property_is_list(self):
        self.assertTrue(isinstance(self.plotter.datasets, list))
    def test_plot_without_datasets_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDatasetError):
            self.plotter.plot()
    def test_plot_with_datasets(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
    def test_parameters_have_axes_key(self):
        self.assertIn('axes', self.plotter.parameters)
    def test_parameters_axes_is_list_of_axes_objects(self):
        self.assertTrue(isinstance(self.plotter.parameters['axes'], list))
        self.assertTrue(self.plotter.parameters['axes'])
        for axis in self.plotter.parameters['axes']:
            self.assertTrue(isinstance(axis, dataset.Axis))
    def test_plot_with_axes_in_parameters_sets_axes_labels(self):
        """Axis labels follow the '$quantity$ / unit' convention."""
        self.plotter.parameters['axes'][0].quantity = 'foo'
        self.plotter.parameters['axes'][0].unit = 'bar'
        self.plotter.parameters['axes'][1].quantity = 'foo2'
        self.plotter.parameters['axes'][1].unit = 'bar2'
        xlabel = '$' + self.plotter.parameters['axes'][0].quantity + \
                 '$' + ' / ' + self.plotter.parameters['axes'][0].unit
        ylabel = '$' + self.plotter.parameters['axes'][1].quantity + \
                 '$' + ' / ' + self.plotter.parameters['axes'][1].unit
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
        self.assertEqual(xlabel, self.plotter.axes.get_xlabel())
        self.assertEqual(ylabel, self.plotter.axes.get_ylabel())
    def test_plot_with_datasets_with_identical_axes_sets_axes_labels(self):
        """Identical axes across datasets yield common axis labels."""
        test_dataset0 = dataset.Dataset()
        test_dataset0.data.axes[0].quantity = 'foo'
        test_dataset0.data.axes[0].unit = 'bar'
        test_dataset0.data.axes[1].quantity = 'foo'
        test_dataset0.data.axes[1].unit = 'bar'
        test_dataset1 = dataset.Dataset()
        test_dataset1.data.axes[0].quantity = 'foo'
        test_dataset1.data.axes[0].unit = 'bar'
        test_dataset1.data.axes[1].quantity = 'foo'
        test_dataset1.data.axes[1].unit = 'bar'
        xlabel = '$' + test_dataset0.data.axes[0].quantity + '$' + ' / ' + \
                 test_dataset0.data.axes[0].unit
        ylabel = '$' + test_dataset0.data.axes[1].quantity + '$' + ' / ' + \
                 test_dataset0.data.axes[1].unit
        self.plotter.datasets.append(test_dataset0)
        self.plotter.datasets.append(test_dataset1)
        self.plotter.plot()
        self.assertEqual(xlabel, self.plotter.axes.get_xlabel())
        self.assertEqual(ylabel, self.plotter.axes.get_ylabel())
    def test_plot_with_datasets_adds_drawing_properties(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
        self.assertEqual(len(self.plotter.datasets),
                         len(self.plotter.properties.drawings))
    def test_plot_with_show_legend_set_to_true_adds_legend(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.parameters['show_legend'] = True
        # Discard anything written to stderr while plotting with legend.
        with contextlib.redirect_stderr(io.StringIO()):
            self.plotter.plot()
        self.assertIs(type(self.plotter.legend), matplotlib.legend.Legend)
    def test_axes_properties_set_axes_labels(self):
        """Explicit axes properties override labels derived from the data."""
        self.plotter.properties.axes.xlabel = 'foo'
        self.plotter.properties.axes.ylabel = 'bar'
        test_dataset = dataset.Dataset()
        test_dataset.data.axes[0].quantity = 'foo'
        test_dataset.data.axes[0].unit = 'bar'
        test_dataset.data.axes[1].quantity = 'foo'
        test_dataset.data.axes[1].unit = 'bar'
        self.plotter.datasets.append(test_dataset)
        self.plotter.plot()
        self.assertEqual(self.plotter.properties.axes.xlabel,
                         self.plotter.axes.get_xlabel())
        self.assertEqual(self.plotter.properties.axes.ylabel,
                         self.plotter.axes.get_ylabel())
    def test_plot_checks_applicability(self):
        """A plotter declaring itself inapplicable must raise on plot()."""
        class MyPlotter(aspecd.plotting.MultiPlotter):
            @staticmethod
            def applicable(dataset):
                return False
        dataset1 = aspecd.dataset.Dataset()
        dataset2 = aspecd.dataset.Dataset()
        plotter = MyPlotter()
        plotter.datasets.append(dataset1)
        plotter.datasets.append(dataset2)
        with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
            plotter.plot()
    def test_plot_checks_applicability_and_prints_helpful_message(self):
        """The raised exception names the plotter class in its message."""
        class MyPlotter(aspecd.plotting.MultiPlotter):
            @staticmethod
            def applicable(dataset):
                return False
        dataset1 = aspecd.dataset.Dataset()
        dataset2 = aspecd.dataset.Dataset()
        plotter = MyPlotter()
        plotter.datasets.append(dataset1)
        plotter.datasets.append(dataset2)
        message = "MyPlotter not applicable to one or more datasets"
        with self.assertRaisesRegex(
                aspecd.exceptions.NotApplicableToDatasetError, message):
            plotter.plot()
class TestMultiPlotter1D(unittest.TestCase):
    """Tests for the 1D multi-dataset plotter."""
    def setUp(self):
        self.plotter = plotting.MultiPlotter1D()
    def tearDown(self):
        # Close the figure to release matplotlib resources between tests.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_description_is_sensible(self):
        self.assertNotIn('Abstract', self.plotter.description)
    def test_properties_are_of_correct_type(self):
        self.assertIs(type(self.plotter.properties),
                      aspecd.plotting.MultiPlot1DProperties)
    def test_has_type_property(self):
        self.assertTrue(hasattr(self.plotter, 'type'))
    def test_set_type(self):
        plot_type = 'loglog'
        self.plotter.type = plot_type
        self.assertEqual(self.plotter.type, plot_type)
    def test_setting_wrong_type_raises(self):
        # 'foo' is not a valid plot type; the setter must reject it.
        with self.assertRaises(TypeError):
            self.plotter.type = 'foo'
    def test_plot_with_2D_data_raises(self):
        """A 1D plotter is not applicable to 2D data."""
        dataset_ = dataset.Dataset()
        dataset_.data.data = np.random.rand(3, 2)
        self.plotter.datasets.append(dataset_)
        with self.assertRaises(
                aspecd.exceptions.NotApplicableToDatasetError):
            self.plotter.plot()
    def test_plot_with_datasets(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
    def test_plot_with_datasets_adds_drawing_to_properties(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
        self.assertEqual(1, len(self.plotter.properties.drawings))
    def test_added_drawing_is_correct_type(self):
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
        self.assertIs(type(self.plotter.properties.drawings[0]),
                      aspecd.plotting.LineProperties)
    def test_plot_sets_correct_line_color(self):
        """Per-drawing colours can be set via the properties dict."""
        color = '#abcdef'
        dict_ = {'drawings': [{'color': color}]}
        self.plotter.properties.from_dict(dict_)
        self.plotter.datasets.append(dataset.Dataset())
        self.plotter.plot()
        self.assertEqual(color, self.plotter.drawings[0].get_color())
    def test_plot_with_show_legend_sets_legend_label(self):
        """The dataset label becomes the legend entry text."""
        dataset_ = dataset.Dataset()
        dataset_.label = 'foo'
        self.plotter.datasets.append(dataset_)
        self.plotter.parameters['show_legend'] = True
        self.plotter.plot()
        self.assertEqual(dataset_.label,
                         self.plotter.legend.get_texts()[0].get_text())
class TestMultiPlotter1DStacked(unittest.TestCase):
    """Tests for the stacked variant of the 1D multi-dataset plotter."""
    def setUp(self):
        # Three identical sine datasets provide predictable traces.
        self.plotter = plotting.MultiPlotter1DStacked()
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.sin(np.linspace(0, 2*np.pi))
        self.plotter.datasets.append(dataset_)
        self.plotter.datasets.append(dataset_)
        self.plotter.datasets.append(dataset_)
    def tearDown(self):
        # Close the figure to release matplotlib resources between tests.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_description_is_sensible(self):
        self.assertNotIn('Abstract', self.plotter.description)
    def test_plot_stacks_plots(self):
        """The third trace must lie clearly below the first one."""
        self.plotter.plot()
        self.assertLess(min(self.plotter.axes.get_lines()[2].get_ydata()),
                        min(self.plotter.axes.get_lines()[0].get_ydata())*2)
    def test_plot_removes_yticks(self):
        # Stacked y positions are arbitrary, hence no y tick labels.
        self.plotter.plot()
        self.assertEqual(0, len(self.plotter.axes.get_yticklabels()))
    def test_plot_has_zero_lines_turned_off_by_default(self):
        self.plotter.plot()
        self.assertFalse(self.plotter.parameters["show_zero_lines"])
    def test_parameters_have_offset_key(self):
        self.assertIn('offset', self.plotter.parameters)
    def test_plot_stacks_plots_with_given_offset(self):
        """A larger explicit offset spreads the traces further apart."""
        self.plotter.parameters["offset"] = 10
        self.plotter.plot()
        self.assertLess(min(self.plotter.axes.get_lines()[2].get_ydata()),
                        min(self.plotter.axes.get_lines()[0].get_ydata())*20)
    def test_plot_zero_lines_for_each_trace(self):
        """With zero lines on, each trace gets exactly one extra line."""
        self.plotter.parameters['show_zero_lines'] = True
        self.plotter.plot()
        self.assertEqual(2*len(self.plotter.datasets),
                         len(self.plotter.axes.get_lines()))
    def test_plot_zero_lines_for_each_trace_at_correct_position(self):
        # The last zero line belongs to the lowest (negative-offset) trace.
        self.plotter.parameters['show_zero_lines'] = True
        self.plotter.plot()
        self.assertGreater(0, self.plotter.axes.get_lines()[-1].get_ydata()[0])
class TestCompositePlotter(unittest.TestCase):
    """Tests for the composite plotter combining plotters in a grid."""
    def setUp(self):
        self.plotter = plotting.CompositePlotter()
        self.dataset = aspecd.dataset.CalculatedDataset()
        self.dataset.data.data = np.sin(np.linspace(0, 2*np.pi, 101))
    def tearDown(self):
        # Close the figure to release matplotlib resources between tests.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_description_is_sensible(self):
        self.assertIn('Composite', self.plotter.description)
    def test_has_grid_dimensions_property(self):
        self.assertTrue(hasattr(self.plotter, 'grid_dimensions'))
    def test_has_subplot_locations_property(self):
        self.assertTrue(hasattr(self.plotter, 'subplot_locations'))
    def test_has_axes_positions_property(self):
        self.assertTrue(hasattr(self.plotter, 'axes_positions'))
    def test_has_plotter_property(self):
        self.assertTrue(hasattr(self.plotter, 'plotter'))
    def test_plot_with_single_subplot_adds_axis_to_axes(self):
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        self.plotter.plotter.append(single_plotter)
        self.plotter.plot()
        self.assertEqual(1, len(self.plotter.axes))
    def test_plot_with_multiple_subplots_adds_axes_to_axes(self):
        """One axis object per subplot location must be created."""
        self.plotter.grid_dimensions = [2, 2]
        # Each entry presumably is [origin x, origin y, span x, span y] in
        # grid cells - TODO confirm against CompositePlotter docs.
        self.plotter.subplot_locations = [[0, 0, 1, 1],
                                          [1, 0, 1, 1],
                                          [0, 1, 2, 1]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        self.plotter.plotter.append(single_plotter)
        self.plotter.plotter.append(single_plotter)
        self.plotter.plotter.append(single_plotter)
        self.plotter.plot()
        self.assertEqual(len(self.plotter.subplot_locations),
                         len(self.plotter.axes))
    def test_plot_with_single_subplot_and_plotter_plots_line(self):
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        self.plotter.plotter.append(single_plotter)
        self.plotter.plot()
        self.assertTrue(self.plotter.axes[0].has_data())
    def test_plot_without_plotter_raises(self):
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        with self.assertRaises(aspecd.exceptions.MissingPlotterError):
            self.plotter.plot()
    def test_plot_with_not_enough_plotters_raises(self):
        """Fewer plotters than subplot locations must raise."""
        self.plotter.grid_dimensions = [2, 2]
        self.plotter.subplot_locations = [[0, 0, 1, 1],
                                          [1, 0, 1, 1],
                                          [0, 1, 2, 1]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        self.plotter.plotter.append(single_plotter)
        self.plotter.plotter.append(single_plotter)
        with self.assertRaises(aspecd.exceptions.MissingPlotterError):
            self.plotter.plot()
    def test_plot_sets_axes_position(self):
        """axes_positions offsets shift/shrink the default axes position."""
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        self.plotter.axes_positions = [[0.2, 0.2, -0.2, -0.2]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        self.plotter.plotter.append(single_plotter)
        self.plotter.plot()
        offsets = self.plotter.axes_positions[0]
        # 0.125/0.110 and 0.775/0.77 match matplotlib's default subplot
        # left/bottom and width/height - NOTE(review): brittle if
        # matplotlib's rcParams defaults change.
        axis_position = [0.125 + offsets[0]*0.775, 0.110 + offsets[1]*0.77,
                         offsets[2]*0.775, offsets[3]*0.77]
        self.assertListEqual(axis_position,
                             list(self.plotter.axes[0].get_position().bounds))
    def test_plot_shows_legend(self):
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        single_plotter.dataset = self.dataset
        single_plotter.parameters['show_legend'] = True
        self.plotter.plotter.append(single_plotter)
        # Discard anything written to stderr while plotting with legend.
        with contextlib.redirect_stderr(io.StringIO()):
            self.plotter.plot()
        self.assertTrue(isinstance(self.plotter.axes[0].get_legend(),
                                   matplotlib.legend.Legend))
class TestSingleCompositePlotter(unittest.TestCase):
    """Tests for the composite plotter operating on a single dataset."""
    def setUp(self):
        self.plotter = plotting.SingleCompositePlotter()
    def tearDown(self):
        # Close the figure to release matplotlib resources between tests.
        if self.plotter.fig:
            plt.close(self.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_description_is_sensible(self):
        self.assertIn('single dataset', self.plotter.description)
    def test_plot_without_dataset_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDatasetError):
            self.plotter.plot()
    def test_plot_with_preset_dataset(self):
        self.plotter.dataset = dataset.Dataset()
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        self.plotter.plotter.append(single_plotter)
        self.plotter.plot()
    def test_plot_from_dataset_sets_dataset(self):
        """Calling dataset.plot() must hand the dataset to the plotter."""
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        self.plotter.plotter.append(single_plotter)
        test_dataset = dataset.Dataset()
        plotter = test_dataset.plot(self.plotter)
        self.assertTrue(isinstance(plotter.dataset, dataset.Dataset))
    def test_plot_with_dataset(self):
        """Plotting must add a representation record to the dataset."""
        self.plotter.grid_dimensions = [1, 1]
        self.plotter.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        self.plotter.plotter.append(single_plotter)
        test_dataset = dataset.Dataset()
        self.plotter.plot(dataset=test_dataset)
        self.assertGreater(len(test_dataset.representations), 0)
    def test_plot_checks_applicability(self):
        """An inapplicable plotter must raise when plotting a dataset."""
        class MyPlotter(aspecd.plotting.SingleCompositePlotter):
            @staticmethod
            def applicable(dataset):
                return False
        # NOTE(review): local name shadows the module-level `dataset`
        # import - confined to this method, but easy to misread.
        dataset = aspecd.dataset.Dataset()
        plotter = MyPlotter()
        with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
            dataset.plot(plotter)
    def test_plot_check_applicability_prints_helpful_message(self):
        """The exception message names plotter class and dataset id."""
        class MyPlotter(aspecd.plotting.SingleCompositePlotter):
            @staticmethod
            def applicable(dataset):
                return False
        dataset = aspecd.dataset.Dataset()
        dataset.id = "foo"
        plotter = MyPlotter()
        message = "MyPlotter not applicable to dataset with id foo"
        with self.assertRaisesRegex(
                aspecd.exceptions.NotApplicableToDatasetError, message):
            dataset.plot(plotter)
class TestSaver(unittest.TestCase):
    """Tests for :class:`plotting.Saver`: writing plots to files."""
    def setUp(self):
        self.saver = plotting.Saver()
        self.filename = 'test.pdf'
    def tearDown(self):
        # Remove the output file and close any figure a test created.
        if os.path.isfile(self.filename):
            os.remove(self.filename)
        if self.saver.plotter and self.saver.plotter.fig:
            plt.close(self.saver.plotter.fig)
    def test_instantiate_class(self):
        pass
    def test_has_save_method(self):
        self.assertTrue(hasattr(self.saver, 'save'))
        self.assertTrue(callable(self.saver.save))
    def test_save_without_filename_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingFilenameError):
            self.saver.save(plotting.Plotter())
    def test_with_filename_set_previously(self):
        self.saver.plotter = plotting.Plotter()
        self.saver.plotter.plot()
        self.saver.filename = self.filename
        self.saver.save()
    def test_instantiate_with_filename_sets_filename(self):
        self.saver = plotting.Saver(self.filename)
        self.assertEqual(self.saver.filename, self.filename)
    def test_save_without_plotter_raises(self):
        self.saver.filename = self.filename
        with self.assertRaises(aspecd.exceptions.MissingPlotError):
            self.saver.save()
    def test_save_with_plotter_sets_plotter(self):
        plotter = plotting.Plotter()
        plotter.plot()
        self.saver.filename = self.filename
        self.saver.save(plotter)
        self.assertEqual(self.saver.plotter, plotter)
    def test_has_parameters_property(self):
        self.assertTrue(hasattr(self.saver, 'parameters'))
    def test_parameters_property_is_dict(self):
        self.assertTrue(isinstance(self.saver.parameters, dict))
    def test_save_creates_file(self):
        plotter = plotting.Plotter()
        plotter.plot()
        self.saver.filename = self.filename
        self.saver.save(plotter)
        self.assertTrue(os.path.isfile(self.filename))
    def test_set_format_parameter_adds_extension(self):
        """The 'format' parameter must append the matching extension."""
        plotter = plotting.Plotter()
        plotter.plot()
        self.filename = 'test.pdf'
        # Hand the saver the filename *without* extension on purpose.
        self.saver.filename, _ = os.path.splitext(self.filename)
        self.saver.parameters["format"] = 'pdf'
        self.saver.save(plotter)
        self.assertTrue(os.path.isfile(self.filename))
    def test_set_format_parameter_corrects_extension(self):
        """A wrong extension must be replaced by the 'format' parameter."""
        plotter = plotting.Plotter()
        plotter.plot()
        self.filename = 'test.pdf'
        basename, _ = os.path.splitext(self.filename)
        self.saver.parameters["format"] = 'pdf'
        self.saver.filename = '.'.join([basename, "png"])
        self.saver.save(plotter)
        self.assertTrue(os.path.isfile(self.filename))
    def test_set_format_parameter_writes_appropriate_file(self):
        # NOTE(review): byte-identical to
        # test_set_format_parameter_adds_extension - candidate for removal
        # or for actually asserting the written file's format.
        plotter = plotting.Plotter()
        plotter.plot()
        self.filename = 'test.pdf'
        self.saver.filename, _ = os.path.splitext(self.filename)
        self.saver.parameters["format"] = 'pdf'
        self.saver.save(plotter)
        self.assertTrue(os.path.isfile(self.filename))
    def test_save_with_singleplotter1d(self):
        test_dataset = dataset.Dataset()
        plotter = plotting.SinglePlotter1D()
        plotter = test_dataset.plot(plotter)
        plotter.plot()
        self.saver.filename = self.filename
        self.saver.save(plotter)
    def test_save_with_singleplotter2d(self):
        test_dataset = dataset.Dataset()
        test_dataset.data.data = np.random.rand(3, 2)
        plotter = plotting.SinglePlotter2D()
        plotter = test_dataset.plot(plotter)
        plotter.plot()
        self.saver.filename = self.filename
        self.saver.save(plotter)
    def test_save_with_multiplotter(self):
        plotter = plotting.MultiPlotter()
        plotter.datasets.append(dataset.Dataset())
        plotter.plot()
        self.saver.filename = self.filename
        self.saver.save(plotter)
class TestCaption(unittest.TestCase):
    """Tests for :class:`plotting.Caption`: dict I/O and public attributes."""

    def setUp(self):
        self.caption = plotting.Caption()

    def test_instantiate_class(self):
        pass

    def test_has_to_dict_method(self):
        to_dict = getattr(self.caption, 'to_dict', None)
        self.assertTrue(to_dict is not None)
        self.assertTrue(callable(to_dict))

    def test_has_from_dict_method(self):
        from_dict = getattr(self.caption, 'from_dict', None)
        self.assertTrue(from_dict is not None)
        self.assertTrue(callable(from_dict))

    def test_has_title_property(self):
        self.assertTrue(hasattr(self.caption, 'title'))

    def test_has_text_property(self):
        self.assertTrue(hasattr(self.caption, 'text'))

    def test_has_parameters_property(self):
        self.assertTrue(hasattr(self.caption, 'parameters'))
class TestDrawingProperties(unittest.TestCase):
    """Tests for the base class of per-drawing property collections."""
    def setUp(self):
        self.drawing_properties = plotting.DrawingProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.drawing_properties, 'to_dict'))
        self.assertTrue(callable(self.drawing_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.drawing_properties, 'from_dict'))
        self.assertTrue(callable(self.drawing_properties.from_dict))
    def test_has_properties(self):
        for prop in ['label']:
            self.assertTrue(hasattr(self.drawing_properties, prop))
    def test_has_apply_method(self):
        self.assertTrue(hasattr(self.drawing_properties, 'apply'))
        self.assertTrue(callable(self.drawing_properties.apply))
    def test_apply_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDrawingError):
            self.drawing_properties.apply()
    def test_apply_sets_properties(self):
        """apply() transfers the label onto a matplotlib line."""
        self.drawing_properties.label = 'foo'
        line = matplotlib.lines.Line2D([0, 1], [0, 0])
        self.drawing_properties.apply(drawing=line)
        self.assertEqual(self.drawing_properties.label, line.get_label())
    def test_apply_with_nonexisting_property_issues_log_message(self):
        """Unknown attributes must be logged at DEBUG level, not raise."""
        self.drawing_properties.foobar = 'foo'
        line = matplotlib.lines.Line2D([0, 1], [0, 0])
        with self.assertLogs(__package__, level='DEBUG') as cm:
            self.drawing_properties.apply(drawing=line)
        self.assertIn('"{}" has no setter for attribute "{}", hence not '
                      'set'.format(line.__class__, "foobar"), cm.output[0])
class TestLineProperties(unittest.TestCase):
    """Tests for :class:`plotting.LineProperties`.

    Covers dict (de)serialisation hooks, the expected set of line
    attributes, and applying the properties to a matplotlib line.
    """

    def setUp(self):
        self.line_properties = plotting.LineProperties()

    def test_instantiate_class(self):
        pass

    def test_has_to_dict_method(self):
        to_dict = getattr(self.line_properties, 'to_dict', None)
        self.assertTrue(to_dict is not None)
        self.assertTrue(callable(to_dict))

    def test_has_from_dict_method(self):
        from_dict = getattr(self.line_properties, 'from_dict', None)
        self.assertTrue(from_dict is not None)
        self.assertTrue(callable(from_dict))

    def test_has_properties(self):
        expected_attributes = ('color', 'drawstyle', 'label', 'linestyle',
                               'linewidth', 'marker')
        for attribute in expected_attributes:
            self.assertTrue(hasattr(self.line_properties, attribute))

    def test_has_apply_method(self):
        apply_method = getattr(self.line_properties, 'apply', None)
        self.assertTrue(apply_method is not None)
        self.assertTrue(callable(apply_method))

    def test_apply_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDrawingError):
            self.line_properties.apply()

    def test_apply_sets_properties(self):
        self.line_properties.label = 'foo'
        # noinspection PyUnresolvedReferences
        drawing = matplotlib.lines.Line2D([0, 1], [0, 0])
        self.line_properties.apply(drawing=drawing)
        self.assertEqual(self.line_properties.label, drawing.get_label())
class TestSurfaceProperties(unittest.TestCase):
    """Tests for the property collection of 2D surface plots."""
    def setUp(self):
        self.properties = plotting.SurfaceProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.properties, 'to_dict'))
        self.assertTrue(callable(self.properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.properties, 'from_dict'))
        self.assertTrue(callable(self.properties.from_dict))
    def test_has_properties(self):
        for prop in ['cmap']:
            self.assertTrue(hasattr(self.properties, prop))
    def test_has_apply_method(self):
        self.assertTrue(hasattr(self.properties, 'apply'))
        self.assertTrue(callable(self.properties.apply))
    def test_apply_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingDrawingError):
            self.properties.apply()
    @unittest.skip
    def test_apply_sets_properties(self):
        # Skipped: a Line2D has no cmap attribute; presumably a real
        # surface artist (e.g. a contour set) is needed - TODO confirm.
        self.properties.cmap = 'RdGy'
        # noinspection PyUnresolvedReferences
        contour = matplotlib.lines.Line2D([0, 1], [0, 0])
        self.properties.apply(drawing=contour)
        self.assertEqual(self.properties.cmap, contour.cmap.name)
class TestPlotProperties(unittest.TestCase):
    """Tests for the top-level property collection of a plot."""
    def setUp(self):
        self.plot_properties = plotting.PlotProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'to_dict'))
        self.assertTrue(callable(self.plot_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'from_dict'))
        self.assertTrue(callable(self.plot_properties.from_dict))
    def test_has_figure_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'figure'))
    def test_has_apply_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'apply'))
        self.assertTrue(callable(self.plot_properties.apply))
    def test_apply_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingPlotterError):
            self.plot_properties.apply()
    def test_apply_sets_properties(self):
        """apply() cascades nested figure properties onto the plotter."""
        self.plot_properties.figure.dpi = 300.0
        plot = plotting.Plotter()
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.figure.dpi,
                         plot.figure.get_dpi())
        plt.close(plot.figure)
class TestFigureProperties(unittest.TestCase):
    """Tests for figure-level properties (size, dpi, title)."""
    def setUp(self):
        self.figure_properties = plotting.FigureProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.figure_properties, 'to_dict'))
        self.assertTrue(callable(self.figure_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.figure_properties, 'from_dict'))
        self.assertTrue(callable(self.figure_properties.from_dict))
    def test_has_properties(self):
        for prop in ['size', 'dpi', 'title']:
            self.assertTrue(hasattr(self.figure_properties, prop))
    def test_has_apply_method(self):
        self.assertTrue(hasattr(self.figure_properties, 'apply'))
        self.assertTrue(callable(self.figure_properties.apply))
    def test_apply_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingFigureError):
            self.figure_properties.apply()
    def test_apply_sets_figure_dpi(self):
        self.figure_properties.dpi = 300.0
        plot = plotting.Plotter()
        plot.plot()
        self.figure_properties.apply(figure=plot.figure)
        self.assertEqual(self.figure_properties.dpi, plot.figure.get_dpi())
        plt.close(plot.figure)
    def test_apply_sets_figure_size(self):
        # size is given in inches, matching figure.get_size_inches().
        self.figure_properties.size = (10, 5)
        plot = plotting.Plotter()
        plot.plot()
        self.figure_properties.apply(figure=plot.figure)
        self.assertListEqual(list(self.figure_properties.size),
                             list(plot.figure.get_size_inches()))
        plt.close(plot.figure)
    def test_apply_sets_figure_title(self):
        self.figure_properties.title = 'foo'
        plot = plotting.Plotter()
        plot.plot()
        self.figure_properties.apply(figure=plot.figure)
        # NOTE(review): _suptitle is a private matplotlib attribute.
        self.assertEqual(self.figure_properties.title,
                         plot.figure._suptitle.get_text())
        plt.close(plot.figure)
class TestAxisProperties(unittest.TestCase):
    """Tests for axes-level properties (labels, limits, scales, ticks)."""
    def setUp(self):
        self.axis_properties = plotting.AxesProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.axis_properties, 'to_dict'))
        self.assertTrue(callable(self.axis_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.axis_properties, 'from_dict'))
        self.assertTrue(callable(self.axis_properties.from_dict))
    def test_has_properties(self):
        for prop in ['aspect', 'facecolor', 'position', 'title',
                     'xlabel', 'xlim', 'xscale', 'xticklabels', 'xticks',
                     'ylabel', 'ylim', 'yscale', 'yticklabels', 'yticks']:
            self.assertTrue(hasattr(self.axis_properties, prop))
    def test_has_apply_properties_method(self):
        self.assertTrue(hasattr(self.axis_properties, 'apply'))
        self.assertTrue(callable(self.axis_properties.apply))
    def test_apply_properties_without_argument_raises(self):
        with self.assertRaises(aspecd.exceptions.MissingAxisError):
            self.axis_properties.apply()
    def test_apply_properties_sets_axis_properties(self):
        self.axis_properties.xlabel = 'foo'
        plot = plotting.Plotter()
        plot.plot()
        self.axis_properties.apply(axes=plot.axes)
        self.assertEqual(self.axis_properties.xlabel, plot.axes.get_xlabel())
        plt.close(plot.figure)
    def test_apply_properties_from_dict_sets_axis_properties(self):
        """Axes properties given as a nested dict reach the plot axes."""
        label = 'foo'
        properties = {'axes': {'xlabel': label}}
        plot = plotting.MultiPlotter1D()
        plot.datasets.append(aspecd.dataset.Dataset())
        plot.properties.from_dict(properties)
        plot.plot()
        self.assertEqual(label, plot.axes.get_xlabel())
        plt.close(plot.figure)
    def test_set_xticks(self):
        self.axis_properties.xticks = np.linspace(0, 1, 11)
        plot = plotting.Plotter()
        plot.plot()
        self.axis_properties.apply(axes=plot.axes)
        self.assertListEqual(list(self.axis_properties.xticks),
                             list(plot.axes.get_xticks()))
        plt.close(plot.figure)
    def test_set_xtick_labels(self):
        self.axis_properties.xticks = np.linspace(0, 1, 11)
        self.axis_properties.xticklabels = np.linspace(2, 3, 11).astype(str)
        plot = plotting.Plotter()
        plot.plot()
        self.axis_properties.apply(axes=plot.axes)
        self.assertEqual(self.axis_properties.xticklabels[5],
                         plot.axes.get_xticklabels()[5].get_text())
        plt.close(plot.figure)
    def test_set_yticks(self):
        self.axis_properties.yticks = np.linspace(0, 1, 11)
        plot = plotting.Plotter()
        plot.plot()
        self.axis_properties.apply(axes=plot.axes)
        self.assertListEqual(list(self.axis_properties.yticks),
                             list(plot.axes.get_yticks()))
        plt.close(plot.figure)
    def test_set_ytick_labels(self):
        self.axis_properties.yticks = np.linspace(0, 1, 11)
        self.axis_properties.yticklabels = np.linspace(2, 3, 11).astype(str)
        plot = plotting.Plotter()
        plot.plot()
        self.axis_properties.apply(axes=plot.axes)
        self.assertEqual(self.axis_properties.yticklabels[5],
                         plot.axes.get_yticklabels()[5].get_text())
        plt.close(plot.figure)
    def test_set_ticks_and_labels_does_not_issue_warning(self):
        # Presumably guards against matplotlib's warning when tick labels
        # are set without fixed tick locations - TODO confirm.
        self.axis_properties.xticks = np.linspace(0, 1, 11)
        self.axis_properties.xticklabels = np.linspace(2, 3, 11).astype(str)
        plot = plotting.Plotter()
        plot.plot()
        with warnings.catch_warnings(record=True) as warning:
            self.axis_properties.apply(axes=plot.axes)
            self.assertFalse(len(warning))
        plt.close(plot.figure)
class TestLegendProperties(unittest.TestCase):
    def setUp(self):
        # Fresh LegendProperties instance for each test.
        self.legend_properties = plotting.LegendProperties()
    def test_instantiate_class(self):
        """Instantiation (in setUp) must succeed without arguments."""
        pass
    def test_has_to_dict_method(self):
        """LegendProperties must expose a callable to_dict."""
        self.assertTrue(hasattr(self.legend_properties, 'to_dict'))
        self.assertTrue(callable(self.legend_properties.to_dict))
def test_has_from_dict_method(self):
self.assertTrue(hasattr(self.legend_properties, 'from_dict'))
self.assertTrue(callable(self.legend_properties.from_dict))
def test_has_properties(self):
for prop in ['loc', 'frameon']:
self.assertTrue(hasattr(self.legend_properties, prop))
def test_has_apply_method(self):
self.assertTrue(hasattr(self.legend_properties, 'apply'))
self.assertTrue(callable(self.legend_properties.apply))
def test_apply_without_argument_raises(self):
with self.assertRaises(aspecd.exceptions.MissingLegendError):
self.legend_properties.apply()
def test_apply_properties_sets_legend_properties(self):
self.legend_properties.loc = 'center'
plot = plotting.Plotter()
plot.plot()
with contextlib.redirect_stderr(io.StringIO()):
legend = plot.axes.legend()
self.legend_properties.apply(legend=legend)
self.assertEqual(self.legend_properties.loc, legend.loc)
plt.close(plot.figure)
def test_location_sets_legend_loc(self):
location = 5
self.legend_properties.location = location
plot = plotting.Plotter()
plot.properties.legend = self.legend_properties
plot.parameters['show_legend'] = True
with contextlib.redirect_stderr(io.StringIO()):
plot.plot()
legend = plot.legend
self.assertEqual(location, legend._loc)
plt.close(plot.figure)
def test_location_from_dict_sets_legend_loc(self):
location = 5
properties = {'legend': {'location': location}}
plot = plotting.Plotter()
plot.properties.from_dict(properties)
plot.parameters['show_legend'] = True
with contextlib.redirect_stderr(io.StringIO()):
plot.plot()
legend = plot.legend
self.assertEqual(location, legend._loc)
plt.close(plot.figure)
def test_frameon_sets_legend_frameon(self):
frameon = False
self.legend_properties.frameon = frameon
plot = plotting.Plotter()
plot.properties.legend = self.legend_properties
plot.parameters['show_legend'] = True
with contextlib.redirect_stderr(io.StringIO()):
plot.plot()
legend = plot.legend
self.assertEqual(frameon, legend.get_frame_on())
plt.close(plot.figure)
def test_location_not_included_in_to_dict(self):
self.assertNotIn('location', self.legend_properties.to_dict())
class TestGridProperties(unittest.TestCase):
    """Tests for the grid properties class of the plotting module."""
    def setUp(self):
        self.grid_properties = plotting.GridProperties()
    def test_instantiate_class(self):
        pass
    def test_has_to_dict_method(self):
        to_dict = getattr(self.grid_properties, 'to_dict', None)
        self.assertTrue(callable(to_dict))
    def test_has_from_dict_method(self):
        from_dict = getattr(self.grid_properties, 'from_dict', None)
        self.assertTrue(callable(from_dict))
    def test_has_properties(self):
        for attribute in ('show', 'ticks', 'axis', 'lines'):
            self.assertTrue(hasattr(self.grid_properties, attribute))
    def test_has_apply_method(self):
        apply_ = getattr(self.grid_properties, 'apply', None)
        self.assertTrue(callable(apply_))
    def test_apply_without_argument_raises(self):
        with self.assertRaises(TypeError):
            self.grid_properties.apply()
    def test_lines_color_is_sensible_for_grid(self):
        # Default grid line colour should be a light grey, not pure black.
        self.assertEqual('#cccccc', self.grid_properties.lines.color)
    def test_apply_properties_sets_properties(self):
        grid_colour = '#cccccc'
        self.grid_properties.show = True
        self.grid_properties.lines.color = grid_colour
        plotter = plotting.Plotter()
        plotter.plot()
        self.grid_properties.apply(axes=plotter.axes)
        first_gridline = plotter.axes.xaxis.get_gridlines()[0]
        self.assertEqual(grid_colour, first_gridline.get_color())
        plt.close(plotter.figure)
class TestSinglePlotProperties(unittest.TestCase):
    """Tests for the properties class of single plotters."""
    def setUp(self):
        self.plot_properties = plotting.SinglePlotProperties()
    def _assert_has_attribute(self, name):
        # Helper: attribute must exist on the properties object.
        sentinel = object()
        self.assertIsNot(sentinel,
                         getattr(self.plot_properties, name, sentinel))
    def test_instantiate_class(self):
        pass
    def test_has_figure_property(self):
        self._assert_has_attribute('figure')
    def test_has_axes_property(self):
        self._assert_has_attribute('axes')
    def test_has_grid_property(self):
        self._assert_has_attribute('grid')
    def test_has_drawing_property(self):
        self._assert_has_attribute('drawing')
    def test_has_to_dict_method(self):
        self._assert_has_attribute('to_dict')
        self.assertTrue(callable(self.plot_properties.to_dict))
    def test_has_from_dict_method(self):
        self._assert_has_attribute('from_dict')
        self.assertTrue(callable(self.plot_properties.from_dict))
    def test_apply_sets_axis_properties(self):
        label = 'foo'
        self.plot_properties.axes.xlabel = label
        plotter = plotting.SinglePlotter()
        plotter.plot(dataset=dataset.Dataset())
        self.plot_properties.apply(plotter=plotter)
        self.assertEqual(label, plotter.axes.get_xlabel())
        plt.close(plotter.figure)
    def test_apply_sets_grid_properties(self):
        grid_colour = '#000000'
        self.plot_properties.grid.show = True
        self.plot_properties.grid.lines.color = grid_colour
        plotter = plotting.SinglePlotter()
        plotter.plot(dataset=dataset.Dataset())
        self.plot_properties.apply(plotter=plotter)
        first_gridline = plotter.axes.xaxis.get_gridlines()[0]
        self.assertEqual(grid_colour, first_gridline.get_color())
        plt.close(plotter.figure)
    def test_apply_sets_drawing_properties(self):
        label = 'foo'
        self.plot_properties.drawing.label = label
        plotter = plotting.SinglePlotter1D()
        plotter.plot(dataset=dataset.Dataset())
        self.plot_properties.apply(plotter=plotter)
        self.assertEqual(label, plotter.drawing.get_label())
        plt.close(plotter.figure)
class TestSinglePlot1DProperties(unittest.TestCase):
    """Tests for the properties class of 1D single plotters."""
    def setUp(self):
        self.plot_properties = plotting.SinglePlot1DProperties()
    def test_instantiate_class(self):
        pass
    def test_apply_sets_drawing_properties(self):
        line_width = 2.0
        self.plot_properties.drawing.linewidth = line_width
        plotter = plotting.SinglePlotter1D()
        plotter.plot(dataset=dataset.Dataset())
        self.plot_properties.apply(plotter=plotter)
        self.assertEqual(line_width, plotter.drawing.get_linewidth())
        plt.close(plotter.figure)
class TestSinglePlot2DProperties(unittest.TestCase):
    """Tests for the properties class of 2D single plotters."""
    def setUp(self):
        self.plot_properties = plotting.SinglePlot2DProperties()
    def test_instantiate_class(self):
        pass
    def test_apply_sets_drawing_properties(self):
        colour_map = 'RdGy'
        self.plot_properties.drawing.cmap = colour_map
        plotter = plotting.SinglePlotter2D()
        dataset_ = dataset.Dataset()
        dataset_.data.data = np.random.random([5, 5])
        plotter.plot(dataset=dataset_)
        self.plot_properties.apply(plotter=plotter)
        self.assertEqual(colour_map, plotter.drawing.cmap.name)
        plt.close(plotter.figure)
class TestMultiPlotProperties(unittest.TestCase):
    """Tests for the properties class of multi plotters.

    Covers presence of the expected attributes and methods, applying the
    properties to an actual multi-plotter, and the ``from_dict`` handling
    of drawings and legend.
    """
    def setUp(self):
        self.plot_properties = plotting.MultiPlotProperties()
    def test_instantiate_class(self):
        pass
    def test_has_figure_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'figure'))
    def test_has_axes_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'axes'))
    def test_has_grid_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'grid'))
    def test_has_drawings_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'drawings'))
    def test_has_legend_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'legend'))
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'to_dict'))
        self.assertTrue(callable(self.plot_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'from_dict'))
        self.assertTrue(callable(self.plot_properties.from_dict))
    def test_apply_sets_axis_properties(self):
        self.plot_properties.axes.xlabel = 'foo'
        plot = plotting.MultiPlotter()
        plot.datasets = [dataset.Dataset()]
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.axes.xlabel,
                         plot.axes.get_xlabel())
        plt.close(plot.figure)
    def test_apply_sets_grid_properties(self):
        self.plot_properties.grid.show = True
        self.plot_properties.grid.lines.color = '#000000'
        # Fixed: use a MultiPlotter, consistent with the other tests of this
        # class (previously a SinglePlotter slipped in via copy-paste).
        plot = plotting.MultiPlotter()
        plot.datasets = [dataset.Dataset()]
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.grid.lines.color,
                         plot.axes.xaxis.get_gridlines()[0].get_color())
        plt.close(plot.figure)
    def test_apply_sets_legend_properties(self):
        self.plot_properties.legend.loc = 'center'
        plotter = plotting.MultiPlotter()
        dataset_ = dataset.Dataset()
        dataset_.label = 'foo'
        plotter.datasets = [dataset_]
        plotter.plot()
        # Creating the legend may write to stderr; silence it for the test.
        with contextlib.redirect_stderr(io.StringIO()):
            plotter.legend = plotter.axes.legend()
        self.plot_properties.apply(plotter=plotter)
        self.assertEqual(self.plot_properties.legend.loc,
                         plotter.legend.loc)
        plt.close(plotter.figure)
    def test_from_dict_sets_drawings(self):
        dict_ = {'drawings': [{'label': 'foo'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual('foo', self.plot_properties.drawings[0].label)
    def test_from_dict_sets_multiple_drawings(self):
        dict_ = {'drawings': [{'label': 'foo'}, {'label': 'bar'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual('foo', self.plot_properties.drawings[0].label)
        self.assertEqual('bar', self.plot_properties.drawings[1].label)
    def test_from_dict_does_not_add_drawing_if_it_exists(self):
        # A pre-existing drawing should be updated, not duplicated.
        self.plot_properties.drawings.append(
            aspecd.plotting.DrawingProperties())
        dict_ = {'drawings': [{'label': 'foo'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual(1, len(self.plot_properties.drawings))
    def test_from_dict_adds_missing_drawing(self):
        dict_ = {'drawings': [{'label': 'foo'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual(1, len(self.plot_properties.drawings))
    def test_from_dict_adds_missing_drawings(self):
        dict_ = {'drawings': [{'label': 'foo'}, {'label': 'bar'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual(2, len(self.plot_properties.drawings))
    def test_from_dict_sets_legend(self):
        dict_ = {'legend': {'loc': 'center'}, 'drawings': [{'label': 'foo'}]}
        self.plot_properties.from_dict(dict_)
        self.assertEqual('center', self.plot_properties.legend.loc)
class TestMultiPlot1DProperties(unittest.TestCase):
    """Tests for the properties class of 1D multi plotters.

    Focusses on :meth:`add_drawing` and the defaults (colour cycle,
    linewidth, linestyle, marker) the added drawings receive.
    """
    def setUp(self):
        self.plot_properties = plotting.MultiPlot1DProperties()
    def test_instantiate_class(self):
        pass
    def test_added_drawing_is_line_properties_object(self):
        self.plot_properties.add_drawing()
        self.assertIs(type(self.plot_properties.drawings[0]),
                      aspecd.plotting.LineProperties)
    def test_added_drawing_has_correct_default_colour(self):
        # The first drawing should get the first colour of the default
        # matplotlib property cycle.
        property_cycle = plt.rcParams['axes.prop_cycle'].by_key()
        colour = property_cycle["color"][0]
        self.plot_properties.add_drawing()
        self.assertEqual(colour, self.plot_properties.drawings[0].color)
    def test_drawing_has_correct_color_if_more_drawings_than_colors(self):
        # Adding one drawing more than there are colours in the cycle must
        # wrap around to the first colour again. Fixed: assert on the last
        # added drawing — drawings[0] trivially has the first colour and
        # never exercised the wrap-around.
        property_cycle = plt.rcParams['axes.prop_cycle'].by_key()
        colour = property_cycle["color"][0]
        for _ in range(len(property_cycle["color"]) + 1):
            self.plot_properties.add_drawing()
        self.assertEqual(colour, self.plot_properties.drawings[-1].color)
    def test_added_drawing_has_correct_default_linewidth(self):
        linewidth = plt.rcParams['lines.linewidth']
        self.plot_properties.add_drawing()
        self.assertEqual(linewidth, self.plot_properties.drawings[0].linewidth)
    def test_added_drawing_has_correct_default_linestyle(self):
        # Renamed misleading local variable (was "linewidth").
        linestyle = plt.rcParams['lines.linestyle']
        self.plot_properties.add_drawing()
        self.assertEqual(linestyle, self.plot_properties.drawings[0].linestyle)
    def test_added_drawing_has_correct_default_marker(self):
        # Renamed misleading local variable (was "linewidth").
        marker = plt.rcParams['lines.marker']
        self.plot_properties.add_drawing()
        self.assertEqual(marker, self.plot_properties.drawings[0].marker)
class TestCompositePlotProperties(unittest.TestCase):
    """Tests for the properties class of composite plotters.

    Applying the properties must set axis properties on every subplot axes
    of the composite plotter, overriding settings of individual plotters.
    """
    def setUp(self):
        self.plot_properties = plotting.CompositePlotProperties()
    def test_instantiate_class(self):
        pass
    def test_has_figure_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'figure'))
    def test_has_axes_property(self):
        self.assertTrue(hasattr(self.plot_properties, 'axes'))
    def test_has_to_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'to_dict'))
        self.assertTrue(callable(self.plot_properties.to_dict))
    def test_has_from_dict_method(self):
        self.assertTrue(hasattr(self.plot_properties, 'from_dict'))
        self.assertTrue(callable(self.plot_properties.from_dict))
    def test_apply_sets_axis_properties(self):
        self.plot_properties.axes.xlabel = 'foo'
        # Composite plotter with a single 1x1 subplot containing a sine.
        plot = plotting.CompositePlotter()
        plot.grid_dimensions = [1, 1]
        plot.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.sin(np.linspace(0, 2*np.pi, 101))
        single_plotter.dataset = dataset_
        plot.plotter.append(single_plotter)
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.axes.xlabel,
                         plot.axes[0].get_xlabel())
        plt.close(plot.figure)
    def test_apply_sets_axis_properties_for_multiple_plots(self):
        self.plot_properties.axes.xlabel = 'foo'
        # Two subplots stacked vertically; both axes must get the label.
        plot = plotting.CompositePlotter()
        plot.grid_dimensions = [2, 1]
        plot.subplot_locations = [[0, 0, 1, 1], [1, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.sin(np.linspace(0, 2*np.pi, 101))
        single_plotter.dataset = dataset_
        plot.plotter.append(single_plotter)
        plot.plotter.append(single_plotter)
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.axes.xlabel,
                         plot.axes[0].get_xlabel())
        self.assertEqual(self.plot_properties.axes.xlabel,
                         plot.axes[1].get_xlabel())
        plt.close(plot.figure)
    def test_apply_overrides_axis_properties(self):
        self.plot_properties.axes.xlabel = 'foo'
        plot = plotting.CompositePlotter()
        plot.grid_dimensions = [1, 1]
        plot.subplot_locations = [[0, 0, 1, 1]]
        single_plotter = plotting.SinglePlotter1D()
        # The individual plotter sets its own label ('bar') which the
        # composite properties ('foo') must override.
        single_plotter.properties.axes.xlabel = 'bar'
        dataset_ = aspecd.dataset.CalculatedDataset()
        dataset_.data.data = np.sin(np.linspace(0, 2*np.pi, 101))
        single_plotter.dataset = dataset_
        plot.plotter.append(single_plotter)
        plot.plot()
        self.plot_properties.apply(plotter=plot)
        self.assertEqual(self.plot_properties.axes.xlabel,
                         plot.axes[0].get_xlabel())
        plt.close(plot.figure)
| [
"numpy.random.rand",
"aspecd.plotting.MultiPlotter",
"aspecd.plotting.Caption",
"aspecd.plotting.SinglePlotProperties",
"aspecd.plotting.SinglePlotter1D",
"aspecd.dataset.Dataset",
"os.remove",
"os.path.exists",
"aspecd.plotting.GridProperties",
"aspecd.plotting.Plotter",
"aspecd.plotting.Compos... | [((429, 447), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (445, 447), False, 'from aspecd import plotting, utils, dataset\n'), ((523, 552), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (537, 552), False, 'import os\n'), ((941, 976), 'aspecd.utils.full_class_name', 'utils.full_class_name', (['self.plotter'], {}), '(self.plotter)\n', (962, 976), False, 'from aspecd import plotting, utils, dataset\n'), ((2873, 2903), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.figure'], {}), '(self.plotter.figure)\n', (2882, 2903), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3091), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (3073, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3280), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.figure'], {}), '(self.plotter.figure)\n', (3259, 3280), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3465), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.figure'], {}), '(self.plotter.figure)\n', (3444, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3550, 3564), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3562, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3802), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3800, 3802), True, 'import matplotlib.pyplot as plt\n'), ((4145, 4161), 'aspecd.plotting.Saver', 'plotting.Saver', ([], {}), '()\n', (4159, 4161), False, 'from aspecd import plotting, utils, dataset\n'), ((4408, 4424), 'aspecd.plotting.Saver', 'plotting.Saver', ([], {}), '()\n', (4422, 4424), False, 'from aspecd import plotting, utils, dataset\n'), ((4661, 4677), 'aspecd.plotting.Saver', 'plotting.Saver', ([], {}), '()\n', (4675, 4677), False, 'from aspecd import plotting, utils, dataset\n'), ((6097, 6121), 'aspecd.plotting.SinglePlotter', 'plotting.SinglePlotter', ([], {}), '()\n', (6119, 6121), False, 
'from aspecd import plotting, utils, dataset\n'), ((6597, 6614), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (6612, 6614), False, 'from aspecd import plotting, utils, dataset\n'), ((6718, 6735), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (6733, 6735), False, 'from aspecd import plotting, utils, dataset\n'), ((6918, 6935), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (6933, 6935), False, 'from aspecd import plotting, utils, dataset\n'), ((7128, 7145), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (7143, 7145), False, 'from aspecd import plotting, utils, dataset\n'), ((7846, 7863), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (7861, 7863), False, 'from aspecd import plotting, utils, dataset\n'), ((9535, 9561), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (9559, 9561), False, 'from aspecd import plotting, utils, dataset\n'), ((10286, 10303), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (10301, 10303), False, 'from aspecd import plotting, utils, dataset\n'), ((10333, 10353), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (10347, 10353), True, 'import numpy as np\n'), ((11943, 11964), 'numpy.linspace', 'np.linspace', (['(4)', '(5)', '(10)'], {}), '(4, 5, 10)\n', (11954, 11964), True, 'import numpy as np\n'), ((12295, 12316), 'numpy.linspace', 'np.linspace', (['(4)', '(5)', '(10)'], {}), '(4, 5, 10)\n', (12306, 12316), True, 'import numpy as np\n'), ((12698, 12721), 'numpy.random.random', 'np.random.random', (['[100]'], {}), '([100])\n', (12714, 12721), True, 'import numpy as np\n'), ((12761, 12795), 'numpy.linspace', 'np.linspace', (['np.pi', '(2 * np.pi)', '(100)'], {}), '(np.pi, 2 * np.pi, 100)\n', (12772, 12795), True, 'import numpy as np\n'), ((13140, 13163), 'numpy.random.random', 'np.random.random', (['[100]'], {}), '([100])\n', (13156, 13163), True, 'import numpy as np\n'), ((13203, 13237), 
'numpy.linspace', 'np.linspace', (['np.pi', '(2 * np.pi)', '(100)'], {}), '(np.pi, 2 * np.pi, 100)\n', (13214, 13237), True, 'import numpy as np\n'), ((13587, 13610), 'numpy.random.random', 'np.random.random', (['[100]'], {}), '([100])\n', (13603, 13610), True, 'import numpy as np\n'), ((13650, 13684), 'numpy.linspace', 'np.linspace', (['np.pi', '(2 * np.pi)', '(100)'], {}), '(np.pi, 2 * np.pi, 100)\n', (13661, 13684), True, 'import numpy as np\n'), ((14086, 14112), 'aspecd.plotting.SinglePlotter2D', 'plotting.SinglePlotter2D', ([], {}), '()\n', (14110, 14112), False, 'from aspecd import plotting, utils, dataset\n'), ((14391, 14412), 'numpy.random.random', 'np.random.random', (['[5]'], {}), '([5])\n', (14407, 14412), True, 'import numpy as np\n'), ((14982, 14999), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (14997, 14999), False, 'from aspecd import plotting, utils, dataset\n'), ((15029, 15049), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (15043, 15049), True, 'import numpy as np\n'), ((15219, 15236), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (15234, 15236), False, 'from aspecd import plotting, utils, dataset\n'), ((15270, 15294), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (15286, 15294), True, 'import numpy as np\n'), ((15989, 16006), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (16004, 16006), False, 'from aspecd import plotting, utils, dataset\n'), ((16040, 16064), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (16056, 16064), True, 'import numpy as np\n'), ((16207, 16228), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (16218, 16228), True, 'import numpy as np\n'), ((16370, 16393), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(5)'], {}), '(50, 100, 5)\n', (16381, 16393), True, 'import numpy as np\n'), ((16791, 16808), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), 
'()\n', (16806, 16808), False, 'from aspecd import plotting, utils, dataset\n'), ((16842, 16866), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (16858, 16866), True, 'import numpy as np\n'), ((16975, 16992), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (16990, 16992), False, 'from aspecd import plotting, utils, dataset\n'), ((17026, 17050), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (17042, 17050), True, 'import numpy as np\n'), ((17193, 17214), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (17204, 17214), True, 'import numpy as np\n'), ((17356, 17379), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(5)'], {}), '(50, 100, 5)\n', (17367, 17379), True, 'import numpy as np\n'), ((17890, 17907), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (17905, 17907), False, 'from aspecd import plotting, utils, dataset\n'), ((17941, 17965), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (17957, 17965), True, 'import numpy as np\n'), ((18564, 18581), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (18579, 18581), False, 'from aspecd import plotting, utils, dataset\n'), ((18615, 18639), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (18631, 18639), True, 'import numpy as np\n'), ((18881, 18898), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (18896, 18898), False, 'from aspecd import plotting, utils, dataset\n'), ((18932, 18956), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (18948, 18956), True, 'import numpy as np\n'), ((19073, 19090), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (19088, 19090), False, 'from aspecd import plotting, utils, dataset\n'), ((19124, 19148), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (19140, 19148), True, 'import numpy as np\n'), ((19442, 19459), 
'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (19457, 19459), False, 'from aspecd import plotting, utils, dataset\n'), ((19493, 19517), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (19509, 19517), True, 'import numpy as np\n'), ((19972, 19989), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (19987, 19989), False, 'from aspecd import plotting, utils, dataset\n'), ((20023, 20047), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (20039, 20047), True, 'import numpy as np\n'), ((20609, 20626), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (20624, 20626), False, 'from aspecd import plotting, utils, dataset\n'), ((20660, 20684), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (20676, 20684), True, 'import numpy as np\n'), ((21273, 21290), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (21288, 21290), False, 'from aspecd import plotting, utils, dataset\n'), ((21324, 21348), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (21340, 21348), True, 'import numpy as np\n'), ((21969, 21986), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (21984, 21986), False, 'from aspecd import plotting, utils, dataset\n'), ((22020, 22044), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (22036, 22044), True, 'import numpy as np\n'), ((22688, 22705), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (22703, 22705), False, 'from aspecd import plotting, utils, dataset\n'), ((22739, 22763), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (22755, 22763), True, 'import numpy as np\n'), ((23434, 23451), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (23449, 23451), False, 'from aspecd import plotting, utils, dataset\n'), ((23485, 23509), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (23501, 23509), 
True, 'import numpy as np\n'), ((23932, 23965), 'aspecd.plotting.SinglePlotter2DStacked', 'plotting.SinglePlotter2DStacked', ([], {}), '()\n', (23963, 23965), False, 'from aspecd import plotting, utils, dataset\n'), ((24105, 24134), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (24119, 24134), False, 'import os\n'), ((24466, 24487), 'numpy.random.random', 'np.random.random', (['[5]'], {}), '([5])\n', (24482, 24487), True, 'import numpy as np\n'), ((27412, 27433), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (27423, 27433), True, 'import numpy as np\n'), ((27575, 27599), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(10)'], {}), '(50, 100, 10)\n', (27586, 27599), True, 'import numpy as np\n'), ((28247, 28268), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (28258, 28268), True, 'import numpy as np\n'), ((28410, 28434), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(10)'], {}), '(50, 100, 10)\n', (28421, 28434), True, 'import numpy as np\n'), ((30895, 30919), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(10)'], {}), '(50, 100, 10)\n', (30906, 30919), True, 'import numpy as np\n'), ((31415, 31436), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (31426, 31436), True, 'import numpy as np\n'), ((31862, 31886), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(10)'], {}), '(50, 100, 10)\n', (31873, 31886), True, 'import numpy as np\n'), ((32471, 32492), 'numpy.linspace', 'np.linspace', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (32482, 32492), True, 'import numpy as np\n'), ((33021, 33045), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(10)'], {}), '(50, 100, 10)\n', (33032, 33045), True, 'import numpy as np\n'), ((33726, 33749), 'aspecd.plotting.MultiPlotter', 'plotting.MultiPlotter', ([], {}), '()\n', (33747, 33749), False, 'from aspecd import plotting, utils, dataset\n'), ((35694, 35711), 
'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (35709, 35711), False, 'from aspecd import plotting, utils, dataset\n'), ((35936, 35953), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (35951, 35953), False, 'from aspecd import plotting, utils, dataset\n'), ((37453, 37470), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (37468, 37470), False, 'from aspecd import plotting, utils, dataset\n'), ((39210, 39235), 'aspecd.plotting.MultiPlotter1D', 'plotting.MultiPlotter1D', ([], {}), '()\n', (39233, 39235), False, 'from aspecd import plotting, utils, dataset\n'), ((40095, 40112), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (40110, 40112), False, 'from aspecd import plotting, utils, dataset\n'), ((40142, 40162), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (40156, 40162), True, 'import numpy as np\n'), ((41342, 41359), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (41357, 41359), False, 'from aspecd import plotting, utils, dataset\n'), ((41731, 41763), 'aspecd.plotting.MultiPlotter1DStacked', 'plotting.MultiPlotter1DStacked', ([], {}), '()\n', (41761, 41763), False, 'from aspecd import plotting, utils, dataset\n'), ((43767, 43794), 'aspecd.plotting.CompositePlotter', 'plotting.CompositePlotter', ([], {}), '()\n', (43792, 43794), False, 'from aspecd import plotting, utils, dataset\n'), ((44816, 44842), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (44840, 44842), False, 'from aspecd import plotting, utils, dataset\n'), ((45327, 45353), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (45351, 45353), False, 'from aspecd import plotting, utils, dataset\n'), ((45891, 45917), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (45915, 45917), False, 'from aspecd import plotting, utils, dataset\n'), ((46652, 46678), 'aspecd.plotting.SinglePlotter1D', 
'plotting.SinglePlotter1D', ([], {}), '()\n', (46676, 46678), False, 'from aspecd import plotting, utils, dataset\n'), ((47167, 47193), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (47191, 47193), False, 'from aspecd import plotting, utils, dataset\n'), ((47794, 47820), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (47818, 47820), False, 'from aspecd import plotting, utils, dataset\n'), ((48294, 48327), 'aspecd.plotting.SingleCompositePlotter', 'plotting.SingleCompositePlotter', ([], {}), '()\n', (48325, 48327), False, 'from aspecd import plotting, utils, dataset\n'), ((48814, 48831), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (48829, 48831), False, 'from aspecd import plotting, utils, dataset\n'), ((48959, 48985), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (48983, 48985), False, 'from aspecd import plotting, utils, dataset\n'), ((49245, 49271), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (49269, 49271), False, 'from aspecd import plotting, utils, dataset\n'), ((49347, 49364), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (49362, 49364), False, 'from aspecd import plotting, utils, dataset\n'), ((49651, 49677), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (49675, 49677), False, 'from aspecd import plotting, utils, dataset\n'), ((49753, 49770), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (49768, 49770), False, 'from aspecd import plotting, utils, dataset\n'), ((50896, 50912), 'aspecd.plotting.Saver', 'plotting.Saver', ([], {}), '()\n', (50910, 50912), False, 'from aspecd import plotting, utils, dataset\n'), ((50984, 51013), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (50998, 51013), False, 'import os\n'), ((51598, 51616), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (51614, 
51616), False, 'from aspecd import plotting, utils, dataset\n'), ((51803, 51832), 'aspecd.plotting.Saver', 'plotting.Saver', (['self.filename'], {}), '(self.filename)\n', (51817, 51832), False, 'from aspecd import plotting, utils, dataset\n'), ((52155, 52173), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (52171, 52173), False, 'from aspecd import plotting, utils, dataset\n'), ((52603, 52621), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (52619, 52621), False, 'from aspecd import plotting, utils, dataset\n'), ((52852, 52870), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (52868, 52870), False, 'from aspecd import plotting, utils, dataset\n'), ((52962, 52993), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (52978, 52993), False, 'import os\n'), ((53209, 53227), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (53225, 53227), False, 'from aspecd import plotting, utils, dataset\n'), ((53308, 53339), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (53324, 53339), False, 'import os\n'), ((53618, 53636), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (53634, 53636), False, 'from aspecd import plotting, utils, dataset\n'), ((53728, 53759), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (53744, 53759), False, 'import os\n'), ((53966, 53983), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (53981, 53983), False, 'from aspecd import plotting, utils, dataset\n'), ((54002, 54028), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (54026, 54028), False, 'from aspecd import plotting, utils, dataset\n'), ((54244, 54261), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (54259, 54261), False, 'from aspecd import plotting, utils, dataset\n'), ((54295, 54315), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), 
'(3, 2)\n', (54309, 54315), True, 'import numpy as np\n'), ((54334, 54360), 'aspecd.plotting.SinglePlotter2D', 'plotting.SinglePlotter2D', ([], {}), '()\n', (54358, 54360), False, 'from aspecd import plotting, utils, dataset\n'), ((54568, 54591), 'aspecd.plotting.MultiPlotter', 'plotting.MultiPlotter', ([], {}), '()\n', (54589, 54591), False, 'from aspecd import plotting, utils, dataset\n'), ((54827, 54845), 'aspecd.plotting.Caption', 'plotting.Caption', ([], {}), '()\n', (54843, 54845), False, 'from aspecd import plotting, utils, dataset\n'), ((55613, 55641), 'aspecd.plotting.DrawingProperties', 'plotting.DrawingProperties', ([], {}), '()\n', (55639, 55641), False, 'from aspecd import plotting, utils, dataset\n'), ((57336, 57361), 'aspecd.plotting.LineProperties', 'plotting.LineProperties', ([], {}), '()\n', (57359, 57361), False, 'from aspecd import plotting, utils, dataset\n'), ((58702, 58730), 'aspecd.plotting.SurfaceProperties', 'plotting.SurfaceProperties', ([], {}), '()\n', (58728, 58730), False, 'from aspecd import plotting, utils, dataset\n'), ((59963, 59988), 'aspecd.plotting.PlotProperties', 'plotting.PlotProperties', ([], {}), '()\n', (59986, 59988), False, 'from aspecd import plotting, utils, dataset\n'), ((60926, 60944), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (60942, 60944), False, 'from aspecd import plotting, utils, dataset\n'), ((61128, 61150), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (61137, 61150), True, 'import matplotlib.pyplot as plt\n'), ((61254, 61281), 'aspecd.plotting.FigureProperties', 'plotting.FigureProperties', ([], {}), '()\n', (61279, 61281), False, 'from aspecd import plotting, utils, dataset\n'), ((62270, 62288), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (62286, 62288), False, 'from aspecd import plotting, utils, dataset\n'), ((62450, 62472), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (62459, 62472), 
True, 'import matplotlib.pyplot as plt\n'), ((62578, 62596), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (62594, 62596), False, 'from aspecd import plotting, utils, dataset\n'), ((62812, 62834), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (62821, 62834), True, 'import matplotlib.pyplot as plt\n'), ((62940, 62958), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (62956, 62958), False, 'from aspecd import plotting, utils, dataset\n'), ((63158, 63180), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (63167, 63180), True, 'import matplotlib.pyplot as plt\n'), ((63280, 63305), 'aspecd.plotting.AxesProperties', 'plotting.AxesProperties', ([], {}), '()\n', (63303, 63305), False, 'from aspecd import plotting, utils, dataset\n'), ((64483, 64501), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (64499, 64501), False, 'from aspecd import plotting, utils, dataset\n'), ((64659, 64681), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (64668, 64681), True, 'import matplotlib.pyplot as plt\n'), ((64837, 64862), 'aspecd.plotting.MultiPlotter1D', 'plotting.MultiPlotter1D', ([], {}), '()\n', (64860, 64862), False, 'from aspecd import plotting, utils, dataset\n'), ((65048, 65070), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (65057, 65070), True, 'import matplotlib.pyplot as plt\n'), ((65141, 65162), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (65152, 65162), True, 'import numpy as np\n'), ((65178, 65196), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (65194, 65196), False, 'from aspecd import plotting, utils, dataset\n'), ((65399, 65421), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (65408, 65421), True, 'import matplotlib.pyplot as plt\n'), ((65498, 65519), 'numpy.linspace', 'np.linspace', (['(0)', 
'(1)', '(11)'], {}), '(0, 1, 11)\n', (65509, 65519), True, 'import numpy as np\n'), ((65612, 65630), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (65628, 65630), False, 'from aspecd import plotting, utils, dataset\n'), ((65840, 65862), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (65849, 65862), True, 'import matplotlib.pyplot as plt\n'), ((65933, 65954), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (65944, 65954), True, 'import numpy as np\n'), ((65970, 65988), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (65986, 65988), False, 'from aspecd import plotting, utils, dataset\n'), ((66191, 66213), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (66200, 66213), True, 'import matplotlib.pyplot as plt\n'), ((66290, 66311), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (66301, 66311), True, 'import numpy as np\n'), ((66404, 66422), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (66420, 66422), False, 'from aspecd import plotting, utils, dataset\n'), ((66632, 66654), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (66641, 66654), True, 'import matplotlib.pyplot as plt\n'), ((66758, 66779), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (66769, 66779), True, 'import numpy as np\n'), ((66872, 66890), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (66888, 66890), False, 'from aspecd import plotting, utils, dataset\n'), ((67079, 67101), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (67088, 67101), True, 'import matplotlib.pyplot as plt\n'), ((67205, 67232), 'aspecd.plotting.LegendProperties', 'plotting.LegendProperties', ([], {}), '()\n', (67230, 67232), False, 'from aspecd import plotting, utils, dataset\n'), ((68236, 68254), 'aspecd.plotting.Plotter', 
'plotting.Plotter', ([], {}), '()\n', (68252, 68254), False, 'from aspecd import plotting, utils, dataset\n'), ((68496, 68518), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (68505, 68518), True, 'import matplotlib.pyplot as plt\n'), ((68652, 68670), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (68668, 68670), False, 'from aspecd import plotting, utils, dataset\n'), ((68938, 68960), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (68947, 68960), True, 'import matplotlib.pyplot as plt\n'), ((69109, 69127), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (69125, 69127), False, 'from aspecd import plotting, utils, dataset\n'), ((69385, 69407), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (69394, 69407), True, 'import matplotlib.pyplot as plt\n'), ((69545, 69563), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (69561, 69563), False, 'from aspecd import plotting, utils, dataset\n'), ((69840, 69862), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (69849, 69862), True, 'import matplotlib.pyplot as plt\n'), ((70087, 70112), 'aspecd.plotting.GridProperties', 'plotting.GridProperties', ([], {}), '()\n', (70110, 70112), False, 'from aspecd import plotting, utils, dataset\n'), ((71254, 71272), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (71270, 71272), False, 'from aspecd import plotting, utils, dataset\n'), ((71484, 71506), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (71493, 71506), True, 'import matplotlib.pyplot as plt\n'), ((71612, 71643), 'aspecd.plotting.SinglePlotProperties', 'plotting.SinglePlotProperties', ([], {}), '()\n', (71641, 71643), False, 'from aspecd import plotting, utils, dataset\n'), ((72572, 72596), 'aspecd.plotting.SinglePlotter', 'plotting.SinglePlotter', ([], {}), '()\n', (72594, 72596), False, 'from 
aspecd import plotting, utils, dataset\n'), ((72807, 72829), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (72816, 72829), True, 'import matplotlib.pyplot as plt\n'), ((72997, 73021), 'aspecd.plotting.SinglePlotter', 'plotting.SinglePlotter', ([], {}), '()\n', (73019, 73021), False, 'from aspecd import plotting, utils, dataset\n'), ((73261, 73283), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (73270, 73283), True, 'import matplotlib.pyplot as plt\n'), ((73401, 73427), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (73425, 73427), False, 'from aspecd import plotting, utils, dataset\n'), ((73642, 73664), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (73651, 73664), True, 'import matplotlib.pyplot as plt\n'), ((73772, 73805), 'aspecd.plotting.SinglePlot1DProperties', 'plotting.SinglePlot1DProperties', ([], {}), '()\n', (73803, 73805), False, 'from aspecd import plotting, utils, dataset\n'), ((73977, 74003), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (74001, 74003), False, 'from aspecd import plotting, utils, dataset\n'), ((74226, 74248), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (74235, 74248), True, 'import matplotlib.pyplot as plt\n'), ((74356, 74389), 'aspecd.plotting.SinglePlot2DProperties', 'plotting.SinglePlot2DProperties', ([], {}), '()\n', (74387, 74389), False, 'from aspecd import plotting, utils, dataset\n'), ((74559, 74585), 'aspecd.plotting.SinglePlotter2D', 'plotting.SinglePlotter2D', ([], {}), '()\n', (74583, 74585), False, 'from aspecd import plotting, utils, dataset\n'), ((74605, 74622), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (74620, 74622), False, 'from aspecd import plotting, utils, dataset\n'), ((74652, 74676), 'numpy.random.random', 'np.random.random', (['[5, 5]'], {}), '([5, 5])\n', (74668, 74676), True, 
'import numpy as np\n'), ((74879, 74901), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (74888, 74901), True, 'import matplotlib.pyplot as plt\n'), ((75006, 75036), 'aspecd.plotting.MultiPlotProperties', 'plotting.MultiPlotProperties', ([], {}), '()\n', (75034, 75036), False, 'from aspecd import plotting, utils, dataset\n'), ((76073, 76096), 'aspecd.plotting.MultiPlotter', 'plotting.MultiPlotter', ([], {}), '()\n', (76094, 76096), False, 'from aspecd import plotting, utils, dataset\n'), ((76326, 76348), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (76335, 76348), True, 'import matplotlib.pyplot as plt\n'), ((76516, 76540), 'aspecd.plotting.SinglePlotter', 'plotting.SinglePlotter', ([], {}), '()\n', (76538, 76540), False, 'from aspecd import plotting, utils, dataset\n'), ((76780, 76802), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (76789, 76802), True, 'import matplotlib.pyplot as plt\n'), ((76922, 76945), 'aspecd.plotting.MultiPlotter', 'plotting.MultiPlotter', ([], {}), '()\n', (76943, 76945), False, 'from aspecd import plotting, utils, dataset\n'), ((76965, 76982), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (76980, 76982), False, 'from aspecd import plotting, utils, dataset\n'), ((77345, 77370), 'matplotlib.pyplot.close', 'plt.close', (['plotter.figure'], {}), '(plotter.figure)\n', (77354, 77370), True, 'import matplotlib.pyplot as plt\n'), ((78995, 79027), 'aspecd.plotting.MultiPlot1DProperties', 'plotting.MultiPlot1DProperties', ([], {}), '()\n', (79025, 79027), False, 'from aspecd import plotting, utils, dataset\n'), ((80774, 80808), 'aspecd.plotting.CompositePlotProperties', 'plotting.CompositePlotProperties', ([], {}), '()\n', (80806, 80808), False, 'from aspecd import plotting, utils, dataset\n'), ((81527, 81554), 'aspecd.plotting.CompositePlotter', 'plotting.CompositePlotter', ([], {}), '()\n', (81552, 81554), False, 'from 
aspecd import plotting, utils, dataset\n'), ((81666, 81692), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (81690, 81692), False, 'from aspecd import plotting, utils, dataset\n'), ((82087, 82109), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (82096, 82109), True, 'import matplotlib.pyplot as plt\n'), ((82241, 82268), 'aspecd.plotting.CompositePlotter', 'plotting.CompositePlotter', ([], {}), '()\n', (82266, 82268), False, 'from aspecd import plotting, utils, dataset\n'), ((82394, 82420), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (82418, 82420), False, 'from aspecd import plotting, utils, dataset\n'), ((82970, 82992), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (82979, 82992), True, 'import matplotlib.pyplot as plt\n'), ((83110, 83137), 'aspecd.plotting.CompositePlotter', 'plotting.CompositePlotter', ([], {}), '()\n', (83135, 83137), False, 'from aspecd import plotting, utils, dataset\n'), ((83249, 83275), 'aspecd.plotting.SinglePlotter1D', 'plotting.SinglePlotter1D', ([], {}), '()\n', (83273, 83275), False, 'from aspecd import plotting, utils, dataset\n'), ((83724, 83746), 'matplotlib.pyplot.close', 'plt.close', (['plot.figure'], {}), '(plot.figure)\n', (83733, 83746), True, 'import matplotlib.pyplot as plt\n'), ((566, 590), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (575, 590), False, 'import os\n'), ((632, 659), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (641, 659), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6215), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (6197, 6215), True, 'import matplotlib.pyplot as plt\n'), ((8890, 8911), 'aspecd.dataset.plot', 'dataset.plot', (['plotter'], {}), '(plotter)\n', (8902, 8911), False, 'from aspecd import plotting, utils, dataset\n'), ((9421, 9442), 
'aspecd.dataset.plot', 'dataset.plot', (['plotter'], {}), '(plotter)\n', (9433, 9442), False, 'from aspecd import plotting, utils, dataset\n'), ((9628, 9655), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (9637, 9655), True, 'import matplotlib.pyplot as plt\n'), ((11529, 11551), 'numpy.random.random', 'np.random.random', (['[10]'], {}), '([10])\n', (11545, 11551), True, 'import numpy as np\n'), ((11877, 11899), 'numpy.random.random', 'np.random.random', (['[10]'], {}), '([10])\n', (11893, 11899), True, 'import numpy as np\n'), ((12229, 12251), 'numpy.random.random', 'np.random.random', (['[10]'], {}), '([10])\n', (12245, 12251), True, 'import numpy as np\n'), ((14179, 14206), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (14188, 14206), True, 'import matplotlib.pyplot as plt\n'), ((24066, 24093), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (24075, 24093), True, 'import matplotlib.pyplot as plt\n'), ((24148, 24172), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (24157, 24172), False, 'import os\n'), ((24898, 24923), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (24914, 24923), True, 'import numpy as np\n'), ((25261, 25286), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (25277, 25286), True, 'import numpy as np\n'), ((25527, 25552), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (25543, 25552), True, 'import numpy as np\n'), ((25942, 25967), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (25958, 25967), True, 'import numpy as np\n'), ((26273, 26298), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (26289, 26298), True, 'import numpy as np\n'), ((26714, 26739), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (26730, 26739), 
True, 'import numpy as np\n'), ((27238, 27263), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (27254, 27263), True, 'import numpy as np\n'), ((28073, 28098), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (28089, 28098), True, 'import numpy as np\n'), ((28885, 28910), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (28901, 28910), True, 'import numpy as np\n'), ((29235, 29260), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (29251, 29260), True, 'import numpy as np\n'), ((29569, 29594), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (29585, 29594), True, 'import numpy as np\n'), ((30342, 30367), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (30358, 30367), True, 'import numpy as np\n'), ((30554, 30583), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (30568, 30583), False, 'import os\n'), ((30722, 30747), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (30738, 30747), True, 'import numpy as np\n'), ((31241, 31266), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (31257, 31266), True, 'import numpy as np\n'), ((31689, 31714), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (31705, 31714), True, 'import numpy as np\n'), ((32297, 32322), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (32313, 32322), True, 'import numpy as np\n'), ((32848, 32873), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (32864, 32873), True, 'import numpy as np\n'), ((33434, 33459), 'numpy.random.random', 'np.random.random', (['[5, 10]'], {}), '([5, 10])\n', (33450, 33459), True, 'import numpy as np\n'), ((33816, 33843), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (33825, 33843), True, 'import 
matplotlib.pyplot as plt\n'), ((34340, 34357), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (34355, 34357), False, 'from aspecd import plotting, utils, dataset\n'), ((35416, 35433), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (35431, 35433), False, 'from aspecd import plotting, utils, dataset\n'), ((36769, 36786), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (36784, 36786), False, 'from aspecd import plotting, utils, dataset\n'), ((37037, 37054), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (37052, 37054), False, 'from aspecd import plotting, utils, dataset\n'), ((39302, 39329), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (39311, 39329), True, 'import matplotlib.pyplot as plt\n'), ((40415, 40432), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (40430, 40432), False, 'from aspecd import plotting, utils, dataset\n'), ((40566, 40583), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (40581, 40583), False, 'from aspecd import plotting, utils, dataset\n'), ((40768, 40785), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (40783, 40785), False, 'from aspecd import plotting, utils, dataset\n'), ((41145, 41162), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (41160, 41162), False, 'from aspecd import plotting, utils, dataset\n'), ((41854, 41879), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (41865, 41879), True, 'import numpy as np\n'), ((42086, 42113), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (42095, 42113), True, 'import matplotlib.pyplot as plt\n'), ((43893, 43923), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (43904, 43923), True, 'import numpy as np\n'), ((43989, 44016), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), 
'(self.plotter.fig)\n', (43998, 44016), True, 'import matplotlib.pyplot as plt\n'), ((48394, 48421), 'matplotlib.pyplot.close', 'plt.close', (['self.plotter.fig'], {}), '(self.plotter.fig)\n', (48403, 48421), True, 'import matplotlib.pyplot as plt\n'), ((50254, 50275), 'aspecd.dataset.plot', 'dataset.plot', (['plotter'], {}), '(plotter)\n', (50266, 50275), False, 'from aspecd import plotting, utils, dataset\n'), ((50794, 50815), 'aspecd.dataset.plot', 'dataset.plot', (['plotter'], {}), '(plotter)\n', (50806, 50815), False, 'from aspecd import plotting, utils, dataset\n'), ((51027, 51051), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (51036, 51051), False, 'import os\n'), ((51122, 51155), 'matplotlib.pyplot.close', 'plt.close', (['self.saver.plotter.fig'], {}), '(self.saver.plotter.fig)\n', (51131, 51155), True, 'import matplotlib.pyplot as plt\n'), ((52746, 52775), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (52760, 52775), False, 'import os\n'), ((53099, 53128), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (53113, 53128), False, 'import os\n'), ((53503, 53532), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (53517, 53532), False, 'import os\n'), ((53865, 53894), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (53879, 53894), False, 'import os\n'), ((54624, 54641), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (54639, 54641), False, 'from aspecd import plotting, utils, dataset\n'), ((66924, 66960), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (66947, 66960), False, 'import warnings\n'), ((76122, 76139), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (76137, 76139), False, 'from aspecd import plotting, utils, dataset\n'), ((81783, 81813), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), 
'(0, 2 * np.pi, 101)\n', (81794, 81813), True, 'import numpy as np\n'), ((82511, 82541), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (82522, 82541), True, 'import numpy as np\n'), ((83420, 83450), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(101)'], {}), '(0, 2 * np.pi, 101)\n', (83431, 83450), True, 'import numpy as np\n'), ((8443, 8460), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (8458, 8460), False, 'from aspecd import plotting, utils, dataset\n'), ((10156, 10173), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (10171, 10173), False, 'from aspecd import plotting, utils, dataset\n'), ((10962, 10979), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (10977, 10979), False, 'from aspecd import plotting, utils, dataset\n'), ((11243, 11260), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (11258, 11260), False, 'from aspecd import plotting, utils, dataset\n'), ((37150, 37163), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (37161, 37163), False, 'import io\n'), ((48015, 48028), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (48026, 48028), False, 'import io\n'), ((51499, 51517), 'aspecd.plotting.Plotter', 'plotting.Plotter', ([], {}), '()\n', (51515, 51517), False, 'from aspecd import plotting, utils, dataset\n'), ((65563, 65584), 'numpy.linspace', 'np.linspace', (['(2)', '(3)', '(11)'], {}), '(2, 3, 11)\n', (65574, 65584), True, 'import numpy as np\n'), ((66355, 66376), 'numpy.linspace', 'np.linspace', (['(2)', '(3)', '(11)'], {}), '(2, 3, 11)\n', (66366, 66376), True, 'import numpy as np\n'), ((66823, 66844), 'numpy.linspace', 'np.linspace', (['(2)', '(3)', '(11)'], {}), '(2, 3, 11)\n', (66834, 66844), True, 'import numpy as np\n'), ((68315, 68328), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (68326, 68328), False, 'import io\n'), ((68813, 68826), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (68824, 68826), False, 'import 
io\n'), ((69260, 69273), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (69271, 69273), False, 'import io\n'), ((69706, 69719), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (69717, 69719), False, 'import io\n'), ((72623, 72640), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (72638, 72640), False, 'from aspecd import plotting, utils, dataset\n'), ((73048, 73065), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (73063, 73065), False, 'from aspecd import plotting, utils, dataset\n'), ((73454, 73471), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (73469, 73471), False, 'from aspecd import plotting, utils, dataset\n'), ((74030, 74047), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (74045, 74047), False, 'from aspecd import plotting, utils, dataset\n'), ((76567, 76584), 'aspecd.dataset.Dataset', 'dataset.Dataset', ([], {}), '()\n', (76582, 76584), False, 'from aspecd import plotting, utils, dataset\n'), ((77115, 77128), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (77126, 77128), False, 'import io\n')] |
import cv2
import numpy as np
import socket
# Network endpoints: static LAN IP addresses of the two lamp-controller
# Arduinos and of the result/monitoring server, plus their TCP ports
# (see clientSocket() below for how `lamp` selects among them).
Arduino_1 = '192.168.100.16'      # lamp controller 1 (lamp == 1)
Arduino_2 = '192.168.100.17'      # lamp controller 2 (lamp == 2)
Server_Result = '192.168.100.13'  # monitoring / result server (any other lamp value)
PORT_1 = 8888                     # command port used by both Arduinos
MONITORING_PORT = 4500            # port on the monitoring server
class Connection:
    """Thin wrapper around a TCP client socket.

    Holds a host/port pair and provides small helpers to create, connect,
    write to, and close a stream socket. The socket object itself is passed
    around explicitly rather than stored on the instance.
    """

    def __init__(self, HOST, PORT):
        self.HOST = HOST
        self.PORT = PORT

    def createSocket(self):
        # Plain IPv4 / TCP stream socket.
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, sock):
        endpoint = (self.HOST, self.PORT)
        print('connecting to {} port {}'.format(*endpoint))
        sock.connect(endpoint)

    def sendPacket(self, message, sock):
        print('sending {!r}'.format(message))
        sock.sendall(message)

    def closeSocket(self, sock):
        print('closing socket')
        sock.close()
def clientSocket(msg, lamp):
    """Open a TCP connection, send ``msg``, and always close the socket.

    Parameters
    ----------
    msg : bytes
        Payload passed straight to ``socket.sendall`` (callers in this file
        pass byte strings such as ``b'1'``).
    lamp : int
        Destination selector: 1 -> Arduino_1, 2 -> Arduino_2, any other
        value -> the monitoring server.
    """
    if lamp == 1:
        tcpConnection = Connection(Arduino_1, PORT_1)
    elif lamp == 2:
        tcpConnection = Connection(Arduino_2, PORT_1)
    else:
        tcpConnection = Connection(Server_Result, MONITORING_PORT)
    sock = tcpConnection.createSocket()
    tcpConnection.connect(sock)
    try:
        # (Removed dead `msg = msg` self-assignment and the commented-out
        # response-reading loop that was never enabled.)
        tcpConnection.sendPacket(msg, sock)
    finally:
        # Guarantee the socket is released even if sendall raises.
        tcpConnection.closeSocket(sock)
thres = 0.45 # Threshold to detect object
nms_threshold = 0.2  # overlap threshold for non-maximum suppression
# COCO class labels, one name per line in 'coco.names'.
classNames= []
classFile = 'coco.names'
with open(classFile,'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
#print(classNames)
# SSD MobileNet v3 model files (TensorFlow frozen graph + pbtxt config).
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
# Distance calibration sample: a 175 cm tall person standing 200 cm from
# the camera appears 487 px tall in the frame.
known_distance = 200 #centimeter
known_frame_height = 487  # pixels
real_sample_height = 175  # centimeters
# Build the detector with the preprocessing the model expects:
# 320x320 input, pixel values mapped to [-1, 1] via scale 1/127.5 and
# mean 127.5, and BGR->RGB channel swap.
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320,320)
net.setInputScale(1.0/ 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
def getFocalLength(known_distance, real_sample_height, known_frame_height):
    """Derive the camera focal length (in pixels) from one calibration shot.

    Pinhole model: focal = (pixel_height * distance) / real_height, using a
    reference object of known real height photographed at a known distance.
    """
    return known_frame_height * known_distance / real_sample_height
def calculateDistance(focal_length, real_sample_height, real_time_frame_height):
    """Invert the pinhole model: estimate object distance from its pixel height.

    distance = (real_height * focal_length) / pixel_height, in the same
    length unit as ``real_sample_height``.
    """
    return real_sample_height * focal_length / real_time_frame_height
def getObjects(img, draw=True, objects=None):
    """Run the SSD detector on ``img`` and return the filtered detections.

    Parameters
    ----------
    img : BGR frame (ndarray) to run detection on; boxes are drawn onto it
        in place when ``draw`` is True.
    draw : when True, draw the bounding box and estimated distance on ``img``.
    objects : iterable of class names to keep; ``None`` or an empty list
        means "keep every class in ``classNames``" (same behaviour the old
        ``objects=[]`` default had, without a mutable default argument).

    Returns
    -------
    (img, objectInfo) where objectInfo is a list of ``[[x, y, w, h], className]``.
    """
    classIds, confs, bbox = net.detect(img, confThreshold=thres)
    bbox = list(bbox)
    confs = list(np.array(confs).reshape(1, -1)[0])
    confs = list(map(float, confs))
    if not objects:
        objects = classNames
    objectInfo = []
    # Non-maximum suppression drops heavily overlapping candidate boxes.
    indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
    # Loop-invariant: the focal length depends only on module-level
    # calibration constants, so compute it once instead of per detection.
    focal_length = getFocalLength(known_distance, real_sample_height,
                                  known_frame_height)
    for i in indices:
        # NOTE(review): assumes NMSBoxes returns nested [[i], ...] indices
        # (OpenCV < 4.5.4); newer versions return a flat array - confirm
        # against the installed cv2 version.
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        className = classNames[classIds[i][0] - 1]
        if className in objects:
            objectInfo.append([[x, y, w, h], className])
            # Distance estimate from the detected box height (pinhole model).
            Distance = calculateDistance(focal_length, real_sample_height, h)
            if draw:
                cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 225, 0), thickness=2)
                cv2.putText(img, f"Distance = {round(Distance, 2)} CM", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
    return img, objectInfo
#if len(classIds) != 0:
# for classId, confidence,box in zip(classIds.flatten(),confs.flatten(),bbox):
# cv2.rectangle(img,box,color=(0,255,0),thickness=2)
# cv2.putText(img,classNames[classId-1].upper(),(box[0]+10,box[1]+30),
# cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
# cv2.putText(img,str(round(confidence*100,2)),(box[0]+200,box[1]+30),
# cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
if __name__ == "__main__":
    # Open the default camera and configure capture properties
    # (3 = frame width, 4 = frame height, 10 = brightness).
    cap = cv2.VideoCapture(0)
    cap.set(3, 1280)
    cap.set(4, 720)
    cap.set(10, 50)
    try:
        while True:
            success, img = cap.read()
            if not success:
                # Camera unplugged or no frame available - stop cleanly
                # instead of passing an invalid frame to the detector.
                break
            result, objectInfo = getObjects(img, objects=['person'])
            for final_box in objectInfo:
                height = final_box[0][3]
                print("Person Height :", height)
            cv2.imshow("Output", img)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
    finally:
        # Release the camera and close the window even on error / Ctrl-C;
        # the original leaked the VideoCapture handle.
        cap.release()
        cv2.destroyAllWindows()
"cv2.rectangle",
"socket.socket",
"cv2.imshow",
"numpy.array",
"cv2.dnn_DetectionModel",
"cv2.VideoCapture",
"cv2.dnn.NMSBoxes",
"cv2.waitKey"
] | [((2136, 2183), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['weightsPath', 'configPath'], {}), '(weightsPath, configPath)\n', (2158, 2183), False, 'import cv2\n'), ((3073, 3124), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bbox', 'confs', 'thres', 'nms_threshold'], {}), '(bbox, confs, thres, nms_threshold)\n', (3089, 3124), False, 'import cv2\n'), ((4940, 4959), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4956, 4959), False, 'import cv2\n'), ((398, 447), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (411, 447), False, 'import socket\n'), ((5781, 5806), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img'], {}), "('Output', img)\n", (5791, 5806), False, 'import cv2\n'), ((5821, 5835), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5832, 5835), False, 'import cv2\n'), ((3923, 3997), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)'], {'color': '(0, 225, 0)', 'thickness': '(2)'}), '(img, (x, y), (x + w, y + h), color=(0, 225, 0), thickness=2)\n', (3936, 3997), False, 'import cv2\n'), ((2824, 2839), 'numpy.array', 'np.array', (['confs'], {}), '(confs)\n', (2832, 2839), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import xml.etree.ElementTree as ET
from os.path import isfile, join
from os import getcwd
from scipy.spatial import distance
##############################
# MACROS
#################################
# Previous parameter set, kept commented out for reference:
# # Geometry data
# A = -65
# B = 25
# YMAX = 20
# THICKNESS = -10 # negative to match equation
# # Mesh data
# VERTICAL_RES = 60
# N_LAYERS = 4 # from endo to epi, not including endo
# CIRCUNFERENTIAL_RES = 30
# Geometry data: ellipse semi-axes and centre (a pairs with y, b with x
# in ellipse() below; A is negative to match the equation's orientation).
A = -65
B = 25
H = 0
K = 0
YMAX = 20
_TYPE = 1  # ellipse equation solved for x given y (see ellipse())
N_INTERNAL_LAYERS = 3 # Horizontal res --> will add two layers (internal and external)
N_NODES_PER_LAYER = 20 # Vertical res --> will add one or two nodes to fix top/bottom constraints
N_REVS = 9 # Circumferential res --> needs to be a multiple of 3
##############################
# 2D Functions
#################################
class vector2d:
    """A 2-D vector defined by two endpoints.

    Attributes:
        p1, p2:  the endpoints, as given.
        vec:     the displacement p2 - p1 as an ndarray.
        unit:    vec normalised to unit length.
        to_plot: [[x1, x2], [y1, y2]] pairs, convenient for matplotlib.
        normal:  a vector2d perpendicular to this one, or None when
                 has_normal is False (which also stops the recursion).
    """

    def __init__(self, p1, p2, has_normal=True):
        self.p1 = p1
        self.p2 = p2
        self.vec = self.vector2d()
        self.unit = self.unit_vector()
        self.to_plot = [[p1[0], p2[0]], [p1[1], p2[1]]]
        if has_normal:
            # Build the perpendicular with has_normal=False so it does not
            # recurse forever.
            self.normal = vector2d([-self.vec[1], self.vec[0]],
                                   [self.vec[1], -self.vec[0]],
                                   has_normal=False)
        else:
            self.normal = None

    def __call__(self):
        return self.vec

    def __str__(self):
        return f"Vector2d: p1: {self.p1} p2: {self.p2}"

    def vector2d(self):
        # Component-wise difference p2 - p1.
        return np.array([q - p for p, q in zip(self.p1, self.p2)])

    def unit_vector(self):
        return self.vec / np.linalg.norm(self.vec)

    def rotate(self, theta):
        """Return a new vector2d: this vector rotated by theta radians about p1."""
        c, s = np.cos(theta), np.sin(theta)
        rotation = np.array([[c, -s], [s, c]])
        end = np.matmul(rotation, self.vec) + np.array(self.p1)
        return vector2d(self.p1, end)
def vector2dFromP1(center, length, dir):
    """Build a vector2d of the given length starting at ``center`` along ``dir``.

    ``dir`` is a 2-component direction; the endpoint is
    center + length * dir (no normalisation is applied to ``dir``).
    """
    start = np.array(center)
    end = np.array([length * dir[0], length * dir[1]]) + start
    return vector2d(start, end)
def angle_between(v1, v2):
    """Return the angle in radians between vectors ``v1`` and ``v2``.

    Uses the unit vectors' dot product, clipped to [-1, 1] so floating-point
    round-off cannot push arccos out of its domain.
    """
    cos_theta = np.clip(np.dot(v1.unit, v2.unit), -1.0, 1.0)
    return np.arccos(cos_theta)
def regress(xs, ys, deg):
    """Least-squares fit a degree-``deg`` polynomial to (xs, ys).

    Returns a callable ``np.poly1d`` so the fit can be evaluated directly,
    e.g. ``regress(xs, ys, 2)(x0)``.
    """
    return np.poly1d(np.polyfit(xs, ys, deg))
# Ellipse Functions
def ellipse(a, b, h=0, k=0, _type=0, ref=-1):
    """Return an evaluator for one branch of the ellipse
    ((x-h)/b)^2 + ((y-k)/a)^2 = 1.

    _type == 0: evaluator maps x -> y; _type == 1: maps y -> x; any other
    value yields an evaluator that returns None. ``ref`` (+1/-1) selects
    which branch (sign) of the square root is used.
    """
    def eq(val):
        if _type == 0:
            # y as a function of x
            return (a / b) * -ref * np.sqrt(b ** 2 - (val - h) ** 2) + k
        if _type == 1:
            # x as a function of y
            return (b / a) * ref * np.sqrt(a ** 2 - (val - k) ** 2) + h
    return eq
def ellipse_focci(a, b, h=0, k=0):
    """Return the two foci of the ellipse with semi-axes a (along y) and b.

    The foci lie on the vertical axis at (h, k +/- c) with c = sqrt(a^2 - b^2);
    returned as (upper, lower) ndarrays.
    """
    offset = np.sqrt(a ** 2 - b ** 2)
    return np.array([h, k + offset]), np.array([h, k - offset])
def sctattered_ellipse(a, b, h, k, yrange, xrange, x_res, y_res):
    """Sample points on an ellipse, densified along both axes.

    Samples x_res points parameterised by y and y_res points parameterised
    by x (via the two `ellipse` closures), merges them, and sorts by y.

    Returns
    -------
    (coords, xy) : (N, 2) array of points sorted by y, and its 2xN transpose
        (xy[0] are the x values, xy[1] the y values).
    """
    # Two closures: x as a function of y, and y as a function of x.
    x_from_y = ellipse(a, b, h, k, 1)
    y_from_x = ellipse(a, b, h, k, 0)
    ymin, ymax = np.min(yrange), np.max(yrange)
    xmin, xmax = np.min(xrange), np.max(xrange)
    # Sample along y first, then along x, so coverage stays dense everywhere.
    ys_a = np.linspace(ymin, ymax, x_res)
    xs_a = np.array([x_from_y(y) for y in ys_a])
    xs_b = np.linspace(xmin, xmax, y_res)
    ys_b = np.array([y_from_x(x) for x in xs_b])
    xs = np.append(xs_a, xs_b)
    ys = np.append(ys_a, ys_b)
    # Pair up and order every sample by its y value.
    coords = np.column_stack((xs, ys))
    coords = coords[np.argsort(coords[:, 1])]
    xy = np.vstack((coords[:, 0], coords[:, 1]))
    return coords, xy
def regressed_ellipse(a,b,h,k, yrange, xrange, yswitch=0.80, breakpoints=None, res=100, deg=2, axis=1):
    """Approximate an ellipse by piecewise polynomial regression over y.

    Samples the ellipse with `sctattered_ellipse`, splits the samples into
    y-intervals at `breakpoints`, and fits a degree-`deg` polynomial
    (x as a function of y) to each interval with `regress`.

    Parameters
    ----------
    a, b, h, k : float
        Ellipse semi-axes and centre, forwarded to `sctattered_ellipse`.
    yrange, xrange : array-like
        Ranges limiting the sampled portion of the ellipse.
    yswitch : float
        Scaled by `a` for a switching height; currently only computed
        (the filtering that used it was removed upstream).
    breakpoints : sequence or None
        y values splitting the regression pieces. Defaults to [0];
        ymin/ymax are prepended/appended automatically when missing.
    res : int
        Sampling resolution passed to `sctattered_ellipse`.
    deg : int
        Polynomial degree per piece.
    axis : int
        Unused; kept for backward compatibility.

    Returns
    -------
    callable
        reg_ell(val): x predicted for the y value `val` (returns the scalar
        0 exactly at ymin).
    """
    # Avoid a mutable default argument; [0] remains the effective default.
    if breakpoints is None:
        breakpoints = [0]
    ymin = np.min(yrange)
    ymax = np.max(yrange)
    # Sample the ellipse; only the (N, 2) array sorted by y is needed here.
    s_original, _ = sctattered_ellipse(a,b,h,k, yrange, xrange, res, res)
    # Scale the switching height by the semi-axis a (legacy behaviour).
    yswitch = a * yswitch
    # Make sure the breakpoints span the full sampled y range.
    breakpoints = np.insert(breakpoints, 0, ymin) if ymin not in breakpoints else breakpoints
    breakpoints = np.append(breakpoints, ymax) if ymax not in breakpoints else breakpoints
    # Fit one polynomial (x as a function of y) per breakpoint interval.
    polys = []
    r_range = range(len(breakpoints) - 1)
    count = 1  # running cursor into s_original; intervals consume points in order
    for i in r_range:
        s = [[],[]]
        for j in range(count-1,len(s_original)):
            yval = s_original[j][1]
            if breakpoints[i] <= yval <= breakpoints[i+1]:
                s[0].append(s_original[j][0])
                s[1].append(s_original[j][1])
                count += 1
            else:
                # s_original is sorted by y, so the first miss ends this interval.
                break
        polys.append(regress(s[1], s[0], deg))
    def reg_ell(val):
        # NOTE(review): returns the scalar 0 at exactly ymin, and `index` is
        # unbound (NameError) for values outside [ymin, ymax] -- confirm
        # callers only evaluate inside the sampled range.
        if val == ymin:
            return 0
        else:
            for i in r_range:
                if breakpoints[i] <= val <= breakpoints[i+1]:
                    index = i
                    break
            return polys[index](val)
    return reg_ell
def distributed_ellipse(a, b, h, k, yrange, xrange, x_res=500, y_res=500, dist_res=50, err=0.05):
    """Resample the ellipse to roughly `dist_res` evenly spaced points.

    A dense ellipse is generated with `sctattered_ellipse`; points are then
    kept whenever the accumulated arc-chord distance (measured from the
    first point via a pairwise euclidean distance matrix) falls within
    `err` of the target spacing. First and last points are always kept.

    Returns
    -------
    (coords, xy) : kept points as an (M, 2) array and as a 2xM array.
    """
    src_coords, src_xy = sctattered_ellipse(a, b, h, k, yrange, xrange, x_res, y_res)
    # Pairwise distances; row 0 holds each point's distance from the start.
    pairwise = distance.cdist(src_coords, src_coords, 'euclidean')
    # Target spacing with a tolerance band of +/- err.
    step = pairwise[0][-1] / (dist_res - 1)
    min_dist = step * (1 - err)
    max_dist = step * (1 + err)
    acc = 0
    # Always keep the first point.
    kept_coords = [src_coords[0]]
    kept_xy = [[src_xy[0][0]], [src_xy[1][0]]]
    for i in range(len(pairwise) - 1):
        # Accumulate the change in distance-from-start between neighbours.
        acc += pairwise[i + 1][0] - pairwise[i][0]
        if min_dist <= acc <= max_dist:
            kept_coords.append(src_coords[i])
            kept_xy[0].append(src_xy[0][i])
            kept_xy[1].append(src_xy[1][i])
            acc = 0
    # Always keep the last point.
    kept_coords.append(src_coords[-1])
    kept_xy[0].append(src_xy[0][-1])
    kept_xy[1].append(src_xy[1][-1])
    return np.array(kept_coords), np.array(kept_xy)
# Geometry build functions
def refractions(ell_coords, focci, n1, n2, bias_factor=0, plot_ax=None, flat_top=True):
    """Trace rays from `focci` to each ellipse point and refract them.

    For each point a refracted ray and the outward normal are computed
    (special-cased on the y axis and, if flat_top, near the last points).
    Returns (list of (refracted_ray, normal) pairs, plot data from the
    last processed point).
    """
    # NOTE: Refreaction only inside object (not on edges)
    def snellsLaw(n1,n2,theta1):
        """ Returns theta2 based on snell's refraction law"""
        theta2 = np.arcsin((n1/n2) * np.sin(theta1))
        # if theta2 <= np.pi * 0.5:
        #     print("true")
        #     theta2 = -theta2
        return theta2
    refracs = []
    # i starts at -1 so ell_coords[i+1] walks the points from index 0.
    for i in range(-1, len(ell_coords) - 1):
        # Calculate "refraction" rays for borders along y axis
        if i < 0 and ell_coords[i+1][0] == 0:
            # First point sits on the y axis: pass the ray straight through.
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i+1],ell_coords[i+2]) # Not really used (just for plot consistence)
            n_vec1 = vector2dFromP1(ref_vector.p1, 5, incomming_ray.normal.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p1, 5, -incomming_ray.normal.unit_vector())
            refracted_ray = vector2dFromP1(ref_vector.p1, 5, incomming_ray.unit_vector())
        elif flat_top == True and i >= len(ell_coords) - 4:
            # Near the top with a flat cap: rays exit horizontally.
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i+1], [ell_coords[i+1][0] + 5, ell_coords[i+1][1]]) # Not really used (just for plot consistence)
            n_vec1 = vector2dFromP1(ref_vector.p1, 5, -ref_vector.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p1, 5, ref_vector.unit_vector())
            refracted_ray = vector2dFromP1(ref_vector.p1, 5, ref_vector.unit_vector())
        else:
            # Get incomming ray and ref vectors
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i],ell_coords[i+1])
            # Get normal vectors (2 of them for plotting)
            n_vec1 = vector2dFromP1(ref_vector.p2, 5, -ref_vector.normal.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p2, 5, ref_vector.normal.unit_vector())
        # Refraction angle will be used for yvals below than zero
        # NOTE(review): this branch runs for every case above, so the
        # refracted_ray from the first two branches can be overwritten here
        # whenever n_vec2.p1[1] < 0 -- confirm that is intentional.
        if n_vec2.p1[1] < 0:
            # Calculate refraction angle
            theta1 = angle_between(incomming_ray, n_vec1)
            theta2 = snellsLaw(n1,n2,theta1)
            # Apply bias factor
            # NOTE(review): bias_factor is reassigned here, so it compounds
            # across loop iterations -- confirm this is intended.
            bias_factor = bias_factor * 1/np.log(abs(n_vec2.p1[1]) + 100)
            theta2 = theta2 * (1 - bias_factor)
            # Rotate vec_2 based on theta 2
            refracted_ray = n_vec2.rotate(-theta2) if n_vec2.p1[1] < 0 else n_vec2.rotate(theta2)
        else:
            refracted_ray = n_vec2
        # n_vec2 = n_vec1
        refracs.append((refracted_ray, n_vec2))
        # Storing info for plot
        if plot_ax != None:
            xs = []
            ys = []
            xs.extend(incomming_ray.to_plot[0])
            xs.extend(ref_vector.to_plot[0])
            xs.extend(refracted_ray.to_plot[0])
            ys.extend(incomming_ray.to_plot[1])
            ys.extend(ref_vector.to_plot[1])
            ys.extend(refracted_ray.to_plot[1])
            xs1 = []
            ys1 = []
            # xs1.extend(n_vec1.to_plot[0])
            xs1.extend(n_vec2.to_plot[0])
            # ys1.extend(n_vec1.to_plot[1])
            ys1.extend(n_vec2.to_plot[1])
            xs2 = []
            ys2 = []
            xs2.extend(refracted_ray.to_plot[0])
            ys2.extend(refracted_ray.to_plot[1])
            plot_ax.plot(xs,ys)
            plot_ax.plot(xs1,ys1, linestyle="--", c="k")
    # # Calculate "refraction" rays for borders along y axis
    # for i in range(0,len(ell_coords), len(ell_coords) -1):
    #     if ell_coords[i][0] == 0:
    #         incomming_ray = vector2d(focci, ell_coords[i])
    #         n_vec2 = vector2dFromP1(ref_vector.p2, 5, ref_vector.normal.unit_vector())
    # NOTE(review): xs/ys/xs2/ys1 are only bound inside the plot branch;
    # calling with plot_ax=None raises NameError here -- confirm callers
    # always pass a plot axis.
    return refracs, [(xs,ys), (xs2,ys1)] #plot data
def ref_nodes(refracts, thickness, n_layers, focci=None, flat_top=True):
    """Generate node coordinates layered along each normal direction.

    Parameters
    ----------
    refracts : sequence of (refracted_ray, normal_ray) vector2d pairs
        Output of `refractions`.
    thickness : float
        Total thickness spanned by the layers.
    n_layers : int
        Number of internal layers; two boundary layers are added by linspace.
    focci : array-like or None
        Defaults to (0, 0). Currently unused in the body; kept for
        interface compatibility.
    flat_top : bool
        Currently unused; kept for interface compatibility.

    Returns
    -------
    (coords, matrix) : ndarray of (x, y) node points, and a 2xN ndarray
        with all x values in row 0 and all y values in row 1.
    """
    # Fix: avoid a mutable ndarray default argument; (0, 0) stays the default.
    focci = np.array([0, 0]) if focci is None else focci
    layers_space = np.linspace(0,thickness,n_layers + 2)
    print(layers_space)
    points_matrix_coords = []
    points_matrix = [[],[]]
    ref_vectors = np.copy(refracts)
    dL = layers_space[1] - layers_space[0]
    print("dL:",dL)
    for L in layers_space:
        for i, vecs in enumerate(ref_vectors):
            refracted_vec = vecs[0]
            normal_vec = vecs[1]
            theta = angle_between(normal_vec,refracted_vec)
            if theta == np.pi*0.5:
                theta = 0
            if L > 0:
                # cdL only feeds the debug print; node placement uses the
                # normal direction at plain distance L.
                cosTheta = np.cos(theta)
                cdL = L * np.reciprocal(cosTheta) if cosTheta > 0 else 0
                print("L:", round(L,3), "| theta:", round(np.degrees(theta),3), "| cdL", round(cdL,5), "| L+cdL:", round(L + cdL,5))
                vec = vector2dFromP1(refracted_vec.p1, L, normal_vec.unit)
                points_matrix_coords.append(vec.p2)
                points_matrix[0].append(vec.p2[0])
                points_matrix[1].append(vec.p2[1])
            else:
                # Boundary layer L == 0: reuse the ray origin itself.
                vec = refracted_vec
                points_matrix_coords.append(vec.p1)
                points_matrix[0].append(vec.p1[0])
                points_matrix[1].append(vec.p1[1])
    return np.array(points_matrix_coords), np.array(points_matrix)
def ref_nodes2(refracts, thickness, n_layers, focci=np.array([0,0]), layer_res=N_NODES_PER_LAYER+2, flat_top=True):
    """Layer nodes by intersecting the refracted rays with grown ellipses.

    Layer 0 reuses the ray origins; every further layer L samples the
    ellipse grown/shrunk by L (uses module constants A, B, H, K, YMAX)
    and picks, per ray, the first sampled point roughly parallel to it.

    NOTE(review): `focci` is a mutable ndarray default argument (shared
    across calls) and, like `layer_res` and `flat_top`, is unused in the
    body -- confirm before cleaning up.

    Returns (ndarray of (x, y) node points, 2xN ndarray of x/y rows).
    """
    def is_parallel(p1, vec, err=np.radians(1)):
        # True when the segment p1 -> vec.p1 is (anti)parallel to vec
        # within `err` radians; coincident endpoints count as parallel.
        if p1[0] != vec.p1[0] and p1[1] != vec.p1[1]:
            v1 = vector2d(p1,vec.p1)
            theta = angle_between(vec, v1)
            # print(np.degrees(theta))
            if theta <= err or (np.pi - err <= theta <= np.pi + err) or theta >= np.pi*2 - err:
                return True
            else:
                return False
        else:
            return True
    layers_space = np.linspace(0,thickness,n_layers + 2)
    points_matrix_coords = []
    points_matrix = [[],[]]
    ref_vectors = np.copy(refracts)
    for Lindex, L in enumerate(layers_space):
        if Lindex == 0:
            # Base layer: the refracted-ray start points themselves.
            for vecs in ref_vectors:
                ref_coord = vecs[0].p1
                points_matrix_coords.append(ref_coord)
                points_matrix[0].append(ref_coord[0])
                points_matrix[1].append(ref_coord[1])
            print("node_per_layer:", len(points_matrix_coords))
        else:
            # Grow the ellipse outward by L and densely sample it.
            layer_coords, layer_xy = sctattered_ellipse(A-L,B+L,H,K, [A-L,YMAX], [0,B+L], 600, 600)
            node_per_layer_counter = 0
            angle_err = np.radians(0.5)
            # while node_per_layer_counter != layer_res:
            # node_per_layer_counter = 0
            # `tracker` advances monotonically so each ray searches only
            # past the previous ray's match (layer points are y-sorted).
            tracker = 0
            for vecs in ref_vectors:
                found_match = False
                angle_err = np.radians(0.5)
                while not found_match:
                    local_tracker = tracker
                    for i in range(tracker,len(layer_coords)):
                        # print("tracker", tracker, "local_tracker", local_tracker)
                        if is_parallel(layer_coords[i],vecs[0], err=angle_err):
                            points_matrix_coords.append(layer_coords[i])
                            points_matrix[0].append(layer_xy[0][i])
                            points_matrix[1].append(layer_xy[1][i])
                            node_per_layer_counter += 1
                            found_match = True
                            break
                        else:
                            local_tracker += 1
                    angle_err += np.radians(0.5) # increase a tolerable degree
                    tracker = local_tracker
            print("node_per_layer:",node_per_layer_counter)
    return np.array(points_matrix_coords), np.array(points_matrix)
def make_3d(points_matrix_coords, points_matrix, shift_yz=True):
    """Lift 2D node data into 3D by adding a zero coordinate.

    Parameters
    ----------
    points_matrix_coords : iterable of 2-element coordinates
        Per-node (x, y) pairs.
    points_matrix : array-like with rows [xs, ys] (may be empty)
        Column-wise coordinates; returned unchanged when empty.
    shift_yz : bool
        If True, the zero becomes the middle coordinate, producing
        (x, 0, y); otherwise it is appended, producing (x, y, 0).

    Returns
    -------
    (ndarray of 3D coords, 3-row coordinate matrix)
    """
    points_matrix_coords_3d = []
    for a in points_matrix_coords:
        if shift_yz == True:
            a = np.insert(a,1,0.)
        else:
            a = np.append(a,0)
        points_matrix_coords_3d.append(a)
    if len(points_matrix) > 0:
        z = np.zeros(len(points_matrix[0]))
        if shift_yz == True:
            a = points_matrix[0]
            b = points_matrix[1]
            points_matrix = np.vstack((a,z))
            points_matrix = np.vstack((points_matrix,b))
        else:
            # BUG FIX: np.vstack takes a single tuple of arrays; the previous
            # call np.vstack(points_matrix, z) passed z as a second positional
            # argument and raised a TypeError.
            points_matrix = np.vstack((points_matrix, z))
    return np.array(points_matrix_coords_3d), points_matrix
def revolute(points_matrix_coords, rev=360, res=4, exclude_axis=True, axis='z'):
    """Revolve a set of 3D points about an axis into `res` angular sections.

    Returns (point_cloud keyed by node number, point_cloud keyed by
    coordinate tuple, 3xN coordinate matrix). Each point record is
    (coord, node_number, section).

    NOTE(review): `exclude_axis` and the top-level `axis` argument are
    unused -- rotation_matrix is always called with its own default
    ('z') -- confirm before relying on `axis='y'`.
    """
    def rotation_matrix(theta, axis='z'):
        # Rotation matrix applied on the right (np.matmul(coord, R)),
        # i.e. the transpose of the usual column-vector convention.
        if axis == 'z':
            return np.array([
                [np.cos(theta), np.sin(theta), 0],
                [-np.sin(theta), np.cos(theta), 0],
                [0, 0, 1],
                ]
            )
        elif axis == 'y':
            return np.array([
                [np.cos(theta), 0, -np.sin(theta)],
                [0, 1, 0],
                [np.sin(theta), 0, np.cos(theta)],
                ]
            )
    point_cloud_by_coord = {}
    # res+1 angles, last one dropped so 0 and `rev` degrees don't duplicate.
    theta_space = np.linspace(0, rev, res + 1)
    node_count = 0
    section_count = 0
    for dtheta in theta_space[:-1]:
        for coord in points_matrix_coords:
            coord = np.matmul(coord, rotation_matrix(np.radians(dtheta)))
            # Conditioning rotation on axis
            # NOTE(review): exact float comparison -- points rotated onto
            # the axis may not land at exactly 0; also identical coordinate
            # tuples overwrite each other (deduplicating axis nodes).
            if coord[0] == 0:
                section = 0
            else:
                section = section_count
            point_cloud_by_coord[tuple(coord)] = (coord, node_count, section)
            node_count += 1
        section_count += 1
    # print("number of nodes:", node_count - 1)
    # print("n_sections ", section_count - 1 )
    # Setting a dict of point cloud by node number
    point_cloud = {}
    for key in point_cloud_by_coord:
        point = point_cloud_by_coord[key]
        point_cloud[point[1]] = (point[0], point[1], point[2])
    # Setting a matrix of x,y,z (explicit coordinates matrix)
    point_matrix = np.zeros((3, len(point_cloud)))
    for i, point in enumerate(point_cloud):
        point_matrix[0][i] = point_cloud[point][0][0]
        point_matrix[1][i] = point_cloud[point][0][1]
        point_matrix[2][i] = point_cloud[point][0][2]
    return point_cloud, point_cloud_by_coord, point_matrix
def hex8(point_cloud, nodes, n_layers=N_INTERNAL_LAYERS+2):
    """Bucket the revolved point cloud by section and height/radius order.

    `point_cloud` maps node number -> (coord, node_number, section), as
    produced by `revolute`. Nodes are grouped per section, then indexed
    into `temp_dict` (sorted by height z) and `temp_dict2` (sorted by
    radial distance from the axis); the first ten entries of section 0
    are printed for inspection. Returns None.

    NOTE(review): the `nodes` argument is unused, and ~250 lines of
    commented-out sorting experiments were removed from this body;
    see version control history if they are needed again. The debug
    loop at the end raises KeyError when section 0 has fewer than ten
    height levels -- confirm resolutions before lowering them.
    """
    def distance(p1,p2):
        # Euclidean distance between two points of any matching dimension.
        return np.linalg.norm(np.array(p1)-np.array(p2))
    def display(dic,withVal=False):
        # Debug helper: dump a dict's keys (and optionally values).
        # Currently only referenced from removed experiments; kept for
        # interactive debugging.
        for key in dic:
            if withVal:
                print(key, dic[key])
            else:
                print(key)
        print("Length:",len(dic))
    # Sorting into a dict based on section --> sections = {s: [(coord, nodeNumber), ...]}
    sections = dict()
    for key in point_cloud:
        data = point_cloud[key]
        s = data[-1] # section number
        if s in sections:
            sections[s].append(data[:-1])
        else:
            sections[s] = [data[:-1]]
    temp_dict = dict() # nodes by height
    temp_dict2 = dict() # nodes by radius
    print("sorting by height and radius")
    for s in sections:
        # Same nodes ordered two ways: by z, and by distance from the axis.
        nodes_in_section = sorted(sections[s], key=lambda item: item[0][2])
        nodes_in_section2 = sorted(sections[s], key=lambda item: distance([0,0], item[0][:-1]))
        for i, data in enumerate(nodes_in_section):
            key = (s,i)
            if key in temp_dict:
                temp_dict[key].append(data)
                temp_dict2[key].append(nodes_in_section2[i])
            else:
                temp_dict[key] = [data]
                temp_dict2[key] = [nodes_in_section2[i]]
    for i in range(10):
        print("byHeight:", temp_dict[(0,i)][0], "byRadius", temp_dict2[(0,i)][0])
def hexalise(shape):
    """Build hex8 element connectivity from a (section, layer, node) grid.

    `shape[i][j][k]` is the node id at section i, layer j, position k
    along the layer. Sections wrap around, so the last section connects
    back to the first. Returns a dict mapping 1-based element ids to
    8-node ndarrays in hex8 corner order (P6, P2, P4, P8, P5, P1, P3, P7).
    """
    def corner_nodes(sec, layer, pos):
        # Neighbouring section index, wrapping the revolve onto itself.
        nxt = sec + 1 if sec != len(shape) - 1 else 0
        return np.array([
            shape[nxt][layer][pos + 1],      # P6
            shape[sec][layer][pos + 1],      # P2
            shape[sec][layer + 1][pos + 1],  # P4
            shape[nxt][layer + 1][pos + 1],  # P8
            shape[nxt][layer][pos],          # P5
            shape[sec][layer][pos],          # P1
            shape[sec][layer + 1][pos],      # P3
            shape[nxt][layer + 1][pos],      # P7
        ])
    elements = {}
    eid = 1
    for sec in range(len(shape)):
        for layer in range(len(shape[sec]) - 1):
            for pos in range(len(shape[sec][layer]) - 1):
                elements[eid] = corner_nodes(sec, layer, pos)
                eid += 1
    return elements
def write_geometry(nodes, elems, file_name, path_to_output_folder):
    """Serialise nodes and hex8 elements into a <Geometry> XML file.

    `nodes` maps node id -> coordinate iterable and `elems` maps element
    id -> node-id iterable (assumed mappings -- confirm with caller).
    The tree is pretty-printed via the module-level `indent` helper and
    written to path_to_output_folder/file_name with ISO-8859-1 encoding.
    """
    root = ET.Element('Geometry')
    tree = ET.ElementTree(root)
    nodes_el = ET.SubElement(root, "Nodes")
    nodes_el.set("name", "Object01")
    elems_el = ET.SubElement(root, "Elements")
    elems_el.set("type", "hex8")
    elems_el.set("name", "Part1")
    # One <node id="..">x,y,z</node> entry per node.
    for node_id in nodes:
        entry = ET.SubElement(nodes_el, "node")
        entry.set("id", str(node_id))
        entry.text = ",".join(str(c) for c in nodes[node_id])
    # One <elem id="..">n1,...,n8</elem> entry per element.
    for elem_id in elems:
        entry = ET.SubElement(elems_el, "elem")
        entry.set("id", str(elem_id))
        entry.text = ",".join(str(n) for n in elems[elem_id])
    indent(tree.getroot())
    tree.write(join(path_to_output_folder, file_name), encoding="ISO-8859-1")
def indent(elem, level=0):
    """Recursively insert whitespace into an ElementTree for pretty printing.

    Mutates text/tail in place so that each nesting level is indented by
    one extra space on its own line.
    """
    pad = "\n" + level * " "
    if len(elem):
        # Element has children: open its text onto a new, deeper line.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        for child in elem:
            indent(child, level + 1)
            if not child.tail or not child.tail.strip():
                child.tail = pad + " "
        # The last child's tail closes back to this element's level.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        # Leaf: only adjust the tail, and never for the root element.
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
######################################
if __name__ == "__main__":
    # Driver script: sample the ellipse, refract rays, layer nodes, revolve
    # them into a 3D point cloud and run the hex8 grouping, plotting each
    # intermediate stage. Uses module constants A, B, H, K, YMAX,
    # N_NODES_PER_LAYER, N_INTERNAL_LAYERS, N_REVS.
    print("==== Test case ===")
    fig = plt.figure()
    axs = fig.add_subplot(121)
    axs2 = fig.add_subplot(122)
    fig2 = plt.figure()
    axs3 = fig2.add_subplot(111, projection='3d')
    ## Focci points
    focci_pos, focci_neg = ellipse_focci(A,B,H,K)
    # plt.scatter(focci_pos[0], focci_pos[1],c='y')
    ## Scattered ellipse
    ell_original_coords, ell_original = sctattered_ellipse(A,B,H,K, [A,YMAX], [0,B], 1000, 1000)
    axs.scatter(ell_original[0], ell_original[1],c='b')
    # Evenly redistribute the sampled boundary points.
    ell_distr_coords, ell_distr = distributed_ellipse(A,B,H,K, [A,YMAX], [0,B], dist_res=N_NODES_PER_LAYER)
    axs.scatter(ell_distr[0], ell_distr[1],c='g')
    # NOTE(review): this rebinds the name of the `refractions` function to
    # its own result, shadowing it for the rest of the script -- confirm
    # it is never called again below.
    refractions, _ = refractions(ell_distr_coords, [0,0], n1=1, n2=0.85, bias_factor=-1.5, plot_ax=axs)
    # ell_2_coords, ell_2 = sctattered_ellipse(A-10,B+10,H,K, [A-10,YMAX], [0,B+10], 100, 100)
    # axs2.scatter(ell_2[0], ell_2[1],c='g')
    # NOTE(review): likewise shadows the `ref_nodes` function.
    ref_nodes_coords, ref_nodes = ref_nodes2(refractions, 10, N_INTERNAL_LAYERS)
    print("total n nodes:", len(ref_nodes_coords))
    axs2.scatter(ref_nodes[0], ref_nodes[1])
    # Lift the 2D layered nodes into 3D and revolve about the z axis.
    ref_nodes_coords, ref_nodes = make_3d(ref_nodes_coords, ref_nodes)
    node_cloud, _, nodes = revolute(ref_nodes_coords, res=N_REVS, axis='z')
    axs3.scatter3D(nodes[0],nodes[1],nodes[2])
    hex8(node_cloud, nodes)
    # xnodes = np.ma.array([0,1,2,3], mask=False)
    # ynodes = np.ma.array([0,1,2,3], mask=False)
    # def mask(arrays, idx):
    #     for arr in arrays:
    #         arr.mask[idx] = True
    # mask([xnodes, ynodes], 1)
    # print(xnodes)
    axs.grid()
    axs.axis('equal')
    axs2.grid()
    # NOTE(review): y_res is not a matplotlib Axes attribute; this just sets
    # an ad-hoc attribute on the object -- confirm intent.
    axs2.y_res = 2
    axs2.axis('equal')
    # axs2.x_res = 5
    plt.show()
| [
"numpy.radians",
"numpy.sqrt",
"numpy.polyfit",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"numpy.poly1d",
"numpy.sin",
"scipy.spatial.distance",
"numpy.max",
"numpy.linspace",
"numpy.dot",
"numpy.matmul",
"numpy.vstack",
"numpy.min",
"numpy.degrees",
"numpy.reciprocal",
... | [((1825, 1841), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1833, 1841), True, 'import numpy as np\n'), ((2123, 2146), 'numpy.polyfit', 'np.polyfit', (['xs', 'ys', 'deg'], {}), '(xs, ys, deg)\n', (2133, 2146), True, 'import numpy as np\n'), ((2472, 2489), 'numpy.poly1d', 'np.poly1d', (['coeffs'], {}), '(coeffs)\n', (2481, 2489), True, 'import numpy as np\n'), ((2892, 2916), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 - b ** 2)'], {}), '(a ** 2 - b ** 2)\n', (2899, 2916), True, 'import numpy as np\n'), ((3152, 3166), 'numpy.min', 'np.min', (['yrange'], {}), '(yrange)\n', (3158, 3166), True, 'import numpy as np\n'), ((3175, 3189), 'numpy.max', 'np.max', (['yrange'], {}), '(yrange)\n', (3181, 3189), True, 'import numpy as np\n'), ((3198, 3212), 'numpy.min', 'np.min', (['xrange'], {}), '(xrange)\n', (3204, 3212), True, 'import numpy as np\n'), ((3221, 3235), 'numpy.max', 'np.max', (['xrange'], {}), '(xrange)\n', (3227, 3235), True, 'import numpy as np\n'), ((3284, 3314), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'x_res'], {}), '(ymin, ymax, x_res)\n', (3295, 3314), True, 'import numpy as np\n'), ((3387, 3417), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'y_res'], {}), '(xmin, xmax, y_res)\n', (3398, 3417), True, 'import numpy as np\n'), ((3516, 3547), 'numpy.append', 'np.append', (['xs_ybased', 'xs_xbased'], {}), '(xs_ybased, xs_xbased)\n', (3525, 3547), True, 'import numpy as np\n'), ((3554, 3585), 'numpy.append', 'np.append', (['ys_ybased', 'ys_xbased'], {}), '(ys_ybased, ys_xbased)\n', (3563, 3585), True, 'import numpy as np\n'), ((3981, 3995), 'numpy.min', 'np.min', (['yrange'], {}), '(yrange)\n', (3987, 3995), True, 'import numpy as np\n'), ((4004, 4018), 'numpy.max', 'np.max', (['yrange'], {}), '(yrange)\n', (4010, 4018), True, 'import numpy as np\n'), ((4027, 4041), 'numpy.min', 'np.min', (['xrange'], {}), '(xrange)\n', (4033, 4041), True, 'import numpy as np\n'), ((4050, 4064), 'numpy.max', 'np.max', (['xrange'], {}), '(xrange)\n', 
(4056, 4064), True, 'import numpy as np\n'), ((6511, 6580), 'scipy.spatial.distance.cdist', 'distance.cdist', (['ell_original_coords', 'ell_original_coords', '"""euclidean"""'], {}), "(ell_original_coords, ell_original_coords, 'euclidean')\n", (6525, 6580), False, 'from scipy.spatial import distance\n'), ((10814, 10830), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10822, 10830), True, 'import numpy as np\n'), ((10864, 10903), 'numpy.linspace', 'np.linspace', (['(0)', 'thickness', '(n_layers + 2)'], {}), '(0, thickness, n_layers + 2)\n', (10875, 10903), True, 'import numpy as np\n'), ((10991, 11008), 'numpy.copy', 'np.copy', (['refracts'], {}), '(refracts)\n', (10998, 11008), True, 'import numpy as np\n'), ((12310, 12326), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (12318, 12326), True, 'import numpy as np\n'), ((12732, 12771), 'numpy.linspace', 'np.linspace', (['(0)', 'thickness', '(n_layers + 2)'], {}), '(0, thickness, n_layers + 2)\n', (12743, 12771), True, 'import numpy as np\n'), ((12838, 12855), 'numpy.copy', 'np.copy', (['refracts'], {}), '(refracts)\n', (12845, 12855), True, 'import numpy as np\n'), ((15297, 15325), 'numpy.linspace', 'np.linspace', (['(0)', 'rev', '(res + 1)'], {}), '(0, rev, res + 1)\n', (15308, 15325), True, 'import numpy as np\n'), ((27534, 27556), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""Geometry"""'], {}), "('Geometry')\n", (27544, 27556), True, 'import xml.etree.ElementTree as ET\n'), ((27565, 27589), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['geometry'], {}), '(geometry)\n', (27579, 27589), True, 'import xml.etree.ElementTree as ET\n'), ((27603, 27635), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['geometry', '"""Nodes"""'], {}), "(geometry, 'Nodes')\n", (27616, 27635), True, 'import xml.etree.ElementTree as ET\n'), ((27682, 27717), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['geometry', '"""Elements"""'], {}), "(geometry, 'Elements')\n", (27695, 27717), 
True, 'import xml.etree.ElementTree as ET\n'), ((28827, 28839), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28837, 28839), True, 'from matplotlib import pyplot as plt\n'), ((28906, 28918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28916, 28918), True, 'from matplotlib import pyplot as plt\n'), ((30355, 30365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30363, 30365), True, 'from matplotlib import pyplot as plt\n'), ((1682, 1718), 'numpy.matmul', 'np.matmul', (['rotation_matrix', 'self.vec'], {}), '(rotation_matrix, self.vec)\n', (1691, 1718), True, 'import numpy as np\n'), ((1727, 1744), 'numpy.array', 'np.array', (['self.p1'], {}), '(self.p1)\n', (1735, 1744), True, 'import numpy as np\n'), ((1848, 1892), 'numpy.array', 'np.array', (['[length * dir[0], length * dir[1]]'], {}), '([length * dir[0], length * dir[1]])\n', (1856, 1892), True, 'import numpy as np\n'), ((2921, 2941), 'numpy.array', 'np.array', (['[h, k + c]'], {}), '([h, k + c])\n', (2929, 2941), True, 'import numpy as np\n'), ((2943, 2963), 'numpy.array', 'np.array', (['[h, k - c]'], {}), '([h, k - c])\n', (2951, 2963), True, 'import numpy as np\n'), ((3702, 3722), 'numpy.argsort', 'np.argsort', (['s1[:, 1]'], {}), '(s1[:, 1])\n', (3712, 3722), True, 'import numpy as np\n'), ((4588, 4619), 'numpy.insert', 'np.insert', (['breakpoints', '(0)', 'ymin'], {}), '(breakpoints, 0, ymin)\n', (4597, 4619), True, 'import numpy as np\n'), ((4679, 4707), 'numpy.append', 'np.append', (['breakpoints', 'ymax'], {}), '(breakpoints, ymax)\n', (4688, 4707), True, 'import numpy as np\n'), ((7389, 7415), 'numpy.array', 'np.array', (['ell_distr_coords'], {}), '(ell_distr_coords)\n', (7397, 7415), True, 'import numpy as np\n'), ((7417, 7436), 'numpy.array', 'np.array', (['ell_distr'], {}), '(ell_distr)\n', (7425, 7436), True, 'import numpy as np\n'), ((12201, 12231), 'numpy.array', 'np.array', (['points_matrix_coords'], {}), '(points_matrix_coords)\n', (12209, 12231), True, 
'import numpy as np\n'), ((12233, 12256), 'numpy.array', 'np.array', (['points_matrix'], {}), '(points_matrix)\n', (12241, 12256), True, 'import numpy as np\n'), ((12406, 12419), 'numpy.radians', 'np.radians', (['(1)'], {}), '(1)\n', (12416, 12419), True, 'import numpy as np\n'), ((14148, 14178), 'numpy.array', 'np.array', (['points_matrix_coords'], {}), '(points_matrix_coords)\n', (14156, 14178), True, 'import numpy as np\n'), ((14180, 14203), 'numpy.array', 'np.array', (['points_matrix'], {}), '(points_matrix)\n', (14188, 14203), True, 'import numpy as np\n'), ((26886, 27069), 'numpy.array', 'np.array', (['[shape[i2][j][k + 1], shape[i][j][k + 1], shape[i][j + 1][k + 1], shape[i2]\n [j + 1][k + 1], shape[i2][j][k], shape[i][j][k], shape[i][j + 1][k],\n shape[i2][j + 1][k]]'], {}), '([shape[i2][j][k + 1], shape[i][j][k + 1], shape[i][j + 1][k + 1],\n shape[i2][j + 1][k + 1], shape[i2][j][k], shape[i][j][k], shape[i][j + \n 1][k], shape[i2][j + 1][k]])\n', (26894, 27069), True, 'import numpy as np\n'), ((27852, 27884), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['nodes_tag', '"""node"""'], {}), "(nodes_tag, 'node')\n", (27865, 27884), True, 'import xml.etree.ElementTree as ET\n'), ((28041, 28073), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['elems_tag', '"""elem"""'], {}), "(elems_tag, 'elem')\n", (28054, 28073), True, 'import xml.etree.ElementTree as ET\n'), ((28276, 28314), 'os.path.join', 'join', (['path_to_output_folder', 'file_name'], {}), '(path_to_output_folder, file_name)\n', (28280, 28314), False, 'from os.path import isfile, join\n'), ((1528, 1552), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vec'], {}), '(self.vec)\n', (1542, 1552), True, 'import numpy as np\n'), ((2050, 2074), 'numpy.dot', 'np.dot', (['v1.unit', 'v2.unit'], {}), '(v1.unit, v2.unit)\n', (2056, 2074), True, 'import numpy as np\n'), ((13303, 13318), 'numpy.radians', 'np.radians', (['(0.5)'], {}), '(0.5)\n', (13313, 13318), True, 'import numpy as np\n'), ((14363, 
14383), 'numpy.insert', 'np.insert', (['a', '(1)', '(0.0)'], {}), '(a, 1, 0.0)\n', (14372, 14383), True, 'import numpy as np\n'), ((14396, 14411), 'numpy.append', 'np.append', (['a', '(0)'], {}), '(a, 0)\n', (14405, 14411), True, 'import numpy as np\n'), ((14610, 14627), 'numpy.vstack', 'np.vstack', (['(a, z)'], {}), '((a, z))\n', (14619, 14627), True, 'import numpy as np\n'), ((14646, 14675), 'numpy.vstack', 'np.vstack', (['(points_matrix, b)'], {}), '((points_matrix, b))\n', (14655, 14675), True, 'import numpy as np\n'), ((14754, 14781), 'numpy.vstack', 'np.vstack', (['points_matrix', 'z'], {}), '(points_matrix, z)\n', (14763, 14781), True, 'import numpy as np\n'), ((14792, 14825), 'numpy.array', 'np.array', (['points_matrix_coords_3d'], {}), '(points_matrix_coords_3d)\n', (14800, 14825), True, 'import numpy as np\n'), ((7727, 7741), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (7733, 7741), True, 'import numpy as np\n'), ((11440, 11453), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (11446, 11453), True, 'import numpy as np\n'), ((13483, 13498), 'numpy.radians', 'np.radians', (['(0.5)'], {}), '(0.5)\n', (13493, 13498), True, 'import numpy as np\n'), ((16654, 16666), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (16662, 16666), True, 'import numpy as np\n'), ((16667, 16679), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (16675, 16679), True, 'import numpy as np\n'), ((1610, 1623), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1616, 1623), True, 'import numpy as np\n'), ((1643, 1656), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1649, 1656), True, 'import numpy as np\n'), ((1658, 1671), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1664, 1671), True, 'import numpy as np\n'), ((2649, 2681), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - (val - h) ** 2)'], {}), '(b ** 2 - (val - h) ** 2)\n', (2656, 2681), True, 'import numpy as np\n'), ((14013, 14028), 'numpy.radians', 'np.radians', (['(0.5)'], {}), '(0.5)\n', (14023, 
14028), True, 'import numpy as np\n'), ((15478, 15496), 'numpy.radians', 'np.radians', (['dtheta'], {}), '(dtheta)\n', (15488, 15496), True, 'import numpy as np\n'), ((20169, 20199), 'scipy.spatial.distance', 'distance', (['[0, 0]', 'item[0][:-1]'], {}), '([0, 0], item[0][:-1])\n', (20177, 20199), False, 'from scipy.spatial import distance\n'), ((1626, 1639), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1632, 1639), True, 'import numpy as np\n'), ((2810, 2842), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 - (val - k) ** 2)'], {}), '(a ** 2 - (val - k) ** 2)\n', (2817, 2842), True, 'import numpy as np\n'), ((11468, 11491), 'numpy.reciprocal', 'np.reciprocal', (['cosTheta'], {}), '(cosTheta)\n', (11481, 11491), True, 'import numpy as np\n'), ((11564, 11581), 'numpy.degrees', 'np.degrees', (['theta'], {}), '(theta)\n', (11574, 11581), True, 'import numpy as np\n'), ((15006, 15019), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15012, 15019), True, 'import numpy as np\n'), ((15021, 15034), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15027, 15034), True, 'import numpy as np\n'), ((15061, 15074), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15067, 15074), True, 'import numpy as np\n'), ((15046, 15059), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15052, 15059), True, 'import numpy as np\n'), ((15152, 15165), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15158, 15165), True, 'import numpy as np\n'), ((15207, 15220), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15213, 15220), True, 'import numpy as np\n'), ((15225, 15238), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15231, 15238), True, 'import numpy as np\n'), ((15171, 15184), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15177, 15184), True, 'import numpy as np\n')] |
from CHECLabPy.plotting.setup import Plotter
from sstcam_sandbox import get_plot
from CHECLabPy.core.io import HDF5Reader
from os.path import join
import numpy as np
from matplotlib.colors import LogNorm
from IPython import embed
class Hist2D(Plotter):
def __init__(self, xlabel, ylabel):
super().__init__()
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
def plot(self, x, y):
self.ax.hist2d(x, y, bins=(100, 100), norm=LogNorm())
def main():
path = "/Volumes/gct-jason/astri_onsky_archive/hillas_over_campaign.h5"
output = get_plot("d190524_time_gradient/correlations/data")
with HDF5Reader(path) as reader:
df = reader.read("data")
tgradient = df['tgradient'].values
length = df['length'].values
width = df['width'].values
notnoisy = ~np.logical_and(length > 0.1, width > 0.1)
tgradient = tgradient[notnoisy]
length = length[notnoisy]
width = width[notnoisy]
p = Hist2D("Time Gradient (ns/degree)", "Length (degree)")
p.plot(tgradient, length)
p.save(join(output, "tgradvslength.pdf"))
if __name__ == '__main__':
main()
| [
"sstcam_sandbox.get_plot",
"numpy.logical_and",
"os.path.join",
"CHECLabPy.core.io.HDF5Reader",
"matplotlib.colors.LogNorm"
] | [((584, 635), 'sstcam_sandbox.get_plot', 'get_plot', (['"""d190524_time_gradient/correlations/data"""'], {}), "('d190524_time_gradient/correlations/data')\n", (592, 635), False, 'from sstcam_sandbox import get_plot\n'), ((646, 662), 'CHECLabPy.core.io.HDF5Reader', 'HDF5Reader', (['path'], {}), '(path)\n', (656, 662), False, 'from CHECLabPy.core.io import HDF5Reader\n'), ((828, 869), 'numpy.logical_and', 'np.logical_and', (['(length > 0.1)', '(width > 0.1)'], {}), '(length > 0.1, width > 0.1)\n', (842, 869), True, 'import numpy as np\n'), ((1069, 1102), 'os.path.join', 'join', (['output', '"""tgradvslength.pdf"""'], {}), "(output, 'tgradvslength.pdf')\n", (1073, 1102), False, 'from os.path import join\n'), ((470, 479), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (477, 479), False, 'from matplotlib.colors import LogNorm\n')] |
# <NAME> <<EMAIL>>
import argparse
import logging
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from transformers import BertTokenizer, BertForSequenceClassification
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
class Example:
def __init__(self, sent0, sent1):
self.sent0 = sent0
self.sent1 = sent1
class Features:
def __init__(self, input_ids, input_mask, segment_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
def convert_example(example, tokenizer, max_seq_len):
out = tokenizer(example.sent0, example.sent1, padding='max_length',
max_length=max_seq_len, truncation=True)
return Features(out['input_ids'], out['attention_mask'], out['token_type_ids'])
def get_tensor_dataset(features):
input_ids = torch.tensor([x.input_ids for x in features], dtype=torch.long)
input_masks = torch.tensor([x.input_mask for x in features], dtype=torch.bool)
segment_ids = torch.tensor([x.segment_ids for x in features], dtype=torch.int)
return TensorDataset(input_ids, input_masks, segment_ids)
class XnliLoader:
LABELS = ['neutral', 'contradiction', 'entailment']
L2I = {'neutral': 0, 'contradiction': 1, 'contradictory': 1, 'entailment': 2}
def load_features(self, filename, tokenizer, max_seq_len):
features = []
df = pd.read_csv(filename, sep='\t')
for i, row in tqdm(df.iterrows(), total=df.shape[0], desc='loading data'):
example = Example(row['premise'], row['hypo'])
features.append(convert_example(example, tokenizer, max_seq_len))
return features
class MakLoader:
LABELS = ['Αθλητικά', 'Ρεπορτάζ', 'Οικονομία', 'Πολιτική', 'Διεθνή',
'Τηλεόραση', 'Τέχνες-Πολιτισμός']
L2I = {label: i for i, label in enumerate(LABELS)}
def load_features(self, filename, tokenizer, max_seq_len):
features = []
df = pd.read_csv(filename)
for i, row in tqdm(df.iterrows(), total=df.shape[0], desc='loading data'):
example = Example(row['Text'], None)
features.append(convert_example(example, tokenizer, max_seq_len))
return features
def main():
parser = argparse.ArgumentParser(description='greek bert distillation')
parser.add_argument('--pretrained_model', required=True, help='pretrained model directory')
parser.add_argument('--task', required=True, choices=['xnli', 'mak'], help='task name')
parser.add_argument('--dataset', required=True, help='dataset file path')
parser.add_argument('--save_file', required=True, help='logits save file path')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--max_seq_len', type=int, default=128, help='max sequence length')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--tsv', action='store_true', help='whether to output tsv data')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
device = torch.device('cuda:0')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.task == 'xnli':
data = XnliLoader()
elif args.task == 'mak':
data = MakLoader()
num_labels = len(data.LABELS)
tokenizer = BertTokenizer.from_pretrained('nlpaueb/bert-base-greek-uncased-v1')
model = BertForSequenceClassification.from_pretrained(
args.pretrained_model, num_labels=num_labels).to(device)
features = data.load_features(args.dataset, tokenizer, args.max_seq_len)
dataset = get_tensor_dataset(features)
loader = DataLoader(dataset, sampler=SequentialSampler(dataset),
batch_size=args.batch_size)
y_pred = None
model.eval()
with torch.no_grad():
for batch in tqdm(loader, desc='evaluating'):
batch = tuple(x.to(device) for x in batch)
input_ids, input_masks, segment_ids = batch
logits = model(input_ids, attention_mask=input_masks,
token_type_ids=segment_ids)[0]
if y_pred is None:
y_pred = logits.detach().cpu().numpy()
else:
y_pred = np.append(y_pred, logits.detach().cpu().numpy(), axis=0)
out_data = {}
for i in range(len(data.LABELS)):
out_data['score{}'.format(i)] = y_pred[:, i]
aug_df = pd.DataFrame.from_dict(out_data)
if args.tsv:
aug_df.to_csv(args.save_file, index=None, sep='\t')
else:
aug_df.to_csv(args.save_file, index=None)
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"torch.manual_seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"transformers.BertTokenizer.from_pretrained",
"torch.utils.data.SequentialSampler",
"torch.utils.data.TensorDataset",
"pandas.DataFrame.from_dict",
"torch.tensor",
"transformers.BertForSequenceClassification.... | [((881, 944), 'torch.tensor', 'torch.tensor', (['[x.input_ids for x in features]'], {'dtype': 'torch.long'}), '([x.input_ids for x in features], dtype=torch.long)\n', (893, 944), False, 'import torch\n'), ((963, 1027), 'torch.tensor', 'torch.tensor', (['[x.input_mask for x in features]'], {'dtype': 'torch.bool'}), '([x.input_mask for x in features], dtype=torch.bool)\n', (975, 1027), False, 'import torch\n'), ((1046, 1110), 'torch.tensor', 'torch.tensor', (['[x.segment_ids for x in features]'], {'dtype': 'torch.int'}), '([x.segment_ids for x in features], dtype=torch.int)\n', (1058, 1110), False, 'import torch\n'), ((1122, 1172), 'torch.utils.data.TensorDataset', 'TensorDataset', (['input_ids', 'input_masks', 'segment_ids'], {}), '(input_ids, input_masks, segment_ids)\n', (1135, 1172), False, 'from torch.utils.data import TensorDataset, DataLoader, SequentialSampler\n'), ((2274, 2336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""greek bert distillation"""'}), "(description='greek bert distillation')\n", (2297, 2336), False, 'import argparse\n'), ((3060, 3099), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3079, 3099), False, 'import logging\n'), ((3114, 3136), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (3126, 3136), False, 'import torch\n'), ((3141, 3169), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3158, 3169), False, 'import torch\n'), ((3174, 3199), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3188, 3199), True, 'import numpy as np\n'), ((3364, 3431), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""nlpaueb/bert-base-greek-uncased-v1"""'], {}), "('nlpaueb/bert-base-greek-uncased-v1')\n", (3393, 3431), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), 
((4433, 4465), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['out_data'], {}), '(out_data)\n', (4455, 4465), True, 'import pandas as pd\n'), ((1429, 1460), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (1440, 1460), True, 'import pandas as pd\n'), ((1992, 2013), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2003, 2013), True, 'import pandas as pd\n'), ((3827, 3842), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3840, 3842), False, 'import torch\n'), ((3865, 3896), 'tqdm.auto.tqdm', 'tqdm', (['loader'], {'desc': '"""evaluating"""'}), "(loader, desc='evaluating')\n", (3869, 3896), False, 'from tqdm.auto import tqdm\n'), ((3444, 3539), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['args.pretrained_model'], {'num_labels': 'num_labels'}), '(args.pretrained_model,\n num_labels=num_labels)\n', (3489, 3539), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((3718, 3744), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (3735, 3744), False, 'from torch.utils.data import TensorDataset, DataLoader, SequentialSampler\n')] |
import argparse
import numpy as np
from matplotlib import pyplot as plt
def main(FLAGS):
some_data = np.random.rand(256, 256)
print(FLAGS.data_dir)
plt.matshow(some_data)
plt.show()
if __name__ == '__main__':
# Instantiates an arg parser
parser = argparse.ArgumentParser()
# Establishes default arguments
parser.add_argument("--data_dir",
type=str,
default="C:\\my\\data\\path\\",
help="The complete desired input filepath.")
# Parses known arguments
FLAGS, unparsed = parser.parse_known_args()
# Runs the tensorflow app
main(FLAGS)
| [
"matplotlib.pyplot.matshow",
"numpy.random.rand",
"argparse.ArgumentParser",
"matplotlib.pyplot.show"
] | [((110, 134), 'numpy.random.rand', 'np.random.rand', (['(256)', '(256)'], {}), '(256, 256)\n', (124, 134), True, 'import numpy as np\n'), ((167, 189), 'matplotlib.pyplot.matshow', 'plt.matshow', (['some_data'], {}), '(some_data)\n', (178, 189), True, 'from matplotlib import pyplot as plt\n'), ((195, 205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (203, 205), True, 'from matplotlib import pyplot as plt\n'), ((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n')] |
import numpy as np
_dtype = np.dtype([("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)])
class DVSSpikeTrain(np.recarray):
"""Common type for event based vision datasets"""
__name__ = "SparseVisionSpikeTrain"
def __new__(cls, nb_of_spikes, *args, width=-1, height=-1, duration=-1, time_scale=1e-6, **nargs):
obj = super(DVSSpikeTrain, cls).__new__(cls, nb_of_spikes, dtype=_dtype, *args, **nargs)
obj.width = width
obj.height = height
obj.duration = duration
obj.time_scale = time_scale # dt duration in seconds
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.width = getattr(obj, "width", None)
self.height = getattr(obj, "height", None)
self.duration = getattr(obj, "duration", None)
self.time_scale = getattr(obj, "time_scale", None)
| [
"numpy.dtype"
] | [((29, 116), 'numpy.dtype', 'np.dtype', (["[('x', np.uint16), ('y', np.uint16), ('p', np.bool_), ('ts', np.uint64)]"], {}), "([('x', np.uint16), ('y', np.uint16), ('p', np.bool_), ('ts', np.\n uint64)])\n", (37, 116), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2016-present <NAME>.
#
# Licensed under the MIT License.
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/mit-license.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# Author <NAME> (<EMAIL>)
#
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import env
import unittest
import numpy as np
from npcore.layer.gates import Linear, ReLU
from npcore.layer.link import Link
from npcore.layer.objectives import (
Objective,
MSELoss,
MAELoss,
LogCoshLoss,
XTanhLoss,
AlgebraicLoss,
SigmoidCrossentropyLoss,
SoftmaxCrossentropyLoss
)
# ------------------------------------------------------------------------
np.random.seed(2)
# ------------------------------------------------------------------------
class TestUnitInitializers(unittest.TestCase):
def test_init(self):
print('Testing Regression Objective Layers.')
input_size = 3
output_size = 2
shape = (input_size, output_size)
x_t = np.random.rand(3, 3)
y_t = np.random.rand(3, 2)
stage = {
'epoch': 1,
'mode': 'learning',
'hparam': {
'batch_size': 2,
'eta': 1e-3,
'l1_lambda': 1e-2,
'l2_lambda': 1e-2,
'momentum': 0.9,
'beta_decay1': 0.9,
'beta_decay2': 0.999
}
}
relu = ReLU(size=input_size, name='input')
linear = Linear(size=output_size, name='output')
link = Link(shape=shape, name='dense')
mae = MAELoss(size=output_size, name='objective')
seq = relu.connect(link).connect(linear).connect(mae).head
seq.forward(stage, x_t).evaluate(y_t).backward(stage)
print(seq.tail.outputs)
print(seq.tail.evaluation_metric)
print('Testing Category Classification Objective Layers.')
input_size = 3
output_size = 2
shape = (input_size, output_size)
x_t = np.random.rand(2, 3)
y_t = np.array([[0, 1], [1, 0]])
stage = {
'epoch': 1,
'mode': 'learning',
'hparam': {
'batch_size': 2,
'eta': 1e-3,
'l1_lambda': 1e-2,
'l2_lambda': 1e-2,
'momentum': 0.9,
'beta_decay1': 0.9,
'beta_decay2': 0.999
}
}
relu = ReLU(size=input_size, name='input')
linear = Linear(size=output_size, name='output')
link = Link(shape=shape, name='dense')
cce = SoftmaxCrossentropyLoss(size=output_size, name='objective')
seq = relu.connect(link).connect(linear).connect(cce).head
seq.forward(stage, x_t).evaluate(y_t).backward(stage)
print(seq.tail.outputs)
print(seq.tail.evaluation_metric)
if __name__ == '__main__':
unittest.main()
| [
"npcore.layer.objectives.MAELoss",
"numpy.random.rand",
"npcore.layer.gates.ReLU",
"numpy.array",
"numpy.random.seed",
"unittest.main",
"npcore.layer.link.Link",
"npcore.layer.objectives.SoftmaxCrossentropyLoss",
"npcore.layer.gates.Linear"
] | [((1256, 1273), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1270, 1273), True, 'import numpy as np\n'), ((3483, 3498), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3496, 3498), False, 'import unittest\n'), ((1582, 1602), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (1596, 1602), True, 'import numpy as np\n'), ((1617, 1637), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (1631, 1637), True, 'import numpy as np\n'), ((2014, 2049), 'npcore.layer.gates.ReLU', 'ReLU', ([], {'size': 'input_size', 'name': '"""input"""'}), "(size=input_size, name='input')\n", (2018, 2049), False, 'from npcore.layer.gates import Linear, ReLU\n'), ((2067, 2106), 'npcore.layer.gates.Linear', 'Linear', ([], {'size': 'output_size', 'name': '"""output"""'}), "(size=output_size, name='output')\n", (2073, 2106), False, 'from npcore.layer.gates import Linear, ReLU\n'), ((2122, 2153), 'npcore.layer.link.Link', 'Link', ([], {'shape': 'shape', 'name': '"""dense"""'}), "(shape=shape, name='dense')\n", (2126, 2153), False, 'from npcore.layer.link import Link\n'), ((2169, 2212), 'npcore.layer.objectives.MAELoss', 'MAELoss', ([], {'size': 'output_size', 'name': '"""objective"""'}), "(size=output_size, name='objective')\n", (2176, 2212), False, 'from npcore.layer.objectives import Objective, MSELoss, MAELoss, LogCoshLoss, XTanhLoss, AlgebraicLoss, SigmoidCrossentropyLoss, SoftmaxCrossentropyLoss\n'), ((2591, 2611), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (2605, 2611), True, 'import numpy as np\n'), ((2626, 2652), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2634, 2652), True, 'import numpy as np\n'), ((3029, 3064), 'npcore.layer.gates.ReLU', 'ReLU', ([], {'size': 'input_size', 'name': '"""input"""'}), "(size=input_size, name='input')\n", (3033, 3064), False, 'from npcore.layer.gates import Linear, ReLU\n'), ((3082, 3121), 'npcore.layer.gates.Linear', 
'Linear', ([], {'size': 'output_size', 'name': '"""output"""'}), "(size=output_size, name='output')\n", (3088, 3121), False, 'from npcore.layer.gates import Linear, ReLU\n'), ((3137, 3168), 'npcore.layer.link.Link', 'Link', ([], {'shape': 'shape', 'name': '"""dense"""'}), "(shape=shape, name='dense')\n", (3141, 3168), False, 'from npcore.layer.link import Link\n'), ((3184, 3243), 'npcore.layer.objectives.SoftmaxCrossentropyLoss', 'SoftmaxCrossentropyLoss', ([], {'size': 'output_size', 'name': '"""objective"""'}), "(size=output_size, name='objective')\n", (3207, 3243), False, 'from npcore.layer.objectives import Objective, MSELoss, MAELoss, LogCoshLoss, XTanhLoss, AlgebraicLoss, SigmoidCrossentropyLoss, SoftmaxCrossentropyLoss\n')] |
import sys
import numpy as np
sys.path.append('..')
from Game import Game
from .QubicLogic import Board
import itertools
class QubicGame(Game):
"""
Connect4 Game class implementing the alpha-zero-general Game interface.
"""
def __init__(self, depth = None, height=None, width=None, win_length=None, np_pieces=None):
Game.__init__(self)
self._base_board = Board(height, width, depth, win_length, np_pieces)
def getInitBoard(self):
return self._base_board.np_pieces
def getBoardSize(self):
return (self._base_board.height, self._base_board.width, self._base_board.depth)
def getActionSize(self):
return self._base_board.height * self._base_board.width * self._base_board.depth
def getNextState(self, board, player, action):
"""Returns a copy of the board with updated move, original board is unmodified."""
b = self._base_board.with_np_pieces(np_pieces=np.copy(board))
b.add_piece(action, player)
return b.np_pieces, -player
def getValidMoves(self, board, player):
"Any zero value in top row in a valid move"
return self._base_board.with_np_pieces(np_pieces=board).get_valid_moves()
def getGameEnded(self, board, player):
b = self._base_board.with_np_pieces(np_pieces=board)
winstate = b.get_win_state()
if winstate.is_ended:
if winstate.winner is None:
# draw has very little value.
return 1e-4
elif winstate.winner == player:
return +1
elif winstate.winner == -player:
return -1
else:
raise ValueError('Unexpected winstate found: ', winstate)
else:
# 0 used to represent unfinished game.
return 0
def getCanonicalForm(self, board, player):
# Flip player from 1 to -1
return board * player
def getCubeRotations(self, a):
# Get all combinations of axes that are permutable
n = a.ndim
axcomb = np.array(list(itertools.permutations(range(n), n)))
# Initialize output array
out = np.zeros((6,2,2,2,) + a.shape,dtype=a.dtype)
# Run loop through all axes for flipping and permuting each axis
for i,ax in enumerate(axcomb):
for j,fx in enumerate([1,-1]):
for k,fy in enumerate([1,-1]):
for l,fz in enumerate([1,-1]):
out[i,j,k,l] = np.transpose(a[::fx,::fy,::fz],ax)
return out
def convertListToCube(self,l):
"""This function converts a list into a cube"""
cube = np.zeros((self._base_board.depth, self._base_board.height, self._base_board.width), dtype=np.float32)
for i in range(self._base_board.depth):
for j in range(self._base_board.height):
for k in range(self._base_board.width):
cube[i][j][k] = l[16*i+4*j+k]
return cube
def convertCubeToList(self,cube):
"""This function convets a cube into a list"""
l = []
for i in range(self._base_board.depth):
for j in range(self._base_board.height):
for k in range(self._base_board.width):
l.append(cube[i][j][k])
return l
def getAllTransformations(self,a,pi):
pi_ = self.convertListToCube(pi)
a_out = self.getCubeRotations(a)
pi_out = self.getCubeRotations(pi_)
L = []
for i in range(6):
for j in range(2):
for k in range(2):
for l in range(2):
L.append((a_out[i][j][k][l], self.convertCubeToList(pi_out[i][j][k][l])))
d_inds = []
for i in range(len(L)):
comp = (L[i][0] == a)
uq = np.unique(comp)
if((uq.size == 1) and (uq[0] == True) and (L[i][1] == pi)):
d_inds.append(i)
break
for i in d_inds:
del L[i]
return L
def getSymmetries(self, board, pi):
"""Cube has 48 possible symmetrical transformations"""
return self.getAllTransformations(board, pi)
def stringRepresentation(self, board):
return str(self._base_board.with_np_pieces(np_pieces=board))
def getDims(self):
return(self._base_board.depth , self._base_board.height , self._base_board.width)
def display(board):
print(" -----------------------")
print(' '.join(map(str, range(len(board[0])))))
print(board)
print(" -----------------------")
| [
"numpy.copy",
"numpy.unique",
"Game.Game.__init__",
"numpy.zeros",
"numpy.transpose",
"sys.path.append"
] | [((31, 52), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (46, 52), False, 'import sys\n'), ((344, 363), 'Game.Game.__init__', 'Game.__init__', (['self'], {}), '(self)\n', (357, 363), False, 'from Game import Game\n'), ((2164, 2211), 'numpy.zeros', 'np.zeros', (['((6, 2, 2, 2) + a.shape)'], {'dtype': 'a.dtype'}), '((6, 2, 2, 2) + a.shape, dtype=a.dtype)\n', (2172, 2211), True, 'import numpy as np\n'), ((2664, 2770), 'numpy.zeros', 'np.zeros', (['(self._base_board.depth, self._base_board.height, self._base_board.width)'], {'dtype': 'np.float32'}), '((self._base_board.depth, self._base_board.height, self._base_board\n .width), dtype=np.float32)\n', (2672, 2770), True, 'import numpy as np\n'), ((3838, 3853), 'numpy.unique', 'np.unique', (['comp'], {}), '(comp)\n', (3847, 3853), True, 'import numpy as np\n'), ((947, 961), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (954, 961), True, 'import numpy as np\n'), ((2502, 2539), 'numpy.transpose', 'np.transpose', (['a[::fx, ::fy, ::fz]', 'ax'], {}), '(a[::fx, ::fy, ::fz], ax)\n', (2514, 2539), True, 'import numpy as np\n')] |
import imageio
# imageio.plugins.ffmpeg.download()
import numpy as np
import os
import argparse
import process_anno
from tqdm import tqdm
import torch
import torchvision.transforms as trn
from spatial_transforms import (
Compose,ToTensor)
import json
def extract_frames(output, dirname, filenames, frame_num, anno):
transform = Compose([trn.ToPILImage(),
ToTensor()])
"""Extract frames in a video. """
#Read videos and extract features in batches
for file_cnt, fname in tqdm(enumerate(filenames)):
if fname[:-4] in list(anno.keys()):
bd_info = anno[fname[:-4]]
else:
continue
for bd_ in bd_info:
bd_cnt = bd_['count']
start_time = bd_['start_time']
end_time = bd_['end_time']
if float(start_time) >= float(end_time):
continue
vid = imageio.get_reader(os.path.join(output, dirname, fname), 'ffmpeg')
frames_dir = os.path.join(output, 'Frames', fname[:-4] + '_' + str(bd_cnt))
print(frames_dir)
if not os.path.exists(frames_dir):
os.makedirs(frames_dir)
if len(os.listdir(frames_dir)) == frame_num:
print('already existing vid')
continue
curr_frames=[]
for frame in vid:
if len(frame.shape)<3:
frame = np.repeat(frame,3)
# curr_frames.append(frame)
curr_frames.append(transform(frame).unsqueeze(0))
vid_len = len(curr_frames) - 1
curr_frames = torch.cat(curr_frames, dim=0)
print("Shape of frames: {0}".format(curr_frames.shape))
print('vid_len', vid_len)
st_fr = vid_len * start_time
end_fr = vid_len * end_time
idx = np.linspace(st_fr, end_fr, frame_num)
idx = np.round(idx).astype(int).tolist()
frames_to_save = curr_frames[idx,:,:,:]
# print(frames_to_save)
for frame_n in range(frame_num):
curr_frame = frames_to_save[frame_n,...]
# print('curr_frame', curr_frame.shape)
imageio.imwrite(os.path.join(frames_dir, str(frame_n)+'.jpg'), curr_frame.permute(1,2,0))
print('{}/{} done'.format(file_cnt, len(filenames)))
assert len(os.listdir(frames_dir)) == frame_num, 'Wrong frame number...'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='/saat/Charades_for_SAAT/')
parser.add_argument('--dataset_name', type=str, default='Charades')
parser.add_argument('--frame_per_video', type=int, default=28)
# parser.add_argument('--start_idx', type=int, default=0)
# parser.add_argument('--end_idx', type=int, default=1)
opt = parser.parse_args()
with open('/saat/renewed_charades_label.json', 'r') as f:
anno_info = json.load(f)
anno, train_keys = anno_info, list(anno_info.keys())
namelist = os.listdir(os.path.join(opt.file_path, opt.dataset_name))
namelist_to_pass = []
for id_ in namelist:
if id_[:-4] in train_keys:
namelist_to_pass.append(id_)
extract_frames(opt.file_path, opt.dataset_name, namelist_to_pass, opt.frame_per_video, anno)
| [
"os.path.exists",
"os.listdir",
"numpy.repeat",
"torchvision.transforms.ToPILImage",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.round",
"os.path.join",
"numpy.linspace",
"spatial_transforms.ToTensor",
"json.load",
"torch.cat"
] | [((2076, 2101), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2099, 2101), False, 'import argparse\n'), ((2534, 2546), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2543, 2546), False, 'import json\n'), ((2624, 2669), 'os.path.join', 'os.path.join', (['opt.file_path', 'opt.dataset_name'], {}), '(opt.file_path, opt.dataset_name)\n', (2636, 2669), False, 'import os\n'), ((339, 355), 'torchvision.transforms.ToPILImage', 'trn.ToPILImage', ([], {}), '()\n', (353, 355), True, 'import torchvision.transforms as trn\n'), ((358, 368), 'spatial_transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (366, 368), False, 'from spatial_transforms import Compose, ToTensor\n'), ((1336, 1365), 'torch.cat', 'torch.cat', (['curr_frames'], {'dim': '(0)'}), '(curr_frames, dim=0)\n', (1345, 1365), False, 'import torch\n'), ((1528, 1565), 'numpy.linspace', 'np.linspace', (['st_fr', 'end_fr', 'frame_num'], {}), '(st_fr, end_fr, frame_num)\n', (1539, 1565), True, 'import numpy as np\n'), ((788, 824), 'os.path.join', 'os.path.join', (['output', 'dirname', 'fname'], {}), '(output, dirname, fname)\n', (800, 824), False, 'import os\n'), ((950, 976), 'os.path.exists', 'os.path.exists', (['frames_dir'], {}), '(frames_dir)\n', (964, 976), False, 'import os\n'), ((982, 1005), 'os.makedirs', 'os.makedirs', (['frames_dir'], {}), '(frames_dir)\n', (993, 1005), False, 'import os\n'), ((1016, 1038), 'os.listdir', 'os.listdir', (['frames_dir'], {}), '(frames_dir)\n', (1026, 1038), False, 'import os\n'), ((1180, 1199), 'numpy.repeat', 'np.repeat', (['frame', '(3)'], {}), '(frame, 3)\n', (1189, 1199), True, 'import numpy as np\n'), ((1976, 1998), 'os.listdir', 'os.listdir', (['frames_dir'], {}), '(frames_dir)\n', (1986, 1998), False, 'import os\n'), ((1575, 1588), 'numpy.round', 'np.round', (['idx'], {}), '(idx)\n', (1583, 1588), True, 'import numpy as np\n')] |
"""
tSNE analysis for glbase expression objects.
This should really be merged with MDS and inherited...
"""
from operator import itemgetter
import numpy, random
import matplotlib.pyplot as plot
import matplotlib.patches
from mpl_toolkits.mplot3d import Axes3D, art3d
import scipy.cluster.vq
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
from sklearn.neighbors import NearestCentroid
from scipy.cluster.hierarchy import dendrogram
from . import config
from .draw import draw
from .genelist import genelist
class base_manifold:
    """Shared machinery for 2D manifold projections (tSNE, MDS, ...) of a
    glbase expression object.

    Subclasses are expected to fill in ``self.npos`` -- an (n_samples, 2)
    array of embedded coordinates -- and set ``self.trained = True`` after
    training.
    """
    def __init__(self, parent=None, name='none', manifold_type='base_manifold'):
        self.manifold_type = manifold_type
        self.parent = parent  # the expression object the manifold is built from
        self.name = name
        self.configured = False
        self.trained = False
        self.clusters = False  # fitted cluster model; False until cluster() has run
        self.cluster_labels = None
        self.centroids = None
        # Initialised here so cluster_tree() fails with its intended
        # AssertionError (not an AttributeError) if cluster() was never called.
        self.__cluster_mode = None
        self.__draw = draw()

    def __repr__(self):
        return "<glbase.{0}>".format(self.manifold_type)

    def __str__(self):
        # Fixed: the format string was "{0}}", which rendered a stray '}'.
        ret = ["{0} object".format(self.manifold_type),
            "\tExpression: %s" % self.parent.name,
            "\tConfigured: %s" % self.configured,
            "\tTrained : %s" % self.trained,
            ]
        return "\n".join(ret)

    def configure(self,
        rowwise: bool = False,
        feature_key_name: str = None,
        whiten: bool = False,
        random_state = None,
        verbose: int = 2,
        **kargs):
        """
        **Purpose**
            Configure the {0} Manifold

        **Arguments**
            rowwise (Optional, default=False)
                perform manifold on the rows, rather than the columns

            feature_key_name (Optional, default=False)
                if rowwise=True then this must be set to a key name
                in the expression object ot extract the row labels from.

            random_state (Optional, default=None)
                tSNE is non-determinisic
                set this to the seed you wish to use, otherwise a random number used

            whiten (Optional, default=False)
                set the data to unit variance

        """.format(self.manifold_type)
        if rowwise:
            # rowwise here is not needed
            assert feature_key_name, 'If rowwise=True then feature_key_name must also be valid'
            assert feature_key_name in list(self.parent.keys()), 'feature_key_name "%s" not found in this expression object' % feature_key_name
            self.labels = self.parent[feature_key_name]
            self.data_table = self.parent.getExpressionTable()
        else:
            self.labels = self.parent.getConditionNames()
            # transpose so that conditions become the rows fed to the manifold
            self.data_table = self.parent.getExpressionTable().T

        self.random_state = random_state
        random.seed(self.random_state)
        self.verbose = verbose
        self.whiten = whiten
        self.configured = True

    def scatter(self, filename=None, spot_cols='grey', spots=True, label=False, alpha=0.8,
        spot_size=40, label_font_size=7, cut=None, squish_scales=False,
        only_plot_if_x_in_label=None, draw_clusters=True, **kargs):
        """
        **Purpose**
            plot a scatter plot of the {0}.

        **Arguments**
            filename (Required)

            spot_cols (Optional, default="black" or self.set_cols())
                list of colours for the samples, should be the same length as
                the number of conditions.

                if labels == True and spots == False and spot_cols is not None then
                spot_cols will be used to colour the labels.

            label (Optional, default=False)
                label each spot with the name of the condition

            only_plot_if_x_in_label (Optional, default=None)
                Only plot an individual scatter if X is in the label name.

                This must be a list or tuple of names

                Allows you to effectively remove points from the tSNE plot.

            spots (Optional, default=True)
                Draw the spots

            alpha (Optional, default=0.8)
                alpha value to use to blend the individual points

            spot_size (Optional, default=40)
                Size of the spots on the scatter

            label_font_size (Optional, default=7)
                Size of the spot label text, only valid if label=True

            cut (Optional, default=None)
                Send a rectangle of the form [topleftx, toplefty, bottomrightx, bottomrighty], cut out all of the items within that
                area and return their label and PC score

            squish_scales (Optional, default=False)
                set the limits very aggressively to [minmin(x), minmax(y)]

            draw_clusters (Optional, default=True)
                colour the spots and label by clusters if True

        **Returns**
            None
        """.format(self.manifold_type)
        assert filename, "scatter: Must provide a filename"
        assert self.trained, '{} not trained'.format(self.manifold_type)

        labels = self.labels
        xdata = self.npos[:, 0]
        ydata = self.npos[:, 1]

        # Cannot colour by cluster if cluster() has not been run yet.
        if not self.clusters:
            draw_clusters = False

        return self.__draw.unified_scatter(
            labels,
            xdata,
            ydata,
            x=1,
            y=2,
            filename=filename,
            mode='{0} '.format(self.manifold_type),
            perc_weights=None,
            spot_cols=spot_cols,
            spots=spots,
            label=label,
            alpha=alpha,
            spot_size=spot_size,
            label_font_size=label_font_size,
            cut=cut,
            squish_scales=squish_scales,
            only_plot_if_x_in_label=only_plot_if_x_in_label,
            cluster_data=self.clusters,
            cluster_labels=self.cluster_labels,
            cluster_centroids=self.centroids,
            draw_clusters=draw_clusters,
            **kargs
            )

    def cluster(self, method=None, num_clusters=None, filename=None):
        '''
        **Purpose**
            Cluster the samples of a trained 2D {0} embedding.

        **Arguments**
            method (Required)
                Sklearn method:

                https://scikit-learn.org/stable/modules/clustering.html#k-means

                Implemented:
                'KMeans': The k-means (MiniBatchKMeans) algorithm. Requires a 'num_clusters' argument
                'AgglomerativeClustering': AgglomerativeClustering. Requires a 'num_clusters' argument

            num_clusters (Required)
                The expected number of clusters.

            filename (Optional)
                Currently unused; kept for interface compatibility.

        **Returns**
            The cluster model, the cluster labels and the cluster centroids
        '''.format(self.manifold_type)
        assert self.trained, '{0} not trained'.format(self.manifold_type)
        if self.clusters:
            config.log.warning('Overwriting exisitng cluster data')
            self.clusters = None

        self.__cluster_mode = method

        valid_methods = {'KMeans', 'AgglomerativeClustering'}
        assert method in valid_methods, 'method {0} not found'.format(method)

        xdata = self.npos[:, 0]
        ydata = self.npos[:, 1]

        if method == 'KMeans':
            assert num_clusters, 'if method is KMeans then you need a num_clusters'
            mbk = MiniBatchKMeans(init='k-means++',
                n_clusters=num_clusters,
                batch_size=100,
                n_init=50,
                max_no_improvement=10,
                verbose=1,
                random_state=self.random_state)
            labels = mbk.fit_predict(self.npos)
            self.clusters = mbk
            self.cluster_labels = labels
            self.centroids = mbk.cluster_centers_

        elif method == 'AgglomerativeClustering':
            # Restrict merges to near neighbours so clusters stay spatially coherent.
            knn_graph = kneighbors_graph(self.npos, num_clusters, include_self=False)
            self.__model = AgglomerativeClustering(
                linkage='ward',
                connectivity=knn_graph,
                n_clusters=num_clusters,
                affinity='euclidean')
            # A second, un-truncated model (distance_threshold=0) so that
            # cluster_tree() can later draw the full dendrogram.
            self.__full_model = AgglomerativeClustering(
                distance_threshold=0,
                n_clusters=None,
                linkage='ward',
                connectivity=knn_graph,
                affinity='euclidean'
                )
            self.__full_model_fp = self.__full_model.fit(self.npos)
            labels = self.__model.fit_predict(self.npos)
            self.clusters = self.__model
            self.cluster_labels = labels
            # AgglomerativeClustering has no centroids of its own; derive
            # them from the labelling with a nearest-centroid fit.
            clf = NearestCentroid()
            clf.fit(self.npos, labels)
            self.centroids = clf.centroids_

        config.log.info('tsne.cluster: {0} clustered'.format(method))
        return self.clusters, self.cluster_labels, self.centroids

    def cluster_tree(self, filename, **kargs):
        """
        **Purpose**
            Draw the relationship between clusters as a tree.

            Only valid if clusering mode was 'AgglomerativeClustering'

        **Arguments**
            filename (Required)
                filename to save the image to

        """
        assert filename, 'You must specify a filename'
        assert self.__cluster_mode == 'AgglomerativeClustering', 'cluster_tree can only be used if the cluster method was AgglomerativeClustering'
        assert self.trained, '{} not trained'.format(self.manifold_type)

        fig = self.__draw.getfigure()

        # Create linkage matrix and then plot the dendrogram
        # create the counts of samples under each node
        counts = numpy.zeros(self.__full_model_fp.children_.shape[0])
        n_samples = len(self.__full_model_fp.labels_)
        for i, merge in enumerate(self.__full_model_fp.children_):
            current_count = 0
            for child_idx in merge:
                if child_idx < n_samples:
                    current_count += 1  # leaf node
                else:
                    current_count += counts[child_idx - n_samples]
            counts[i] = current_count

        linkage_matrix = numpy.column_stack([
            self.__full_model_fp.children_,
            self.__full_model_fp.distances_,
            counts]).astype(float)

        ax = fig.add_subplot(111)
        # Plot the corresponding dendrogram, truncated to the number of
        # clusters actually used by the fitted model.
        dendrogram(linkage_matrix, ax=ax,
            truncate_mode='level', p=self.__model.n_clusters,
            **kargs)

        self.__draw.savefigure(fig, filename)
| [
"sklearn.cluster.AgglomerativeClustering",
"scipy.cluster.hierarchy.dendrogram",
"sklearn.cluster.MiniBatchKMeans",
"numpy.column_stack",
"random.seed",
"sklearn.neighbors.NearestCentroid",
"sklearn.neighbors.kneighbors_graph",
"numpy.zeros"
] | [((2912, 2942), 'random.seed', 'random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2923, 2942), False, 'import numpy, random\n'), ((9775, 9827), 'numpy.zeros', 'numpy.zeros', (['self.__full_model_fp.children_.shape[0]'], {}), '(self.__full_model_fp.children_.shape[0])\n', (9786, 9827), False, 'import numpy, random\n'), ((10494, 10591), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['linkage_matrix'], {'ax': 'ax', 'truncate_mode': '"""level"""', 'p': 'self.__model.n_clusters'}), "(linkage_matrix, ax=ax, truncate_mode='level', p=self.__model.\n n_clusters, **kargs)\n", (10504, 10591), False, 'from scipy.cluster.hierarchy import dendrogram\n'), ((7502, 7662), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'init': '"""k-means++"""', 'n_clusters': 'num_clusters', 'batch_size': '(100)', 'n_init': '(50)', 'max_no_improvement': '(10)', 'verbose': '(1)', 'random_state': 'self.random_state'}), "(init='k-means++', n_clusters=num_clusters, batch_size=100,\n n_init=50, max_no_improvement=10, verbose=1, random_state=self.random_state\n )\n", (7517, 7662), False, 'from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering\n'), ((7996, 8057), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['self.npos', 'num_clusters'], {'include_self': '(False)'}), '(self.npos, num_clusters, include_self=False)\n', (8012, 8057), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((8086, 8201), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'linkage': '"""ward"""', 'connectivity': 'knn_graph', 'n_clusters': 'num_clusters', 'affinity': '"""euclidean"""'}), "(linkage='ward', connectivity=knn_graph, n_clusters=\n num_clusters, affinity='euclidean')\n", (8109, 8201), False, 'from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering\n'), ((8295, 8424), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'distance_threshold': '(0)', 'n_clusters': 'None', 'linkage': 
'"""ward"""', 'connectivity': 'knn_graph', 'affinity': '"""euclidean"""'}), "(distance_threshold=0, n_clusters=None, linkage=\n 'ward', connectivity=knn_graph, affinity='euclidean')\n", (8318, 8424), False, 'from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering\n'), ((8762, 8779), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (8777, 8779), False, 'from sklearn.neighbors import NearestCentroid\n'), ((10262, 10360), 'numpy.column_stack', 'numpy.column_stack', (['[self.__full_model_fp.children_, self.__full_model_fp.distances_, counts]'], {}), '([self.__full_model_fp.children_, self.__full_model_fp.\n distances_, counts])\n', (10280, 10360), False, 'import numpy, random\n')] |
import numpy as np
class RunningScore(object):
    """Accumulates a confusion matrix over batches of label maps and
    derives standard segmentation metrics from it."""

    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    @staticmethod
    def _fast_hist(label_true, label_pred, n_class):
        # Count only pixels whose ground-truth label is a valid class id.
        valid = (label_true >= 0) & (label_true < n_class)
        combined = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(combined, minlength=n_class ** 2).reshape(n_class, n_class)

    def update(self, label_trues, label_preds):
        """Fold one batch of (truth, prediction) pairs into the running matrix."""
        for truth, pred in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(truth.flatten(), pred.flatten(), self.n_classes)

    def get_scores(self):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        eps = np.finfo(np.float32).eps  # avoids division by zero on empty rows
        hist = self.confusion_matrix
        tp = np.diag(hist)
        row_sums = hist.sum(axis=1)
        total = hist.sum()

        acc = tp.sum() / (total + eps)
        acc_cls = np.nanmean(tp / (row_sums + eps))
        iu = tp / (row_sums + hist.sum(axis=0) - tp + eps)
        mean_iu = np.nanmean(iu)
        freq = row_sums / (total + eps)
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = {c: iu[c] for c in range(self.n_classes)}

        summary = {'Overall_Acc': acc,
                   'Mean_Acc': acc_cls,
                   'FreqW_Acc': fwavacc,
                   'Mean_IoU': mean_iu}
        return summary, cls_iu

    def reset(self):
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
if __name__ == "__main__":
n_class = 2
score = RunningScore(n_class)
label_true = np.array([1, 0, 0, 1, 1, 0, 1, 0, 1, 0])
label_pred = np.array([1, 1, 0, 1, 0, 0, 1, 1, 0, 0])
score.update(label_true, label_pred)
print(score.confusion_matrix)
| [
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.nanmean",
"numpy.finfo"
] | [((1789, 1829), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 0, 1, 0, 1, 0]'], {}), '([1, 0, 0, 1, 1, 0, 1, 0, 1, 0])\n', (1797, 1829), True, 'import numpy as np\n'), ((1847, 1887), 'numpy.array', 'np.array', (['[1, 1, 0, 1, 0, 0, 1, 1, 0, 0]'], {}), '([1, 1, 0, 1, 0, 0, 1, 1, 0, 0])\n', (1855, 1887), True, 'import numpy as np\n'), ((151, 183), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (159, 183), True, 'import numpy as np\n'), ((939, 952), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (946, 952), True, 'import numpy as np\n'), ((1131, 1150), 'numpy.nanmean', 'np.nanmean', (['acc_cls'], {}), '(acc_cls)\n', (1141, 1150), True, 'import numpy as np\n'), ((1248, 1262), 'numpy.nanmean', 'np.nanmean', (['iu'], {}), '(iu)\n', (1258, 1262), True, 'import numpy as np\n'), ((1649, 1691), 'numpy.zeros', 'np.zeros', (['(self.n_classes, self.n_classes)'], {}), '((self.n_classes, self.n_classes))\n', (1657, 1691), True, 'import numpy as np\n'), ((1027, 1047), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1035, 1047), True, 'import numpy as np\n'), ((1087, 1107), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1095, 1107), True, 'import numpy as np\n'), ((1204, 1224), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1212, 1224), True, 'import numpy as np\n'), ((1302, 1322), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1310, 1322), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.