code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import cv2
import glob
import os
import sys
from carlaModule import carla
from carlaSyncMode import CarlaSyncMode
import random
import pygame
import numpy as np
from datetime import datetime
# Hyperparameters Here
n_vehicle = 3   # number of vehicles to spawn (camera rides the last one)
n_walkers = 0   # number of pedestrians to spawn
show_img = 0        # 1: pop an OpenCV window with bounding boxes after each save
show_recording = 1  # 1: mirror the camera feed in a pygame window
# Camera position setting
camera_x = 5.5
camera_y = 0
camera_z = 1
# Data description
town_num = 'town01'
target_color = 'Red'
step_size = 5            # save an image pair every step_size simulation ticks
image_goal = 10000-6331  # remaining number of image pairs to collect
# Target threshold size
target_size = 'L'
if target_size == 'S':
    fire_hydrant_qualified_size_min = 10*10 # medium
    fire_hydrant_qualified_size_max = 32*32 # small
elif target_size == 'M':
    fire_hydrant_qualified_size_min = 32*32 # medium
    fire_hydrant_qualified_size_max = 96*96 # medium
elif target_size == 'L':
    fire_hydrant_qualified_size_min = 96*96 # large
    fire_hydrant_qualified_size_max = 1080*1920 # large
# Output directory names, e.g. 'town01_L_Red_seg'
out_seg = '%s_%s_%s_seg' % (town_num, target_size, target_color)
out_rgb = '%s_%s_%s_rgb' % (town_num, target_size, target_color)
img_resolution = (1920, 1080)
# Camera angle setting
# if target_size == 'S':
#     camera_pitch = 0
#     camera_yaw = 15
# elif target_size == 'M':
#     camera_pitch = -15
#     camera_yaw = 300
# elif target_size == 'L':
camera_pitch = -15
camera_yaw = 90
# End of Hyperparameters
# End of Hyperparameters
def draw_image(surface, image, blend=False):
    """Blit a CARLA camera image onto a pygame surface.

    The raw BGRA byte buffer is reshaped to (height, width, 4); the alpha
    channel is dropped and the remaining channels reversed to RGB before
    the pygame surface is built.  When *blend* is true the image is drawn
    semi-transparently.
    """
    raw = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    bgra = np.reshape(raw, (image.height, image.width, 4))
    rgb = bgra[:, :, 2::-1]  # drop alpha and reverse BGR -> RGB in one slice
    image_surface = pygame.surfarray.make_surface(rgb.swapaxes(0, 1))
    if blend:
        image_surface.set_alpha(100)
    surface.blit(image_surface, (0, 0))
def get_font():
    """Return a 14pt pygame font, preferring 'ubuntumono' when installed."""
    preferred = 'ubuntumono'
    available = [name for name in pygame.font.get_fonts()]
    chosen = preferred if preferred in available else available[0]
    return pygame.font.Font(pygame.font.match_font(chosen), 14)
def should_quit():
    """Return True when the user closed the window or pressed Escape."""
    for event in pygame.event.get():
        closed = event.type == pygame.QUIT
        escaped = event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE
        if closed or escaped:
            return True
    return False
def postprocess(seg_file, rgb_file):
    """Filter a saved image pair by the fire hydrants visible in the segmentation.

    Thresholds the red channel of the segmentation image, runs connected
    components, and counts blobs that plausibly are hydrants.  The pair is
    kept (returns 1) only when exactly one hydrant is found and it falls
    inside the configured size window; otherwise both files are deleted
    and 0 is returned.
    """
    seg_img = cv2.imread(seg_file)
    rgb_img = cv2.imread(rgb_file)
    # Binarize the red channel; hydrant pixels are expected above 110.
    _, timg = cv2.threshold(seg_img[:, :, 2], 110, 220, cv2.THRESH_BINARY)
    output = cv2.connectedComponentsWithStats(timg, 8, cv2.CV_32S)
    num_labels = output[0]
    stats = output[2]
    fire_hydrant_found = 0
    fire_hydrant_qualified = 0
    for i in range(num_labels):
        # Reject empty blobs, the full-frame background component, and
        # very short blobs (chains and similar clutter).
        if (
            stats[i, cv2.CC_STAT_AREA] > 0 and
            stats[i, cv2.CC_STAT_WIDTH] != 1920 and stats[i, cv2.CC_STAT_HEIGHT] != 1080 and
            stats[i, cv2.CC_STAT_HEIGHT] > 10 # Check for chains
        ):
            fire_hydrant_found += 1
            if fire_hydrant_qualified_size_min < stats[i, cv2.CC_STAT_AREA] < fire_hydrant_qualified_size_max :
                fire_hydrant_qualified += 1
            x = stats[i, cv2.CC_STAT_LEFT]
            y = stats[i, cv2.CC_STAT_TOP]
            w = stats[i, cv2.CC_STAT_WIDTH]
            h = stats[i, cv2.CC_STAT_HEIGHT]
            if show_img == 1:
                cv2.rectangle(rgb_img, (x, y), (x + w, y + h), (128, 255, 0))
    print('found', fire_hydrant_found, 'qualified', fire_hydrant_qualified)
    if show_img == 1 and fire_hydrant_found == 1:
        cv2.imshow('Boundig Boxes', rgb_img)
        cv2.waitKey(0)
    if fire_hydrant_found != 1 or fire_hydrant_qualified != 1:
        # Reject the sample: remove both images so the dataset stays clean.
        os.remove(seg_file)
        os.remove(rgb_file)
        return 0
    else:
        print('saved img', rgb_file)
        return 1
def main():
    """Drive a CARLA episode that collects RGB / segmentation image pairs.

    Spawns vehicles and (optionally) pedestrians, attaches an RGB and a
    semantic-segmentation camera to the last spawned vehicle, then steps
    the simulator in synchronous mode, periodically saving image pairs
    that pass the postprocess() size filter, until image_goal pairs have
    been collected or the user quits.
    """
    # weather parameters
    weather_parameters = {
        'cloudiness' : 0, # 0 ~ 100
        'precipitation' : 0, # 0 ~ 100
        'precipitation_deposits' : 0, # 0 ~ 100
        'wind_intensity' : 0, # 0 ~ 100
        'sun_azimuth_angle' : 0, # 0 ~ 360
        'sun_altitude_angle' : 50, # -90 ~ 90
        'fog_density' : 0, # 0 ~ 180
        'fog_distance' : 0, # 0 ~ infinite
        'wetness' : 0, # 0 ~ 100
    }
    weather_keys = list(weather_parameters.keys())
    weather_index = 0
    image_collected = 0
    # Timestamp used to make saved file names unique per run.
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
    try:
        os.mkdir(out_seg)
        os.mkdir(out_rgb)
    except:
        print("Did not create image directories")
    actor_list = []
    pygame.init()
    if show_recording == 1:
        display = pygame.display.set_mode(img_resolution,
            pygame.HWSURFACE | pygame.DOUBLEBUF)
        font = get_font()
    clock = pygame.time.Clock()
    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)
    world = client.get_world()
    try:
        m = world.get_map()
        ### Spawn vehicles
        spawn_points = m.get_spawn_points()
        print('%d spawn points generated' % len(spawn_points))
        random.shuffle(spawn_points)
        waypoints = []
        blueprint_library = world.get_blueprint_library()
        vehicle_blueprints = blueprint_library.filter('vehicle.*')
        for i in range(n_vehicle):
            vehicle = world.spawn_actor(
                random.choice(vehicle_blueprints),
                spawn_points[i])
            actor_list.append(vehicle)
            # Physics off: vehicles are teleported along waypoints below.
            vehicle.set_simulate_physics(False)
            waypoints.append(m.get_waypoint(spawn_points[i].location))
        ### Spawn Pedestrians
        walkers_list = []
        # 0. Choose a blueprint for the walkers
        blueprintsWalkers = world.get_blueprint_library().filter("walker.pedestrian.*")
        walker_bp = random.choice(blueprintsWalkers)
        # 1. Take all the random locations to spawn
        spawn_points = []
        for i in range(n_walkers):
            spawn_point = carla.Transform()
            spawn_point.location = world.get_random_location_from_navigation()
            spawn_points.append(spawn_point)
        # 2. Build the batch of commands to spawn the pedestrians
        batch = []
        for spawn_point in spawn_points:
            walker_bp = random.choice(blueprintsWalkers)
            batch.append(carla.command.SpawnActor(walker_bp, spawn_point))
        # 2.1 apply the batch
        results = client.apply_batch_sync(batch, True)
        for i in range(len(results)):
            walkers_list.append({"id": results[i].actor_id})
        # 3. Spawn walker AI controllers for each walker
        batch = []
        walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker')
        for i in range(len(walkers_list)):
            batch.append(carla.command.SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"]))
        # 3.1 apply the batch
        results = client.apply_batch_sync(batch, True)
        for i in range(len(results)):
            walkers_list[i]["con"] = results[i].actor_id
        # 4. Put together the walker and controller ids
        all_id = []
        for i in range(len(walkers_list)):
            all_id.append(walkers_list[i]["con"])
            all_id.append(walkers_list[i]["id"])
        all_actors = world.get_actors(all_id)
        actor_list.extend(all_actors)
        # wait for a tick to ensure client receives the last transform of the walkers we have just created
        world.wait_for_tick()
        # 5. initialize each controller and set target to walk to (list is [controller, actor, controller, actor ...])
        for i in range(0, len(all_actors), 2):
            # start walker
            all_actors[i].start()
            # set walk to random point
            all_actors[i].go_to_location(world.get_random_location_from_navigation())
            # random max speed
            all_actors[i].set_max_speed(1 + random.random())
        # RGB camera, attached to the last spawned vehicle.
        camera_rgb_bp = blueprint_library.find('sensor.camera.rgb')
        camera_rgb_bp.set_attribute('image_size_x', '%d'%img_resolution[0])
        camera_rgb_bp.set_attribute('image_size_y', '%d'%img_resolution[1])
        camera_rgb = world.spawn_actor(
            camera_rgb_bp,
            carla.Transform(carla.Location(x=camera_x, y=camera_y, z=camera_z), carla.Rotation(pitch=camera_pitch, yaw=camera_yaw)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)
        # Semantic-segmentation camera with the exact same pose.
        camera_semseg_bp = blueprint_library.find('sensor.camera.semantic_segmentation')
        camera_semseg_bp.set_attribute('image_size_x', '%d'%img_resolution[0])
        camera_semseg_bp.set_attribute('image_size_y', '%d'%img_resolution[1])
        camera_semseg = world.spawn_actor(
            camera_semseg_bp,
            carla.Transform(carla.Location(x=camera_x, y=camera_y, z=camera_z), carla.Rotation(pitch=camera_pitch, yaw=camera_yaw)),
            attach_to=vehicle)
        actor_list.append(camera_semseg)
        clock_count = 1
        # Create a synchronous mode context.
        with CarlaSyncMode(world, camera_rgb, camera_semseg, fps=10) as sync_mode:
            while True:
                # Begin change weather
                weather = carla.WeatherParameters(**weather_parameters)
                world.set_weather(weather)
                # weather_parameters[weather_keys[weather_index]] += 25
                # if weather_keys[weather_index] == 'sun_azimuth_angle':
                #     weather_parameters[weather_keys[weather_index]] %= 360
                # elif weather_keys[weather_index] == 'sun_altitude_angle':
                #     weather_parameters[weather_keys[weather_index]] += 90
                #     weather_parameters[weather_keys[weather_index]] %= 180
                #     weather_parameters[weather_keys[weather_index]] -= 90
                # elif weather_keys[weather_index] == 'fog_density':
                #     weather_parameters[weather_keys[weather_index]] %= 180
                # else:
                #     weather_parameters[weather_keys[weather_index]] %= 100
                # weather_index += 1
                # weather_index %= len(weather_keys)
                # End change weather
                if should_quit(): return
                if image_collected == image_goal: return
                clock.tick()
                clock_count += 1
                # Advance the simulation and wait for the data.
                snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0)
                # Choose the next waypoint and update the car location.
                for i in range(n_vehicle):
                    waypoints[i] = random.choice(waypoints[i].next(1.5))
                    actor_list[i].set_transform(waypoints[i].transform)
                # if False:
                if clock_count % step_size == 0:
                    clock_count = 1
                    image_semseg.convert(carla.ColorConverter.CityScapesPalette)
                    seg_file = '%s/%s_%06d.png' % (out_seg, current_time, image_semseg.frame)
                    rgb_file = '%s/%s_%06d.png' % (out_rgb, current_time, image_rgb.frame)
                    image_semseg.save_to_disk(seg_file)
                    image_rgb.save_to_disk(rgb_file)
                    # postprocess() returns 1 only when the pair is kept.
                    image_collected += postprocess(seg_file, rgb_file)
                # Draw the display.
                if show_recording == 1:
                    fps = round(1.0 / snapshot.timestamp.delta_seconds)
                    draw_image(display, image_rgb)
                    draw_image(display, image_semseg, blend=True)
                    display.blit(
                        font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)),
                        (8, 10))
                    display.blit(
                        font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)),
                        (8, 28))
                    pygame.display.flip()
    finally:
        # Always destroy spawned actors, even on error or early return.
        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()
        pygame.quit()
        print('done.')
if __name__ == '__main__':
    # Entry point: run the collection loop and allow a clean Ctrl-C exit
    # (main()'s finally block destroys all spawned actors either way).
    # Fix: the last line was corrupted by dataset-extraction residue fused
    # onto the print statement, making the file unparseable.
    try:
        main()
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
"os.mkdir",
"os.remove",
"pygame.event.get",
"random.shuffle",
"carlaModule.carla.Transform",
"pygame.font.Font",
"cv2.rectangle",
"carlaModule.carla.Location",
"cv2.imshow",
"carlaModule.carla.command.SpawnActor",
"pygame.display.set_mode",
"numpy.reshape",
"carlaModule.carla.WeatherParamet... | [((1670, 1719), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (1680, 1719), True, 'import numpy as np\n'), ((2114, 2142), 'pygame.font.match_font', 'pygame.font.match_font', (['font'], {}), '(font)\n', (2136, 2142), False, 'import pygame\n'), ((2154, 2180), 'pygame.font.Font', 'pygame.font.Font', (['font', '(14)'], {}), '(font, 14)\n', (2170, 2180), False, 'import pygame\n'), ((2219, 2237), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2235, 2237), False, 'import pygame\n'), ((2485, 2505), 'cv2.imread', 'cv2.imread', (['seg_file'], {}), '(seg_file)\n', (2495, 2505), False, 'import cv2\n'), ((2520, 2540), 'cv2.imread', 'cv2.imread', (['rgb_file'], {}), '(rgb_file)\n', (2530, 2540), False, 'import cv2\n'), ((2555, 2615), 'cv2.threshold', 'cv2.threshold', (['seg_img[:, :, 2]', '(110)', '(220)', 'cv2.THRESH_BINARY'], {}), '(seg_img[:, :, 2], 110, 220, cv2.THRESH_BINARY)\n', (2568, 2615), False, 'import cv2\n'), ((2629, 2682), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['timg', '(8)', 'cv2.CV_32S'], {}), '(timg, 8, cv2.CV_32S)\n', (2661, 2682), False, 'import cv2\n'), ((4626, 4640), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4638, 4640), False, 'from datetime import datetime\n'), ((4832, 4845), 'pygame.init', 'pygame.init', ([], {}), '()\n', (4843, 4845), False, 'import pygame\n'), ((5016, 5035), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (5033, 5035), False, 'import pygame\n'), ((5050, 5081), 'carlaModule.carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (5062, 5081), False, 'from carlaModule import carla\n'), ((3708, 3744), 'cv2.imshow', 'cv2.imshow', (['"""Boundig Boxes"""', 'rgb_img'], {}), "('Boundig Boxes', rgb_img)\n", (3718, 3744), False, 'import cv2\n'), ((3753, 3767), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', 
(3764, 3767), False, 'import cv2\n'), ((3840, 3859), 'os.remove', 'os.remove', (['seg_file'], {}), '(seg_file)\n', (3849, 3859), False, 'import os\n'), ((3868, 3887), 'os.remove', 'os.remove', (['rgb_file'], {}), '(rgb_file)\n', (3877, 3887), False, 'import os\n'), ((4702, 4719), 'os.mkdir', 'os.mkdir', (['out_seg'], {}), '(out_seg)\n', (4710, 4719), False, 'import os\n'), ((4728, 4745), 'os.mkdir', 'os.mkdir', (['out_rgb'], {}), '(out_rgb)\n', (4736, 4745), False, 'import os\n'), ((4893, 4969), 'pygame.display.set_mode', 'pygame.display.set_mode', (['img_resolution', '(pygame.HWSURFACE | pygame.DOUBLEBUF)'], {}), '(img_resolution, pygame.HWSURFACE | pygame.DOUBLEBUF)\n', (4916, 4969), False, 'import pygame\n'), ((5323, 5351), 'random.shuffle', 'random.shuffle', (['spawn_points'], {}), '(spawn_points)\n', (5337, 5351), False, 'import random\n'), ((6045, 6077), 'random.choice', 'random.choice', (['blueprintsWalkers'], {}), '(blueprintsWalkers)\n', (6058, 6077), False, 'import random\n'), ((12328, 12341), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (12339, 12341), False, 'import pygame\n'), ((1639, 1656), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1647, 1656), True, 'import numpy as np\n'), ((1983, 2006), 'pygame.font.get_fonts', 'pygame.font.get_fonts', ([], {}), '()\n', (2004, 2006), False, 'import pygame\n'), ((6218, 6235), 'carlaModule.carla.Transform', 'carla.Transform', ([], {}), '()\n', (6233, 6235), False, 'from carlaModule import carla\n'), ((6511, 6543), 'random.choice', 'random.choice', (['blueprintsWalkers'], {}), '(blueprintsWalkers)\n', (6524, 6543), False, 'import random\n'), ((9299, 9354), 'carlaSyncMode.CarlaSyncMode', 'CarlaSyncMode', (['world', 'camera_rgb', 'camera_semseg'], {'fps': '(10)'}), '(world, camera_rgb, camera_semseg, fps=10)\n', (9312, 9354), False, 'from carlaSyncMode import CarlaSyncMode\n'), ((5605, 5638), 'random.choice', 'random.choice', (['vehicle_blueprints'], {}), '(vehicle_blueprints)\n', (5618, 
5638), False, 'import random\n'), ((6569, 6617), 'carlaModule.carla.command.SpawnActor', 'carla.command.SpawnActor', (['walker_bp', 'spawn_point'], {}), '(walker_bp, spawn_point)\n', (6593, 6617), False, 'from carlaModule import carla\n'), ((8516, 8566), 'carlaModule.carla.Location', 'carla.Location', ([], {'x': 'camera_x', 'y': 'camera_y', 'z': 'camera_z'}), '(x=camera_x, y=camera_y, z=camera_z)\n', (8530, 8566), False, 'from carlaModule import carla\n'), ((8568, 8618), 'carlaModule.carla.Rotation', 'carla.Rotation', ([], {'pitch': 'camera_pitch', 'yaw': 'camera_yaw'}), '(pitch=camera_pitch, yaw=camera_yaw)\n', (8582, 8618), False, 'from carlaModule import carla\n'), ((9039, 9089), 'carlaModule.carla.Location', 'carla.Location', ([], {'x': 'camera_x', 'y': 'camera_y', 'z': 'camera_z'}), '(x=camera_x, y=camera_y, z=camera_z)\n', (9053, 9089), False, 'from carlaModule import carla\n'), ((9091, 9141), 'carlaModule.carla.Rotation', 'carla.Rotation', ([], {'pitch': 'camera_pitch', 'yaw': 'camera_yaw'}), '(pitch=camera_pitch, yaw=camera_yaw)\n', (9105, 9141), False, 'from carlaModule import carla\n'), ((9458, 9503), 'carlaModule.carla.WeatherParameters', 'carla.WeatherParameters', ([], {}), '(**weather_parameters)\n', (9481, 9503), False, 'from carlaModule import carla\n'), ((3511, 3572), 'cv2.rectangle', 'cv2.rectangle', (['rgb_img', '(x, y)', '(x + w, y + h)', '(128, 255, 0)'], {}), '(rgb_img, (x, y), (x + w, y + h), (128, 255, 0))\n', (3524, 3572), False, 'import cv2\n'), ((7086, 7103), 'carlaModule.carla.Transform', 'carla.Transform', ([], {}), '()\n', (7101, 7103), False, 'from carlaModule import carla\n'), ((8181, 8196), 'random.random', 'random.random', ([], {}), '()\n', (8194, 8196), False, 'import random\n'), ((12185, 12206), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (12204, 12206), False, 'import pygame\n')] |
#!/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from glumpy import gl, gloo
from glumpy.app.window import key
from utils import gamegl
from utils import animation
from utils import audio
from utils.midi import MidiMod
vertex = """
attribute vec2 position;
void main (void)
{
gl_Position = vec4(position, 0.0, 1.0);
}
"""
fragment = open("bulb.glsl").read()
p = 8
class MandelBulb(gamegl.Window):
    """Interactive window that renders a ray-marched Mandelbulb fractal.

    Scene state lives in self.params (shared with the Demo animation);
    render() pushes those values into the shader each frame, and the
    mouse/keyboard handlers mutate them.
    """

    def init_program(self):
        """Compile the shader program and configure quality/blend state."""
        self.program = gloo.Program(vertex, fragment, count=4)
        # Two-triangle strip covering the whole viewport.
        self.program['position'] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]
        if self.dorecord:
            # Recording mode: fixed high-quality ray-march settings.
            self.program['max_iter'] = 50
            self.program['max_march'] = 200
            self.program['fast'] = 1
        else:
            self.program['max_iter'] = self.params['max_iter']
            self.program['max_march'] = self.params['max_march']
            self.program['fast'] = 0
        # Render
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)

    def render(self, dt):
        """Push current params into the shader and draw one frame.

        Returns False when nothing changed (self.draw unset), True after
        drawing; self.draw is cleared so the next frame is skipped unless
        an input handler or the animation marks the scene dirty again.
        """
        if not self.draw:
            return False
        self.window.clear()
        self.program['C'] = [
            self.params['cX'], self.params['cY'], self.params['cZ']]
        self.program['power'] = self.params['power']
        self.program['pitch'] = self.params['pitch']
        self.program['yaw'] = self.params['yaw']
        self.program['zoom'] = self.params['zoom']
        self.program['aO'] = self.params['aO']
        self.program['hue'] = self.params['hue']
        self.program['minDist'] = self.params['min_dist']
        self.program.draw(gl.GL_TRIANGLE_STRIP)
        self.draw = False
        return True

    def on_resize(self, width, height):
        # Keep the shader's viewport size in sync with the window.
        self.program["size"] = width, height
        self.draw = True

    def on_mouse_drag(self, x, y, dx, dy, button):
        # Dragging orbits the camera: vertical motion -> pitch, horizontal -> yaw.
        self.params["pitch"] -= dy / 50
        self.params["yaw"] += dx / 50
        self.draw = True
        print(x, y, dx, dy, button)

    def on_mouse_scroll(self, x, y, dx, dy):
        # Zoom proportionally so the step size shrinks as we get closer.
        self.params["zoom"] += self.params["zoom"] / 10 * dy
        self.draw = True

    def on_key_press(self, k, modifiers):
        """Keyboard tweaks: arrows move C in x/y, E/Q in z, A/D change power."""
        super().on_key_press(k, modifiers)
        s = 0.1
        if k == key.UP:
            self.params['cX'] += s
        if k == key.DOWN:
            self.params['cX'] -= s
        if k == key.LEFT:
            self.params['cY'] += s
        if k == key.RIGHT:
            self.params['cY'] -= s
        elif k == 69:  # presumably the 'E' key code — TODO confirm
            self.params["cZ"] += s
        elif k == 81:  # presumably 'Q'
            self.params["cZ"] -= s
        elif k == 65:  # presumably 'A'
            self.params["power"] += .1
        elif k == 68:  # presumably 'D'
            self.params["power"] -= .1
        self.draw = True
# Initial scene parameters shared by the window and the animation, plus
# the "mods" section describing GUI slider ranges for tweakable values.
params = {
    'cX': -.2,
    'cY': -1,
    'cZ': 0,
    'power': 8,        # Mandelbulb exponent
    'pitch': 0,
    'yaw': 0,
    'zoom': -0.59,
    'max_iter': 16,    # fractal iteration count
    'max_march': 150,  # ray-march step limit
    'min_dist': 0.001, # ray-march hit threshold
    'aO': 1,           # ambient occlusion strength
    'hue': .6,
    "mods": {
        "power": {
            "type": "float",
            "sliders": True,
            "min": 0,
            "max": 12,
            "resolution": 0.1,
        },
        "min_dist": {
            "type": "float",
            "sliders": True,
            "min": 0,
            "max": 0.5,
            "resolution": 0.001,
        },
        "cX": {
            "type": "float",
            "sliders": True,
            "min": -5,
            "max": 5,
            "resolution": 0.1,
        },
        "cY": {
            "type": "float",
            "sliders": True,
            "min": -5,
            "max": 5,
            "resolution": 0.1,
        },
        "cZ": {
            "type": "float",
            "sliders": True,
            "min": -5,
            "max": 5,
            "resolution": 0.1,
        },
    }
}
class Demo(animation.Animation):
    """Scripted parameter animation for the Mandelbulb, driven by MIDI/audio.

    Each scene handler is called per frame while its scene is active;
    self.scene_init is true on the handler's first frame and self.scene_pos
    indexes into per-scene interpolation arrays built at that point.
    """

    def __init__(self):
        # (start_frame, handler) pairs in descending frame order.
        self.scenes = [
            [3300, None],
            [2751, self.ending],
            [1500, self.sub],
            [1250, self.verse2],
            [500, self.verse1],
            [0, self.intro],
        ]
        super().__init__(params)

    def ending(self, frame):
        """Outro: ramp power down to 1 and drift C while yawing slowly."""
        if self.scene_init:
            # The +10 offsets keep logspace inputs positive; undone below.
            self.m_mod = self.linspace(self.params["power"], 1.)
            self.x_mod = self.logspace(self.params["cX"] + 10, -2 + 10)
            self.y_mod = self.linspace(self.params["cY"] + 10, 10)
            self.z_mod = self.logspace(self.params["cZ"] + 10, 10.5)
        self.params["power"] = self.m_mod[self.scene_pos]
        self.params["cX"] = self.x_mod[self.scene_pos] - 10
        self.params["cY"] = self.y_mod[self.scene_pos] - 10
        self.params["cZ"] = self.z_mod[self.scene_pos] - 10
        self.params["yaw"] += 0.003

    def sub(self, frame):
        """Bass-drop scene: oscillate C with percussion/bell accumulators."""
        if self.scene_init:
            self.base_x = self.params["cX"]
            self.base_y = self.params["cY"]
            self.base_z = self.params["cZ"]
            self.z_mod = 0
            self.x_mod = 0
            self.y_mod = 0
            self.y_pos = self.logspace(self.params["cY"] + 10, 5.)
        self.params["power"] += 6e-2 * self.bass
        # Accumulated phases, advanced by audio/MIDI intensity each frame.
        self.x_mod += .1 * self.perc + .01 * self.low
        self.y_mod += .1 * self.bell
        self.z_mod += .05 * self.bell
        self.params["cX"] = self.base_x + 2.6 * np.sin(self.x_mod)
        self.params["cZ"] = self.base_z - 2 * np.abs(np.sin(self.z_mod))
        self.params["cY"] = self.y_pos[self.scene_pos] - 10
        self.params["hue"] += 1e-3 * self.bell
        self.params["yaw"] += 4e-3
        self.params["pitch"] += 2e-3

    def verse2(self, frame):
        """Second verse: zoom out toward -.5 while percussion nudges C."""
        if self.scene_init:
            self.y_mod = 0
            self.base_y = self.params["cY"]
            self.z_mod = self.linspace(self.params["zoom"], -.5)
        self.params["zoom"] = self.z_mod[self.scene_pos]
        self.params["power"] -= 1e-2 * self.bell
        self.params["cX"] -= 3e-2 * self.perc
        self.params["cY"] += 3e-2 * self.perc
        self.params["hue"] += 1e-4 * self.bell

    def verse1(self, frame):
        """First verse: tighten min_dist and slide cZ to 1."""
        if self.scene_init:
            self.m_mod = self.logspace(self.params["min_dist"] + 10, 0.01 + 10)
            self.z_mod = self.linspace(self.params["cZ"], 1.)
        self.params["hue"] -= 1e-4 * self.hgh
        self.params["power"] += 1e-2 * self.bell
        self.params["cY"] -= 3.8e-3 * self.hgh
        self.params["cZ"] = self.z_mod[self.scene_pos]
        self.params["min_dist"] = self.m_mod[self.scene_pos] - 10

    def intro(self, frame):
        """Intro: grow power from 1.9 while zoom and cY ease in."""
        if self.scene_init:
            # NOTE(review): p_mod is built but never read — confirm intent.
            self.p_mod = self.linspace(1.9000000000000123, 4)
            self.z_mod = self.linspace(-1.47, -1.18)
            self.y_mod = self.linspace(-2, -.1)
            self.base_x = self.params["cX"]
            self.params["power"] = 1.9
            self.params["hue"] = .6
        self.params["power"] += 5e-3 * self.low
        self.params["zoom"] = self.z_mod[self.scene_pos]
        self.params["cY"] = self.y_mod[self.scene_pos]

    def updateMidi(self, midi_events, frame):
        """Mute the high-percussion channel until frame 775."""
        super().updateMidi(midi_events, frame)
        if frame < 775:
            self.midi_events["hgh"].prev_val = 0
            self.hgh = 0

    def setMidi(self, midi, midi_skip):
        """Bind MIDI tracks to named modulators exposed as self.<name>."""
        self.midi = midi
        # NOTE(review): the midi_skip argument is ignored — confirm intended.
        self.midi_skip = 0
        self.midi_events = {
            "hgh": MidiMod("perc high", mod="one-off", decay=20),
            "perc": MidiMod("perc low", mod="one-off", decay=5),
            "kick": MidiMod("kick", decay=15),
            "bass": MidiMod("BF sub", decay=23, mod="one-off"),
            "rhode": MidiMod("BF friend"),
            "bell": MidiMod("BF buttons", mod="one-off", decay=42),
            "flute": MidiMod("BFbendy lead"),
        }

    def setAudio(self, obj):
        """Attach the audio source and a low-band (0-195) envelope follower."""
        self.audio = obj
        self.spectre = audio.SpectroGram(obj.audio_frame_size)
        self.audio_events = {
            "low": audio.AudioMod((0, 195), "mean"),
        }
if __name__ == "__main__":
    # Hand control to the gamegl runner: Demo drives the parameters,
    # MandelBulb renders them.
    gamegl.run_main(Demo(), MandelBulb)
| [
"utils.audio.SpectroGram",
"glumpy.gl.glEnable",
"utils.audio.AudioMod",
"glumpy.gloo.Program",
"glumpy.gl.glBlendFunc",
"numpy.sin",
"utils.midi.MidiMod"
] | [((995, 1034), 'glumpy.gloo.Program', 'gloo.Program', (['vertex', 'fragment'], {'count': '(4)'}), '(vertex, fragment, count=4)\n', (1007, 1034), False, 'from glumpy import gl, gloo\n'), ((1464, 1488), 'glumpy.gl.glEnable', 'gl.glEnable', (['gl.GL_BLEND'], {}), '(gl.GL_BLEND)\n', (1475, 1488), False, 'from glumpy import gl, gloo\n'), ((1497, 1555), 'glumpy.gl.glBlendFunc', 'gl.glBlendFunc', (['gl.GL_SRC_ALPHA', 'gl.GL_ONE_MINUS_SRC_ALPHA'], {}), '(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n', (1511, 1555), False, 'from glumpy import gl, gloo\n'), ((8266, 8305), 'utils.audio.SpectroGram', 'audio.SpectroGram', (['obj.audio_frame_size'], {}), '(obj.audio_frame_size)\n', (8283, 8305), False, 'from utils import audio\n'), ((7798, 7843), 'utils.midi.MidiMod', 'MidiMod', (['"""perc high"""'], {'mod': '"""one-off"""', 'decay': '(20)'}), "('perc high', mod='one-off', decay=20)\n", (7805, 7843), False, 'from utils.midi import MidiMod\n'), ((7865, 7908), 'utils.midi.MidiMod', 'MidiMod', (['"""perc low"""'], {'mod': '"""one-off"""', 'decay': '(5)'}), "('perc low', mod='one-off', decay=5)\n", (7872, 7908), False, 'from utils.midi import MidiMod\n'), ((7930, 7955), 'utils.midi.MidiMod', 'MidiMod', (['"""kick"""'], {'decay': '(15)'}), "('kick', decay=15)\n", (7937, 7955), False, 'from utils.midi import MidiMod\n'), ((7977, 8019), 'utils.midi.MidiMod', 'MidiMod', (['"""BF sub"""'], {'decay': '(23)', 'mod': '"""one-off"""'}), "('BF sub', decay=23, mod='one-off')\n", (7984, 8019), False, 'from utils.midi import MidiMod\n'), ((8042, 8062), 'utils.midi.MidiMod', 'MidiMod', (['"""BF friend"""'], {}), "('BF friend')\n", (8049, 8062), False, 'from utils.midi import MidiMod\n'), ((8084, 8130), 'utils.midi.MidiMod', 'MidiMod', (['"""BF buttons"""'], {'mod': '"""one-off"""', 'decay': '(42)'}), "('BF buttons', mod='one-off', decay=42)\n", (8091, 8130), False, 'from utils.midi import MidiMod\n'), ((8153, 8176), 'utils.midi.MidiMod', 'MidiMod', (['"""BFbendy lead"""'], {}), "('BFbendy 
lead')\n", (8160, 8176), False, 'from utils.midi import MidiMod\n'), ((8355, 8387), 'utils.audio.AudioMod', 'audio.AudioMod', (['(0, 195)', '"""mean"""'], {}), "((0, 195), 'mean')\n", (8369, 8387), False, 'from utils import audio\n'), ((5791, 5809), 'numpy.sin', 'np.sin', (['self.x_mod'], {}), '(self.x_mod)\n', (5797, 5809), True, 'import numpy as np\n'), ((5863, 5881), 'numpy.sin', 'np.sin', (['self.z_mod'], {}), '(self.z_mod)\n', (5869, 5881), True, 'import numpy as np\n')] |
import sys
import numpy as np
import pytest
from tenvs.accounts.stock_account import StockAccount
def mock_bars(day_num=10):
    """Build *day_num* days of synthetic bar data for three instruments.

    The first two price columns rise linearly from 10.0 to 11.0 over the
    run while the third stays flat at 1.0 (a cash-like column).  Each
    day's entry (keyed by the day number as a string) exposes the previous
    day's close, the previous bar closes, the day's closes and the opens.
    """
    ramp = np.linspace(10.0, 11, day_num + 1)
    col1 = np.reshape(ramp, (day_num + 1, 1))
    col2 = np.reshape(np.linspace(10.0, 11, day_num + 1), (day_num + 1, 1))
    flat = np.ones((day_num + 1, 1))
    closes = np.concatenate([col1, col2, flat], axis=1)
    return {
        str(day): {
            'pre_day_close': closes[day, :],
            'pre_bar_closes': closes[day, :],
            'closes': closes[day + 1, :],
            'opens': closes[day, :],
        }
        for day in range(day_num)
    }
class TestStockAccount:
    def test_basic(self):
        """Exercise StockAccount through buys, re-buys and a sell.

        Starts from a fresh 100k account over two identical stock codes
        plus a cash column, then checks every exposed statistic (caps,
        pnls, returns, fees, cash changes, volumes) after each execution
        against hand-computed expected values from the mock_bars() prices.
        """
        np.random.seed(0)
        investment = 1e5
        codes = ['000001.SZ', '000001.SZ']
        account = StockAccount(investment, codes)
        # Fresh account: everything in cash, all pnl/fee stats zero.
        assert account.investment == investment
        assert account.pre_day_total_assets == investment
        assert account.caps.tolist() == [0, 0, investment]
        assert account.total_assets == investment
        assert account.bar_pnl == 0.0
        assert account.bar_pnls.tolist() == [0.0, 0.0, 0.0]
        assert account.day_pnl == 0.0
        assert account.day_pnls.tolist() == [0.0, 0.0, 0.0]
        assert account.pnl == 0.0
        assert account.pnls.tolist() == [0.0, 0.0, 0.0]
        assert account.day_return == 0
        assert account.day_returns.tolist() == [0.0, 0.0, 0.0]
        assert account.value == 1.0
        assert account.contributions.tolist() == [0.0, 0.0, 0.0]
        assert account.weights.tolist() == [0.0, 0.0, 1.0]
        assert account.day_fee == 0
        assert account.day_fees.tolist() == [0.0, 0.0, 0.0]
        assert account.fee == 0
        assert account.bar_cash_changes.tolist() == [0.0, 0.0, 0.0]
        assert account.day_cash_changes.tolist() == [0.0, 0.0, 0.0]
        assert account.balance == investment
        bars = mock_bars(10)
        # Buy 500 shares of each stock
        day = '0'
        volumes = np.array([500, 500, 0])
        account.bar_execute(
            volumes=volumes, bar=bars[day], bar_id=0, day_log=True, day=day)
        assert account.available == 89990.0
        assert account.pre_day_total_assets == investment
        assert account.caps.tolist() == [5050.0, 5050.0, 89990.0]
        assert account.total_assets == 100090.0
        assert account.bar_pnl == 90.0
        assert account.bar_pnls.tolist() == [45.0, 45.0, 0.0]
        assert account.day_pnl == 90.0
        assert account.day_pnls.tolist() == [45.0, 45.0, 0.0]
        assert account.day_pnl == 90.0
        assert account.day_pnls.tolist() == [45.0, 45.0, 0.0]
        assert account.day_return == 0.0009
        assert account.day_returns.tolist() == [0.00045, 0.00045, 0.0]
        assert account.value == 1.0009
        assert account.contributions.tolist() == [0.00045, 0.00045, 0.0]
        assert account.weights.tolist() == [
            0.0504545908682186, 0.0504545908682186, 0.8990908182635627]
        assert account.day_fee == 10.0
        assert account.day_fees.tolist() == [5.0, 5.0, 0.0]
        assert account.fee == 10.0
        assert account.bar_cash_changes.tolist(
        ) == [-5005.0, -5005.0, -10010.0]
        assert account.day_cash_changes.tolist(
        ) == [-5005.0, -5005.0, -10010.0]
        assert account.balance == 89990.00
        assert account.available == 89990.00
        # Same-day purchases are frozen (T+1) until the next day.
        assert account.sellable_volumes.tolist() == [0., 0., 89990.0]
        assert account.frozen_volumes.tolist() == [500., 500., 0.]
        assert account.volumes.tolist() == [500., 500., 89990.0]
        # Buy again
        volumes = np.array([6000, 500, 0])
        day = '1'
        account.bar_execute(
            volumes=volumes, bar=bars[day], bar_id=0, day_log=True, day=day)
        assert account.available == 24330.0
        assert account.pre_day_total_assets == 100090.0
        assert account.caps.tolist() == [66300.0, 10200.0, 24330.0]
        assert account.total_assets == 100830.0
        assert account.bar_pnl == 740.0
        assert account.bar_pnls.tolist() == [645.0, 95.0, 0.0]
        assert account.day_pnl == 740.0
        assert account.day_pnls.tolist() == [645.0, 95.0, 0.0]
        assert account.day_pnl == 740.0
        assert account.day_pnls.tolist() == [645.0, 95.0, 0.0]
        assert account.day_return == 0.007393345988610251
        assert account.day_returns.tolist(
        ) == [0.006444200219802178, 0.0009491457688080727, 0.0]
        assert account.value == 1.0083
        assert account.contributions.tolist() == [0.0069, 0.0014, 0.0]
        assert account.weights.tolist() == [
            0.6575423980958048, 0.10116036893781613, 0.24129723296637906]
        assert account.day_fee == 10.0
        assert account.day_fees.tolist() == [5.0, 5.0, 0.0]
        assert account.fee == 20.0
        assert account.bar_cash_changes.tolist(
        ) == [-60605.0, -5055.0, -65660.0]
        assert account.day_cash_changes.tolist(
        ) == [-60605.0, -5055.0, -65660.0]
        assert account.balance == 24330.0
        assert account.available == 24330.0
        assert account.sellable_volumes.tolist() == [500.0, 500.0, 24330.0]
        assert account.frozen_volumes.tolist() == [6000.0, 500.0, 0.0]
        assert account.volumes.tolist() == [6500.0, 1000.0, 24330.0]
        # Sell
        volumes = np.array([-5000, 500, 0])
        day = '2'
        account.bar_execute(
            volumes=volumes, bar=bars[day], bar_id=0, day_log=True, day=day)
        assert account.available == 70129.222
        assert account.pre_day_total_assets == 100830.0
        assert account.caps.tolist() == [15450.0, 15450.0, 70129.222]
        assert account.total_assets == 101029.222
        assert account.bar_pnl == 199.222
        assert account.bar_pnls.tolist() == [
            54.222, 145.0, 0.0]
        assert account.day_pnl == 199.222
        assert account.day_pnls.tolist() == [
            54.222, 145.0, 0.0]
        assert account.day_pnl == 199.222
        assert account.day_pnls.tolist() == [
            54.222, 145.0, 0.0]
        assert account.day_return == 0.0019758206882872164
        assert account.day_returns.tolist(
        ) == [0.0005377566200535555, 0.0014380640682336607, 0.0]
        assert account.value == 1.01029222
        assert account.contributions.tolist() == [0.00744222, 0.00285, 0.0]
        assert account.weights.tolist() == [
            0.1529260514349007, 0.1529260514349007, 0.6941478971301986]
        # Selling incurs the larger (stamp-duty-like) fee on leg one.
        assert account.day_fee == 100.778
        assert account.day_fees.tolist() == [95.778, 5.0, 0.0]
        assert account.fee == 120.778
        assert account.bar_cash_changes.tolist(
        ) == [50904.222, -5105.0, 45799.222]
        assert account.day_cash_changes.tolist(
        ) == [50904.222, -5105.0, 45799.222]
        assert account.balance == 70129.222
        assert account.available == 70129.222
        assert account.sellable_volumes.tolist(
        ) == [1500.0, 1000.0, 24330.0]
        assert account.frozen_volumes.tolist() == [0.0, 500.0, 45799.222]
        assert account.volumes.tolist() == [1500.0, 1500.0, 70129.222]
if __name__ == "__main__":
    # Run this module's tests directly and propagate pytest's exit code.
    sys.exit(pytest.main([__file__]))
| [
"numpy.random.seed",
"tenvs.accounts.stock_account.StockAccount",
"numpy.ones",
"pytest.main",
"numpy.array",
"numpy.linspace",
"numpy.concatenate"
] | [((300, 325), 'numpy.ones', 'np.ones', (['(day_num + 1, 1)'], {}), '((day_num + 1, 1))\n', (307, 325), True, 'import numpy as np\n'), ((339, 390), 'numpy.concatenate', 'np.concatenate', (['[closes1, closes2, closes3]'], {'axis': '(1)'}), '([closes1, closes2, closes3], axis=1)\n', (353, 390), True, 'import numpy as np\n'), ((153, 187), 'numpy.linspace', 'np.linspace', (['(10.0)', '(11)', '(day_num + 1)'], {}), '(10.0, 11, day_num + 1)\n', (164, 187), True, 'import numpy as np\n'), ((232, 266), 'numpy.linspace', 'np.linspace', (['(10.0)', '(11)', '(day_num + 1)'], {}), '(10.0, 11, day_num + 1)\n', (243, 266), True, 'import numpy as np\n'), ((710, 727), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (724, 727), True, 'import numpy as np\n'), ((814, 845), 'tenvs.accounts.stock_account.StockAccount', 'StockAccount', (['investment', 'codes'], {}), '(investment, codes)\n', (826, 845), False, 'from tenvs.accounts.stock_account import StockAccount\n'), ((2001, 2024), 'numpy.array', 'np.array', (['[500, 500, 0]'], {}), '([500, 500, 0])\n', (2009, 2024), True, 'import numpy as np\n'), ((3631, 3655), 'numpy.array', 'np.array', (['[6000, 500, 0]'], {}), '([6000, 500, 0])\n', (3639, 3655), True, 'import numpy as np\n'), ((5348, 5373), 'numpy.array', 'np.array', (['[-5000, 500, 0]'], {}), '([-5000, 500, 0])\n', (5356, 5373), True, 'import numpy as np\n'), ((7180, 7203), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7191, 7203), False, 'import pytest\n')] |
r'''
The 1D advection-diffusion problem
----------------------------------
The equation to be solved is
.. math::
\frac{\partial}{\partial t} \psi(x,t) &= -\frac{1}{w(x)} \frac{\partial}{\partial x} \left[ w(x) ~ \mathcal{F}(x,t) \right] + \dot{\psi}\\
\mathcal{F} &= U(x) \psi(x) -K(x) ~ \frac{\partial \psi}{\partial x} + F(x)
for the following quantities:
- state variable :math:`\psi(x,t)`
- diffusivity :math:`K(x)` in units of :math:`x^2 ~ t^{-1}`
- advecting velocity :math:`U(x)` in units of :math:`x ~ t^{-1}`
- a prescribed flux :math:`F(x)` (including boundary conditions) in units of :math:`\psi ~ x ~ t^{-1}`
- a scalar source/sink :math:`\dot{\psi}(x)` in units of :math:`\psi ~ t^{-1}`
- weighting function :math:`w(x)` for the divergence operator on curvilinear grids.
The boundary condition is a flux condition at the end points:
.. math::
\begin{align} \label{eq:fluxcondition}
\mathcal{F}(x_0) &= F(x_0) & \mathcal{F}(x_J) &= F(x_J)
\end{align}
which requires that the advecting velocity :math:`u(x) = 0` at the end points :math:`x_0, x_J`
The solver is implemented on a 1D staggered grid, with J+1 flux points
and J scalar points located somewhere between the flux points.
The solver does **not** assume the gridpoints are evenly spaced in :math:`x`.
Routines are provided to compute the following:
- Advective, diffusive, and total fluxes (the terms of :math:`\mathcal{F}`)
- Tridiagonal matrix operator for the flux convergence
- The actual flux convergence, or instantaneous scalar tendency given a current value of :math:`\psi(x)`
- Future value of :math:`\psi(x)` for an implicit timestep
Some details of the solver formulas are laid out below for reference.
Spatial discretization
----------------------
We use a non-uniform staggered spatial grid with scalar :math:`\psi` evaluated at :math:`J` points,
and flux :math:`\mathcal{F}` evaluated at :math:`J+1` flux points.
The indexing will run from :math:`j=0` to :math:`j=J` for the flux points,
and :math:`i=0` to :math:`i=J-1` for the scalar points.
This notation is consistent with zero-indexed Python arrays.
We define the following arrays:
- :math:`\mathcal{X}_b[j]` is a length J+1 array defining the location of the flux points.
- :math:`\mathcal{X}[i]` is a length J array defining the location of the scalar points, where point :math:`\mathcal{X}[j]` is somewhere between :math:`\mathcal{X}_b[j]` and :math:`\mathcal{X}_b[j+1]` for all :math:`j<J`.
- :math:`\psi[i], \dot{\psi}[i]` are length J arrays defined on :math:`\mathcal{X}`.
- :math:`U[j], K[j], F[j]` are all arrays of length J+1 defined on :math:`\mathcal{X}_b`.
- The grid weights are similarly in arrays :math:`W_b[j], W[i]` respectively on :math:`\mathcal{X}_b`` and :math:`\mathcal{X}`.
Centered difference formulas for the flux
-----------------------------------------
We use centered differences in :math:`x` to discretize the spatial derivatives.
The diffusive component of the flux is thus
.. math::
\begin{align*}
\mathcal{F}_{diff}[j] &= - K[j] \frac{ \left( \psi[i] - \psi[i-1] \right) }{\left( \mathcal{X}[i] - \mathcal{X}[i-1] \right)} & j&=i=1,2,...,J-1
\end{align*}
The diffusive flux is assumed to be zero at the boundaries.
The advective term requires an additional approximation since the scalar :math:`\psi` is not defined at the flux points.
We use a linear interpolation to the flux points:
.. math::
\begin{align*}
\psi_b[j] &\equiv \psi[i-1] \left( \frac{\mathcal{X}[i] - \mathcal{X}_b[j]}{\mathcal{X}[i] - \mathcal{X}[i-1]} \right) + \psi[i] \left( \frac{ \mathcal{X}_b[j] - \mathcal{X}[i-1] }{\mathcal{X}[i] - \mathcal{X}[i-1]} \right) & j&=i=1,2,...,J-1
\end{align*}
Note that for an evenly spaced grid, this reduces to the simple average :math:`\frac{1}{2} \left( \psi[i-1] + \psi[i] \right)`.
With this interpolation, the advective flux is approximated by
.. math::
\begin{align*}
\mathcal{F}_{adv}[j] &= \frac{U[j] }{\mathcal{X}[i] - \mathcal{X}[i-1]} \left( \psi[i-1] (\mathcal{X}[i] - \mathcal{X}_b[j]) + \psi[i] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) \right) & j&=i=1,2,...,J-1
\end{align*}
The total flux away from the boundaries (after some recombining terms) is thus:
.. math::
\mathcal{F}[j] = F[j] + \psi[i-1] \left( \frac{K[j] + U[j] (\mathcal{X}[i] - \mathcal{X}_b[j]) }{ \mathcal{X}[i] - \mathcal{X}[i-1] } \right) - \psi[i] \left( \frac{K[j] - U[j] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) }{\mathcal{X}[i] - \mathcal{X}[i-1] } \right)
which is valid for j=i=1,2,...,J-1.
Centered difference formulas for the flux convergence
-----------------------------------------------------
Centered difference approximation of the flux convergence gives
.. math::
\begin{align*}
\frac{\partial }{\partial t} \psi[i] &= -\frac{ W_b[j+1] \mathcal{F}[j+1] - W_b[j] \mathcal{F}[j] }{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )} + \dot{\psi}[i] & i&=j=0,1,...,J-1
\end{align*}
The flux convergences are best expressed together in matrix form:
.. math::
\begin{equation}
\frac{\partial \boldsymbol{\psi}}{\partial t} = \boldsymbol{T} ~ \boldsymbol{\psi} + \boldsymbol{S}
\end{equation}
where :math:`\boldsymbol{\psi}` is the :math:`J\times1` column vector,
:math:`\boldsymbol{S}` is a :math:`J\times1` column vector
representing the prescribed flux convergence and source terms, whose elements are
.. math::
\begin{align}
S[i] &= \frac{-W_b[j+1] F[j+1] + W_b[j] F[j]}{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )} + \dot{\psi}[i] & i&=j=0,1,...,J-1
\end{align}
and :math:`\boldsymbol{T}` is a :math:`J\times J` tridiagonal matrix:
.. math::
\begin{equation}
\boldsymbol{T} ~ \boldsymbol{\psi} = \left[\begin{array}{ccccccc} T_{m0} & T_{u1} & 0 & ... & 0 & 0 & 0 \\T_{l0} & T_{m1} & T_{u2} & ... & 0 & 0 & 0 \\ 0 & T_{l1} & T_{m2} & ... & 0 & 0 & 0 \\... & ... & ... & ... & ... & ... & ... \\0 & 0 & 0 & ... & T_{m(J-3)} & T_{u(J-2)} & 0 \\0 & 0 & 0 & ... & T_{l(J-3)} & T_{m(J-2)} & T_{u(J-1)} \\0 & 0 & 0 & ... & 0 & T_{l(J-2)} & T_{m(J-1)}\end{array}\right] \left[\begin{array}{c} \psi_0 \\ \psi_1 \\ \psi_2 \\... \\ \psi_{J-3} \\ \psi_{J-2} \\ \psi_{J-1} \end{array}\right]
\end{equation}
with vectors :math:`T_l, T_m, T_u` representing respectively the lower, main, and upper diagonals of :math:`\boldsymbol{T}`.
We will treat all three vectors as length J;
the 0th element of :math:`T_u` is ignored while the (J-1)th element of :math:`T_l` is ignored
(this is consistent with the expected inputs for the Python module scipy.linalg.solve_banded).
The instantaneous tendency is then easily computed by matrix multiplication.
The elements of the main diagonal of :math:`\boldsymbol{\psi}` can be computed from
.. math::
\begin{align} \label{eq:maindiag}
\begin{split}
T_m[i] &= -\left( \frac{ W_b[j+1] \big( K[j+1] + U[j+1] (\mathcal{X}[i+1] - \mathcal{X}_b[j+1]) \big) }{ W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i+1] - \mathcal{X}[i]) } \right) \\
& \qquad - \left( \frac{W_b[j] \big( K[j] - U[j] (\mathcal{X}_b[j] - \mathcal{X}[i-1]) \big) }{W[i] ( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i] - \mathcal{X}[i-1]) } \right) \\
    i &=j=0,1,...,J-1
\end{split}
\end{align}
which is valid at the boundaries so long as we set :math:`W_b[0] = W_b[J] = 0`.
The lower diagonal (including the right boundary condition) is computed from
.. math::
\begin{align} \label{eq:lowerdiag}
\begin{split}
T_l[i-1] &= \left( \frac{W_b[j]}{W[i] } \right) \left( \frac{ K[j] + U[j] (\mathcal{X}[i] - \mathcal{X}_b[j]) }{( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] ) (\mathcal{X}[i] - \mathcal{X}[i-1] )} \right) \\
i &=j =1,2,...,J-2, J-1
\end{split}
\end{align}
Finally the upper diagonal (including the left boundary condition) is computed from
.. math::
\begin{align} \label{eq:upperdiag}
\begin{split}
T_u[i+1] &= \left( \frac{W_b[j+1]}{W[i]} \right) \left( \frac{K[j+1] - U[j+1] (\mathcal{X}_b[j+1] - \mathcal{X}[i]) }{( \mathcal{X}_b[j+1] - \mathcal{X}_b[j] )(\mathcal{X}[i+1] - \mathcal{X}[i] ) } \right) \\
i &= j=0,...,J-2
\end{split}
\end{align}
Implicit time discretization
----------------------------
The forward-time finite difference approximation to LHS of the flux-convergence equation is simply
.. math::
\begin{equation}
\frac{\partial \psi[i]}{\partial t} \approx \frac{\psi^{n+1}[i]- \psi^{n}[i]}{\Delta t}
\end{equation}
where the superscript :math:`n` indicates the time index.
We use the implicit-time method, in which the RHS is evaluated at the future time :math:`n+1`.
Applying this to the matrix equation above
and moving all the terms at time :math:`n+1` over to the LHS yields
.. math::
\begin{equation} \label{eq:implicit_tridiagonal}
\left( \boldsymbol{I} - \boldsymbol{T} \Delta t \right) \boldsymbol{\psi}^{n+1} = \boldsymbol{\psi}^{n} + \boldsymbol{S} \Delta t
\end{equation}
where :math:`\boldsymbol{I}` is the :math:`J\times J` identity matrix.
Solving for the future value :math:`\boldsymbol{\psi}^{n+1}` is then accomplished
by solving the :math:`J \times J` tridiagonal linear system using standard routines.
Analytical benchmark
--------------------
Here is an analytical case to be used for testing purposes to validate the numerical code.
This is implemented in the CLIMLAB test suite.
- :math:`K=K_0` is constant
- :math:`w(x) = 1` everywhere (Cartesian coordinates)
- :math:`F = 0` everywhere
- :math:`\psi(x,0) = \psi_0 \sin^2\left(\frac{\pi x}{L}\right)`
- :math:`u(x) = U_0 \sin\left(\frac{\pi x}{L}\right)`
for a domain with endpoints at :math:`x=0` and :math:`x=L`.
The analytical solution is
.. math::
\begin{align}
\mathcal{F} &= \psi_0 \sin\left(\frac{\pi x}{L}\right) \left[U_0 \sin^2\left(\frac{\pi x}{L}\right) - 2K \frac{\pi}{L} \cos\left(\frac{\pi x}{L}\right) \right] \\
\frac{\partial \psi}{\partial t} &= -\psi_0 \frac{\pi}{L} \left\{ 3 U_0 \sin^2\left(\frac{\pi x}{L}\right) \cos\left(\frac{\pi x}{L}\right) -2K\frac{\pi}{L} \left[\cos^2\left(\frac{\pi x}{L}\right) -\sin^2\left(\frac{\pi x}{L}\right) \right] \right\}
\end{align}
which satisfies the boundary condition :math:`\mathcal{F} = 0` at :math:`x=0` and :math:`x=L`.
Module function reference
-------------------------
All the functions in ``climlab.dynamics.adv_diff_numerics`` are vectorized
to handle multidimensional input. The key assumption is that
**advection-diffusion operates along the final dimension**.
Inputs should be reshaped appropriately (e.g. with ``numpy.moveaxis()``)
before calling these functions.
'''
from __future__ import division
from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis
from numpy.linalg import solve
from scipy.linalg import solve_banded
def diffusive_flux(X, Xb, K, field):
    '''Return the diffusive flux on cell boundaries (length J+1).

    Centered difference approximation of -K * d(field)/dx at the interior
    flux points; the first and last boundary fluxes are left at zero.
    '''
    dX = diff(X, axis=-1)
    flux = zeros_like(K)
    flux[..., 1:-1] = K[..., 1:-1] * (field[..., :-1] - field[..., 1:]) / dX
    return flux
def advective_flux(X, Xb, U, field):
    '''Return the advective flux on cell boundaries (length J+1).

    The scalar is linearly interpolated onto the interior flux points
    (distance-weighted average of the two neighboring scalar points)
    and multiplied by the advecting velocity U. Boundary fluxes stay zero.
    '''
    dX = diff(X, axis=-1)
    flux = zeros_like(U)
    left_part = field[..., :-1] * (X[..., 1:] - Xb[..., 1:-1])
    right_part = field[..., 1:] * (Xb[..., 1:-1] - X[..., :-1])
    flux[..., 1:-1] = U[..., 1:-1] * (left_part + right_part) / dX
    return flux
def total_flux(X, Xb, K, U, field, prescribed_flux=None):
    '''Return the total (advective + diffusive + prescribed) flux
    on cell boundaries (length J+1).

    When ``prescribed_flux`` is None it is treated as zero everywhere.
    '''
    combined = advective_flux(X, Xb, U, field) + diffusive_flux(X, Xb, K, field)
    if prescribed_flux is None:
        return combined
    return combined + prescribed_flux
def advdiff_tridiag(X, Xb, K, U, W=None, Wb=None, use_banded_solver=False):
    r'''Compute the tridiagonal matrix operator for the advective-diffusive
    flux convergence.
    Input arrays of length J+1:
        Xb, Wb, K, U
    Input arrays of length J:
        X, W
    The 0th and Jth (i.e. first and last) elements of Wb are ignored;
    assuming boundary condition is a prescribed flux.
    The return value depends on input flag ``use_banded_solver``
    If ``use_banded_solver==True``, return a 3xJ array containing the elements of the tridiagonal.
    This version is restricted to 1D input arrays,
    but is suitable for use with the efficient banded solver.
    If ``use_banded_solver=False`` (which it must be for multidimensional input),
    return an array (...,J,J) with the full tridiagonal matrix.
    '''
    J = X.shape[-1]
    # Default grid weights are 1 everywhere (Cartesian coordinates)
    if (W is None):
        W = ones_like(X)
    if (Wb is None):
        Wb = ones_like(Xb)
    # These are all length (J-1) in the last axis
    # T_l: coupling to the left neighbor (see the lower-diagonal formula
    # in the module docstring) -- advective + diffusive contributions
    lower_diagonal = (Wb[...,1:-1]/W[...,1:] *
                      (K[...,1:-1]+U[...,1:-1]*(X[...,1:]-Xb[...,1:-1])) /
                      ((Xb[...,2:]-Xb[...,1:-1])*(X[...,1:]-X[...,:-1])))
    # T_u: coupling to the right neighbor (upper-diagonal formula)
    upper_diagonal = (Wb[...,1:-1]/W[...,:-1] *
                      (K[...,1:-1]-U[...,1:-1]*(Xb[...,1:-1]-X[...,:-1])) /
                      ((Xb[...,1:-1]-Xb[...,:-2])*(X[...,1:]-X[...,:-1])))
    # T_m is assembled from two terms; each is length J-1 and they are
    # offset by one element so their sum covers all J main-diagonal entries
    main_diagonal_term1 = (-Wb[...,1:-1]/W[...,:-1] *
                           (K[...,1:-1]+U[...,1:-1]*(X[...,1:]-Xb[...,1:-1])) /
                           ((Xb[...,1:-1]-Xb[...,:-2])*(X[...,1:]-X[...,:-1])))
    main_diagonal_term2 = (-Wb[...,1:-1]/W[...,1:] *
                           (K[...,1:-1]-U[...,1:-1]*(Xb[...,1:-1]-X[...,:-1])) /
                           ((Xb[...,2:]-Xb[...,1:-1])*(X[...,1:]-X[...,:-1])))
    if use_banded_solver:
        # Pack the diagonals into a 3xJ array
        # (row layout expected by scipy.linalg.solve_banded with (1,1))
        tridiag_banded = zeros((3,J))
        # Lower diagonal (last element ignored)
        tridiag_banded[2,:-1] = lower_diagonal
        # Upper diagonal (first element ignored)
        tridiag_banded[0,1:] = upper_diagonal
        # Main diagonal, term 1, length J-1
        tridiag_banded[1,:-1] += main_diagonal_term1
        # Main diagonal, term 2, length J-1
        tridiag_banded[1, 1:] += main_diagonal_term2
        return tridiag_banded
    else:
        # If X.size is (...,J), then the tridiagonal operator is (...,J,J)
        sizeJJ = tuple([n for n in X.shape[:-1]] + [J,J])
        tridiag = zeros(sizeJJ)
        # indices for main, upper, and lower diagonals of a JxJ matrix
        inds_main = diag_indices(J)
        inds_upper = (inds_main[0][:-1], inds_main[1][1:])
        inds_lower = (inds_main[0][1:], inds_main[1][:-1])
        # Lower diagonal (length J-1)
        tridiag[...,inds_lower[0],inds_lower[1]] = lower_diagonal
        # Upper diagonal (length J-1)
        tridiag[...,inds_upper[0],inds_upper[1]] = upper_diagonal
        # Main diagonal, term 1, length J-1
        tridiag[...,inds_main[0][:-1],inds_main[1][:-1]] += main_diagonal_term1
        # Main diagonal, term 2, length J-1
        tridiag[...,inds_main[0][1:],inds_main[1][1:]] += main_diagonal_term2
        return tridiag
def make_the_actual_tridiagonal_matrix(tridiag_banded):
    '''Convert a (3xJ) banded array into full (JxJ) tridiagonal matrix form.'''
    # Rows of the banded form: 0 = upper diagonal (first element unused),
    # 1 = main diagonal, 2 = lower diagonal (last element unused).
    main = diag(tridiag_banded[1,:], k=0)
    upper = diag(tridiag_banded[0,1:], k=1)
    lower = diag(tridiag_banded[2,:-1], k=-1)
    return main + upper + lower
def compute_source(X, Xb, prescribed_flux=None, prescribed_source=None,
                   W=None, Wb=None):
    '''Return the source array S consisting of the convergence of the prescribed flux
    plus the prescribed scalar source.

    ``prescribed_flux`` defaults to zero on the flux points, and
    ``prescribed_source`` to zero on the scalar points; ``W`` and ``Wb``
    default to uniform (Cartesian) grid weights.
    '''
    W = ones_like(X) if W is None else W
    Wb = ones_like(Xb) if Wb is None else Wb
    F = zeros_like(Xb) if prescribed_flux is None else prescribed_flux
    S0 = zeros_like(X) if prescribed_source is None else prescribed_source
    # Weighted convergence of the prescribed flux across each cell
    flux_conv = Wb[...,:-1]*F[...,:-1] - Wb[...,1:]*F[...,1:]
    return flux_conv / (W*(Xb[...,1:]-Xb[...,:-1])) + S0
def compute_tendency(field, tridiag, source, use_banded_solver=False):
    r'''Return the instantaneous scalar tendency
    .. math::
        \frac{\partial \psi}{\partial t} = T \times \psi + S
    i.e. the convergence of advective+diffusive flux (the tridiagonal
    operator ``tridiag``) applied to ``field``, plus any prescribed
    flux convergence or scalar sources in ``source``.
    '''
    if use_banded_solver:
        # Expand the (3,J) banded form into the full (J,J) matrix first
        tridiag = make_the_actual_tridiagonal_matrix(tridiag)
    # matmul wants the final two dims of each operand to be matrices, so
    # append a singleton axis: (J,J) x (J,1) -> (J,1), then strip it.
    column = matmul(tridiag, field[..., newaxis]) + source[..., newaxis]
    return column[..., 0]
def implicit_step_forward(initial_field, tridiag, source, timestep,
                          use_banded_solver=False):
    r'''Return the field at future time using an implicit timestep.
    The matrix problem is
    .. math::
        (I - T \Delta t) \psi^{n+1} = \psi^n + S \Delta t
    where :math:`T` is the tridiagonal matrix for the flux convergence, :math:`psi` is the
    state variable, the superscript :math:`n` refers to the time index, and :math:`S \Delta t`
    is the accumulated source over the timestep :math:`\Delta t`.
    Input arguments:
    - ``initial_field``: the current state variable :math:`\psi^n`, dimensions (...,J)
    - ``tridiag``: the tridiagonal matrix :math:`T`, dimensions (...,J,J) or (...,3,J) depending on the value of ``use_banded_solver``
    - ``source``: prescribed sources/sinks of :math:`\psi`, dimensions (...,J)
    - ``timestep``: the discrete timestep in time units
    - ``use_banded_solver``: switch to use the optional efficient banded solver (see below)
    Returns the updated value of the state variable :math:`\psi^{n+1}`, dimensions (...,J)
    The expected shape of ``tridiag`` depends on the switch ``use_banded_solver``,
    which should be consistent with that used in the call to ``advdiff_tridiag()``.
    If ``True``, we use the efficient banded matrix solver
    ``scipy.linalg.solve_banded()``.
    However this will probably only work for a 1D state variable.
    The default is to use the general linear system solver ``numpy.linalg.solve()``.
    '''
    RHS = initial_field + source*timestep
    if use_banded_solver:
        # Build (I - T dt) directly in banded (3,J) layout:
        # row 1 of the banded form holds the main diagonal.
        I = 0.*tridiag
        I[1,:] = 1.  # identity matrix in banded form
        IminusTdt = I-tridiag*timestep
        return solve_banded((1, 1), IminusTdt, RHS)
    else:
        J = initial_field.shape[-1]
        # indices for the main diagonal of each JxJ matrix in the stack
        inds_main = diag_indices(J)
        I = 0.*tridiag
        I[...,inds_main[0],inds_main[1]] = 1.  # stacked identity matrix
        IminusTdt = I-tridiag*timestep
        return solve(IminusTdt, RHS)
| [
"numpy.zeros_like",
"numpy.ones_like",
"numpy.zeros",
"numpy.diag_indices",
"scipy.linalg.solve_banded",
"numpy.diff",
"numpy.matmul",
"numpy.diag",
"numpy.linalg.solve"
] | [((10995, 11008), 'numpy.zeros_like', 'zeros_like', (['K'], {}), '(K)\n', (11005, 11008), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11271, 11284), 'numpy.zeros_like', 'zeros_like', (['U'], {}), '(U)\n', (11281, 11284), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11058, 11074), 'numpy.diff', 'diff', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (11062, 11074), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11122, 11138), 'numpy.diff', 'diff', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (11126, 11138), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11361, 11377), 'numpy.diff', 'diff', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (11365, 11377), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11454, 11470), 'numpy.diff', 'diff', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (11458, 11470), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((11711, 11724), 'numpy.zeros_like', 'zeros_like', (['U'], {}), '(U)\n', (11721, 11724), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((12738, 12750), 'numpy.ones_like', 'ones_like', (['X'], {}), '(X)\n', (12747, 12750), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((12785, 12798), 'numpy.ones_like', 'ones_like', (['Xb'], {}), '(Xb)\n', (12794, 12798), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((13637, 13650), 'numpy.zeros', 'zeros', (['(3, J)'], {}), '((3, J))\n', (13642, 13650), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, 
diag_indices, diff, newaxis\n'), ((14226, 14239), 'numpy.zeros', 'zeros', (['sizeJJ'], {}), '(sizeJJ)\n', (14231, 14239), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((14332, 14347), 'numpy.diag_indices', 'diag_indices', (['J'], {}), '(J)\n', (14344, 14347), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15183, 15217), 'numpy.diag', 'diag', (['tridiag_banded[2, :-1]'], {'k': '(-1)'}), '(tridiag_banded[2, :-1], k=-1)\n', (15187, 15217), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15488, 15500), 'numpy.ones_like', 'ones_like', (['X'], {}), '(X)\n', (15497, 15500), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15535, 15548), 'numpy.ones_like', 'ones_like', (['Xb'], {}), '(Xb)\n', (15544, 15548), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15607, 15621), 'numpy.zeros_like', 'zeros_like', (['Xb'], {}), '(Xb)\n', (15617, 15621), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15684, 15697), 'numpy.zeros_like', 'zeros_like', (['X'], {}), '(X)\n', (15694, 15697), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((16542, 16578), 'numpy.matmul', 'matmul', (['tridiag', 'field[..., newaxis]'], {}), '(tridiag, field[..., newaxis])\n', (16548, 16578), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((18414, 18450), 'scipy.linalg.solve_banded', 'solve_banded', (['(1, 1)', 'IminusTdt', 'RHS'], {}), '((1, 1), IminusTdt, RHS)\n', (18426, 18450), False, 'from scipy.linalg import solve_banded\n'), ((18553, 18568), 'numpy.diag_indices', 'diag_indices', (['J'], {}), '(J)\n', (18565, 
18568), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((18719, 18740), 'numpy.linalg.solve', 'solve', (['IminusTdt', 'RHS'], {}), '(IminusTdt, RHS)\n', (18724, 18740), False, 'from numpy.linalg import solve\n'), ((15092, 15123), 'numpy.diag', 'diag', (['tridiag_banded[1, :]'], {'k': '(0)'}), '(tridiag_banded[1, :], k=0)\n', (15096, 15123), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n'), ((15137, 15169), 'numpy.diag', 'diag', (['tridiag_banded[0, 1:]'], {'k': '(1)'}), '(tridiag_banded[0, 1:], k=1)\n', (15141, 15169), False, 'from numpy import zeros, ones, zeros_like, ones_like, matmul, diag, diag_indices, diff, newaxis\n')] |
from ._accumulate_data import AccumulateData
from ..util import MaxSamplesWarning
from numpy import array, nan
import warnings
import numpy as np
class LDTransformBayesData(AccumulateData):
    """
    Update and store transformation data based on low-discrepancy sequences.
    See the stopping criterion that utilize this object for references.
    """
    parameters = ['n_total', 'solution', 'error_bound']
    def __init__(self, stopping_crit, integrand, true_measure, discrete_distrib, m_min: int, m_max: int, fbt, merge_fbt):
        """
        Args:
            stopping_crit (StoppingCriterion): a StoppingCriterion instance
            integrand (Integrand): an Integrand instance
            true_measure (TrueMeasure): A TrueMeasure instance
            discrete_distrib (DiscreteDistribution): a DiscreteDistribution instance
            m_min (int): initial n == 2^m_min
            m_max (int): max n == 2^m_max
            fbt: callable computing the fast Bayesian transform of a batch
                of integrand values
            merge_fbt: callable merging a previously computed transform with
                the transform of the newly added points
        """
        self.stopping_crit = stopping_crit
        self.integrand = integrand
        self.true_measure = true_measure
        self.discrete_distrib = discrete_distrib
        self.distribution_name = type(self.discrete_distrib).__name__
        # Set Attributes
        self.m_min = m_min
        self.m_max = m_max
        self.debugEnable = True
        self.n_total = 0 # total number of samples generated
        self.solution = nan
        self.iter = 0
        self.m = self.m_min
        self.mvec = np.arange(self.m_min, self.m_max + 1, dtype=int)
        # Initialize various temporary storage between iterations
        self.xpts_ = array([]) # shifted lattice points
        self.xun_ = array([]) # un-shifted lattice points
        self.ftilde_ = array([]) # fourier transformed integrand values
        if self.distribution_name == 'Lattice':
            # integrand after the periodization transform
            self.ff = lambda x,*args,**kwargs: self.integrand.f_periodized(x,stopping_crit.ptransform,*args,**kwargs)
        else:
            self.ff = self.integrand.f
        self.fbt = fbt
        self.merge_fbt = merge_fbt
        super(LDTransformBayesData, self).__init__()
    def update_data(self):
        """ See abstract method. """
        # Generate sample values
        if self.iter < len(self.mvec):
            self.ftilde_, self.xun_, self.xpts_ = self.iter_fbt(self.iter, self.xun_, self.xpts_, self.ftilde_)
            self.m = self.mvec[self.iter]
            self.iter += 1
            # update total samples
            self.n_total = 2 ** self.m # updated the total evaluations
        else:
            warnings.warn(f'Already used maximum allowed sample size {2 ** self.m_max}.'
                          f' Note that error tolerances may no longer be satisfied',
                          MaxSamplesWarning)
        return self.xun_, self.ftilde_, self.m
    # Efficient Fast Bayesian Transform computation algorithm, avoids recomputing the full transform
    def iter_fbt(self, iter, xun, xpts, ftilde_prev):
        """Compute the fast Bayesian transform for iteration ``iter``.

        Reuses the transform ``ftilde_prev`` of the previous iteration's
        points and only transforms the newly generated points.
        Returns a tuple (ftilde_, xun_, xpts_) of transform values,
        unrandomized points, and randomized points for all 2^m samples.
        """
        m = self.mvec[iter]
        n = 2 ** m
        # In every iteration except the first one, "n" number_of_points is doubled,
        # but FBT is only computed for the newly added points.
        # Previously computed FFT is reused.
        if iter == 0:
            # In the first iteration compute full FBT
            # xun_ = mod(bsxfun( @ times, (0:1 / n:1-1 / n)',self.gen_vec),1)
            # xun_ = np.arange(0, 1, 1 / n).reshape((n, 1))
            # xun_ = np.mod((xun_ * self.gen_vec), 1)
            # xpts_ = np.mod(bsxfun( @ plus, xun_, shift), 1) # shifted
            xpts_,xun_ = self.gen_samples(n_min=0, n_max=n, return_unrandomized=True, distribution=self.discrete_distrib)
            # Compute initial FBT
            ftilde_ = self.fbt(self.ff(xpts_))
            ftilde_ = ftilde_.reshape((n, 1))
        else:
            # xunnew = np.mod(bsxfun( @ times, (1/n : 2/n : 1-1/n)',self.gen_vec),1)
            # xunnew = np.arange(1 / n, 1, 2 / n).reshape((n // 2, 1))
            # xunnew = np.mod(xunnew * self.gen_vec, 1)
            # xnew = np.mod(bsxfun( @ plus, xunnew, shift), 1)
            xnew, xunnew = self.gen_samples(n_min=n // 2, n_max=n, return_unrandomized=True, distribution=self.discrete_distrib)
            [xun_, xpts_] = self.merge_pts(xun, xunnew, xpts, xnew, n, self.discrete_distrib.d, distribution=self.distribution_name)
            mnext = m - 1
            ftilde_next_new = self.fbt(self.ff(xnew))
            ftilde_next_new = ftilde_next_new.reshape((n // 2, 1))
            if self.debugEnable:
                self.alert_msg(ftilde_next_new, 'Nan', 'Inf')
            # combine the previous batch and new batch to get FBT on all points
            ftilde_ = self.merge_fbt(ftilde_prev, ftilde_next_new, mnext)
        return ftilde_, xun_, xpts_
    @staticmethod
    def gen_samples(n_min, n_max, return_unrandomized, distribution):
        """Draw points [n_min, n_max) from the distribution, returning both
        randomized and unrandomized points.

        NOTE(review): non-Lattice distributions are assumed to accept a
        ``return_jlms`` keyword for the same purpose -- confirm against
        the DiscreteDistribution implementations.
        """
        # Suppress the distribution's warning only for the initial draw
        warn = False if n_min == 0 else True
        if type(distribution).__name__ == 'Lattice':
            xpts_, xun_ = distribution.gen_samples(n_min=n_min, n_max=n_max, warn=warn, return_unrandomized=return_unrandomized)
        else:
            xpts_, xun_ = distribution.gen_samples(n_min=n_min, n_max=n_max, warn=warn, return_jlms=return_unrandomized)
        return xpts_, xun_
    # inserts newly generated points with the old set by interleaving them
    # xun - unshifted points
    @staticmethod
    def merge_pts(xun, xunnew, x, xnew, n, d, distribution):
        """Merge old and new point sets into arrays of n points in d dimensions.

        Lattice points are interleaved (old at even rows, new at odd rows);
        other sequences are simply stacked in generation order.
        """
        if distribution == 'Lattice':
            temp = np.zeros((n, d))
            temp[0::2, :] = xun
            temp[1::2, :] = xunnew
            xun = temp
            temp = np.zeros((n, d))
            temp[0::2, :] = x
            temp[1::2, :] = xnew
            x = temp
        else:
            x = np.vstack([x, xnew])
            xun = np.vstack([xun, xunnew])
        return xun, x
    # prints debug message if the given variable is Inf, Nan or complex, etc
    # Example: alertMsg(x, 'Inf', 'Imag')
    # prints if variable 'x' is either Infinite or Imaginary
    @staticmethod
    def alert_msg(*args):
        """Print a debug message when the first argument contains values of
        the requested problem kinds ('Nan', 'Inf' or 'Imag')."""
        varargin = args
        nargin = len(varargin)
        if nargin > 1:
            # First argument is the variable to check; the rest are the
            # kinds of problem values to look for.
            i_start = 0
            var_tocheck = varargin[i_start]
            i_start = i_start + 1
            inpvarname = 'variable'
            while i_start < nargin:
                var_type = varargin[i_start]
                i_start = i_start + 1
                if var_type == 'Nan':
                    if np.any(np.isnan(var_tocheck)):
                        print(f'{inpvarname} has NaN values')
                elif var_type == 'Inf':
                    if np.any(np.isinf(var_tocheck)):
                        print(f'{inpvarname} has Inf values')
                elif var_type == 'Imag':
                    if not np.all(np.isreal(var_tocheck)):
                        print(f'{inpvarname} has complex values')
                else:
                    print('unknown type check requested !')
| [
"numpy.isreal",
"numpy.zeros",
"numpy.isinf",
"numpy.isnan",
"numpy.array",
"numpy.arange",
"warnings.warn",
"numpy.vstack"
] | [((1456, 1504), 'numpy.arange', 'np.arange', (['self.m_min', '(self.m_max + 1)'], {'dtype': 'int'}), '(self.m_min, self.m_max + 1, dtype=int)\n', (1465, 1504), True, 'import numpy as np\n'), ((1593, 1602), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1598, 1602), False, 'from numpy import array, nan\n'), ((1649, 1658), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1654, 1658), False, 'from numpy import array, nan\n'), ((1711, 1720), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1716, 1720), False, 'from numpy import array, nan\n'), ((2603, 2763), 'warnings.warn', 'warnings.warn', (['f"""Already used maximum allowed sample size {2 ** self.m_max}. Note that error tolerances may no longer be satisfied"""', 'MaxSamplesWarning'], {}), "(\n f'Already used maximum allowed sample size {2 ** self.m_max}. Note that error tolerances may no longer be satisfied'\n , MaxSamplesWarning)\n", (2616, 2763), False, 'import warnings\n'), ((5552, 5568), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (5560, 5568), True, 'import numpy as np\n'), ((5678, 5694), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (5686, 5694), True, 'import numpy as np\n'), ((5809, 5829), 'numpy.vstack', 'np.vstack', (['[x, xnew]'], {}), '([x, xnew])\n', (5818, 5829), True, 'import numpy as np\n'), ((5848, 5872), 'numpy.vstack', 'np.vstack', (['[xun, xunnew]'], {}), '([xun, xunnew])\n', (5857, 5872), True, 'import numpy as np\n'), ((6534, 6555), 'numpy.isnan', 'np.isnan', (['var_tocheck'], {}), '(var_tocheck)\n', (6542, 6555), True, 'import numpy as np\n'), ((6690, 6711), 'numpy.isinf', 'np.isinf', (['var_tocheck'], {}), '(var_tocheck)\n', (6698, 6711), True, 'import numpy as np\n'), ((6851, 6873), 'numpy.isreal', 'np.isreal', (['var_tocheck'], {}), '(var_tocheck)\n', (6860, 6873), True, 'import numpy as np\n')] |
"""Module to read and parse native Phoenix Geophysics data formats of the MTU-5C Family
This module implements streamed readers for the segmented-decimated,
continuous-decimated, and native sampling rate time series formats of the MTU-5C family.
"""
__author__ = '<NAME>'
from numpy import empty, fromfile, float32, append
from struct import unpack_from, unpack
import os
import string
from cmath import phase
from PhoenixGeoPy.Reader.DataScaling import DataScaling
class _TSReaderBase(object):
def __init__(self, path, num_files=1, header_size=128, report_hw_sat=False):
self.base_path = path
self.base_dir, self.file_name = os.path.split(self.base_path)
file_name_base, self.file_extension = self.file_name.split(".", 2)
file_parts = file_name_base.split("_")
self.inst_id = file_parts[0]
self.rec_id = file_parts[1]
self.ch_id = file_parts[2]
seq_str = file_parts[3]
self.seq = int(seq_str, base=16)
self.last_seq = self.seq + num_files
self.stream = None
self.report_hw_sat = report_hw_sat
self.header_info = {}
self.header_size = header_size
self.dataHeader = None
self.open_file_seq(self.seq) # Open the file passed as the fisrt file in the sequence to stream
self.ad_plus_minus_range = 5.0 # differential voltage range that the A/D can measure (Board model dependent)
self.channel_type = "?" # "E" or "H"
self.channel_main_gain = None # The value of the main gain of the board
self.intrinsic_circuitry_gain = None # Circuitry Gain not directly configurable by the user
self.total_circuitry_gain = None # Total board Gain both intrinsic gain and user-seletable gain in circuit
self.total_selectable_gain = None # Total of the gain that is selectable by the user (i.e. att * pre * gain)
self.lpf_Hz = None # Nominal cutoff freq of the configured LPF of the channel
self.preamp_gain = 1.0
self.attenuator_gain = 1.0
def open_next(self):
ret_val = False
if self.stream is not None:
self.stream.close()
self.seq += 1
if self.seq < self.last_seq:
new_seq_str = "%08X" % (self.seq)
new_path = (self.base_dir + '/' + self.inst_id + '_' +
self.rec_id + '_' + self.ch_id + '_' +
new_seq_str + '.' + self.file_extension)
if os.path.exists(new_path):
self.stream = open(new_path, 'rb')
if self.header_size > 0:
self.dataHeader = self.stream.read(self.header_size)
ret_val = True
return ret_val
def open_file_seq(self, file_seq_num):
ret_val = False
if self.stream is not None:
self.stream.close()
self.seq = file_seq_num
new_seq_str = "%08X" % (self.seq)
new_path = (self.base_dir + '/' + self.inst_id + '_' +
self.rec_id + '_' + self.ch_id + '_' +
new_seq_str + '.' + self.file_extension)
if os.path.exists(new_path):
print (" opening " + new_path)
self.stream = open(new_path, 'rb')
if self.header_size > 0:
self.dataHeader = self.stream.read(self.header_size)
ret_val = True
return ret_val
def __populate_channel_type(self, config_fp):
if config_fp[1] & 0x08 == 0x08:
self.channel_type = "E"
else:
self.channel_type = "H"
# Channel type detected by electronics
# this normally matches self.channel_type, but used in electronics design and testing
if config_fp[1] & 0x20 == 0x20:
self.detected_channel_type = 'E'
else:
self.detected_channel_type = 'H'
def __populate_lpf(self, config_fp):
if config_fp[0] & 0x80 == 0x80: # LPF on
if config_fp[0] & 0x03 == 0x03:
self.lpf_Hz = 10
elif config_fp[0] & 0x03 == 0x02:
if (self.board_model_main == "BCM03" or self.board_model_main == "BCM06"):
self.lpf_Hz = 1000
else:
self.lpf_Hz = 100
elif config_fp[0] & 0x03 == 0x01:
if (self.board_model_main == "BCM03" or self.board_model_main == "BCM06"):
self.lpf_Hz = 10000
else:
self.lpf_Hz = 1000
else: # LPF off
if (self.board_model_main == "BCM03" or self.board_model_main == "BCM06"):
self.lpf_Hz = 17800
else:
self.lpf_Hz = 10000
def __popuate_peamp_gain(self, config_fp):
if self.channel_type == "?":
raise Exception("Channel type must be set before attemting to calculate preamp gain")
preamp_on = bool(config_fp[0] & 0x10)
self.preamp_gain = 1.0
if self.channel_type == "E":
if preamp_on is True:
if self.board_model_main == "BCM01" or self.board_model_main == "BCM03":
self.preamp_gain = 4.0
if (self.board_model_revision == "L"):
#Account for BCM01-L experimental prototype
self.preamp_gain = 8.0
else:
self.preamp_gain = 8.0
# Acount for experimental prototype BCM05-A
if self.header_info['ch_hwv'][0:7] == "BCM05-A":
self.preamp_gain = 4.0
def __populate_main_gain(self, config_fp):
# BCM05-B and BCM06 introduced different selectable gains
new_gains = True # we asume any newer board will have the new gain banks
if self.board_model_main == "BCM01" or self.board_model_main == "BCM03":
# Original style 24 KSps boards and original 96 KSps boards
new_gains = False
if self.header_info['ch_hwv'][0:7] == "BCM05-A":
# Acount for experimental prototype BCM05-A, which also had original gain banks
new_gains = False
if config_fp[0] & 0x0C == 0x00:
self.channel_main_gain = 1.0
elif config_fp[0] & 0x0C == 0x04:
self.channel_main_gain = 4.0
elif config_fp[0] & 0x0C == 0x08:
self.channel_main_gain = 6.0
if not new_gains:
self.channel_main_gain = 16.0
elif config_fp[0] & 0x0C == 0x0C:
self.channel_main_gain = 8.0
if not new_gains:
self.channel_main_gain = 32.0
def __handle_sensor_range(self, config_fp):
"""This function will adjust the intrinsic circuitry gain based on the
sensor range configuration in the configuration fingerprint
For this, we consider that for the Electric channel, calibration path, or H-legacy
sensors all go through a 1/4 gain stage, and then they get a virtial x2 gain from
Single-ended-diff before the A/D. In the case of newer sensors (differential)
instead of a 1/4 gain stage, there is only a 1/2 gain stage
Therefore, in the E,cal and legacy sensor case the circuitry gain is 1/2, while for
newer sensors it is 1
"""
if self.channel_type == "?":
raise Exception("Channel type must be set before attemting to calculate preamp gain")
self.intrinsic_circuitry_gain = 0.5
if self.channel_type == "H":
if config_fp[1] & 0x01 == 0x01:
self.intrinsic_circuitry_gain = 1.0
def __populate_attenuator_gain(self, config_fp):
self.attenuator_gain = 1.0 # Asume attenuator off
if self.channel_type == "?":
raise Exception("Channel type must be set before attemting to calculate preamp gain")
attenuator_on = bool(config_fp[4] & 0x01)
if attenuator_on and self.channel_type == "E":
new_attenuator = True # By default assume that we are dealing with a newer types of boards
if self.board_model_main == "BCM01" or self.board_model_main == "BCM03":
# Original style 24 KSps boards and original 96 KSps boards
new_attenuator = False
if self.header_info['ch_hwv'][0:7] == "BCM05-A":
# Acount for experimental prototype BCM05-A, which also had original gain banks
new_attenuator = False
if new_attenuator:
self.attenuator_gain = 523.0 / 5223.0
else:
self.attenuator_gain = 0.1
def unpack_header(self):
self.header_info['file_type'] = unpack_from('B', self.dataHeader, 0)[0]
self.header_info['file_version'] = unpack_from('B', self.dataHeader, 1)[0]
self.header_info['length'] = unpack_from('H', self.dataHeader, 2)[0]
self.header_info['inst_type'] = unpack_from('8s', self.dataHeader, 4)[0].decode("utf-8").strip(' ').strip('\x00')
self.header_info['inst_serial'] = b''.join(unpack_from('cccccccc', self.dataHeader, 12)).strip(b'\x00')
self.header_info['rec_id'] = unpack_from('I', self.dataHeader, 20)[0]
self.header_info['ch_id'] = unpack_from('B', self.dataHeader, 24)[0]
self.header_info['file_sequence'] = unpack_from('I', self.dataHeader, 25)[0]
self.header_info['frag_period'] = unpack_from('H', self.dataHeader, 29)[0]
self.header_info['ch_hwv'] = unpack_from('8s', self.dataHeader, 31)[0].decode("utf-8").strip(' ')
self.board_model_main = self.header_info['ch_hwv'][0:5]
self.board_model_revision = self.header_info['ch_hwv'][6:1]
self.header_info['ch_ser'] = unpack_from('8s', self.dataHeader, 39)[0].decode("utf-8").strip('\x00')
# handle the case of backend < v0.14, which puts '--------' in ch_ser
if all(chars in string.hexdigits for chars in self.header_info['ch_ser']):
self.header_info['ch_ser'] = int(self.header_info['ch_ser'], 16)
else:
self.header_info['ch_ser'] = 0
self.header_info['ch_fir'] = hex(unpack_from('I', self.dataHeader, 47)[0])
config_fp = unpack_from('BBBBBBBB', self.dataHeader, 51)
self.header_info['conf_fp'] = config_fp
# Channel type
self.__populate_channel_type(config_fp)
# Electric channel Preamp
self.__popuate_peamp_gain(config_fp)
# LPF
self.__populate_lpf(config_fp)
# Main Gain Stage
self.__populate_main_gain(config_fp)
# Sensor range
self.__handle_sensor_range(config_fp)
# Electric channel attenuator
self.__populate_attenuator_gain(config_fp)
# Board-wide gains
self.total_selectable_gain = self.channel_main_gain * self.preamp_gain * self.attenuator_gain
self.total_circuitry_gain = self.total_selectable_gain * self.intrinsic_circuitry_gain
self.header_info['sample_rate_base'] = unpack_from('H', self.dataHeader, 59)[0]
self.header_info['sample_rate_exp'] = unpack_from('b', self.dataHeader, 61)[0]
self.header_info['sample_rate'] = self.header_info['sample_rate_base']
if self.header_info['sample_rate_exp'] != 0:
self.header_info['sample_rate'] *= pow(10, self.header_info['sample_rate_exp'])
self.header_info['bytes_per_sample'] = unpack_from('B', self.dataHeader, 62)[0]
self.header_info['frame_size'] = unpack_from('I', self.dataHeader, 63)[0]
self.dataFooter = self.header_info['frame_size'] >> 24
self.frameSize = self.header_info['frame_size'] & 0x0ffffff
self.header_info['decimation_node_id'] = unpack_from('H', self.dataHeader, 67)[0]
self.header_info['frame_rollover_count'] = unpack_from('H', self.dataHeader, 69)[0]
self.header_info['gps_long'] = unpack_from('f', self.dataHeader, 71)[0]
self.header_info['gps_lat'] = unpack_from('f', self.dataHeader, 75)[0]
self.header_info['gps_height'] = unpack_from('f', self.dataHeader, 79)[0]
self.header_info['gps_hacc'] = unpack_from('I', self.dataHeader, 83)[0]
self.header_info['gps_vacc'] = unpack_from('I', self.dataHeader, 87)[0]
self.header_info['timing_status'] = unpack_from('BBH', self.dataHeader, 91)
self.header_info['timing_flags'] = self.header_info['timing_status'][0]
self.header_info['timing_sat_count'] = self.header_info['timing_status'][1]
self.header_info['timing_stability'] = self.header_info['timing_status'][2]
self.header_info['future1'] = unpack_from('b', self.dataHeader, 95)[0]
self.header_info['future2'] = unpack_from('i', self.dataHeader, 97)[0]
self.header_info['saturated_frames'] = unpack_from('H', self.dataHeader, 101)[0]
if self.header_info['saturated_frames'] & 0x80 == 0x80:
self.header_info['saturated_frames'] &= 0x7F
self.header_info['saturated_frames'] <<= 4
self.header_info['missing_frames'] = unpack_from('H', self.dataHeader, 103)[0]
self.header_info['battery_voltage_mV'] = unpack_from('H', self.dataHeader, 105)[0]
self.header_info['min_signal'] = unpack_from('f', self.dataHeader, 107)[0]
self.header_info['max_signal'] = unpack_from('f', self.dataHeader, 111)[0]
def close(self):
if self.stream is not None:
self.stream.close()
class NativeReader(_TSReaderBase):
    """Native sampling rate 'Raw' time series reader class.

    Streams 64-byte frames, each holding twenty 24-bit big-endian samples
    plus a 4-byte footer, and returns the samples scaled according to the
    requested DataScaling mode.
    """
    def __init__(self, path, num_files=1, scale_to=DataScaling.AD_input_volts,
                 header_size=128, last_frame=0, channel_gain=0.5, ad_plus_minus_range = 5.0,
                 channel_type="E", report_hw_sat=False):
        # Init the base class
        _TSReaderBase.__init__(self, path, num_files, header_size, report_hw_sat)
        # Track the last frame seen by the streamer, to report missing frames
        self.last_frame = last_frame
        self.header_size = header_size
        self.data_scaling = scale_to
        self.total_circuitry_gain = channel_gain
        self.ad_plus_minus_range = ad_plus_minus_range
        if header_size == 128:
            # Re-parses the header: the gain/range values assigned just above
            # are overwritten with the values recorded in the file header.
            self.unpack_header()
        # Now that we have the channel circuit-based gain (either form init or from the header)
        # We can calculate the voltage range at the input of the board.
        self.input_plusminus_range = self.ad_plus_minus_range / self.total_circuitry_gain
        # Samples are 24-bit values left-justified in a 32-bit int (see
        # read_frames), hence the 2**31 normalization below.
        if self.data_scaling == DataScaling.AD_in_ADunits:
            self._scale_factor = 256
        elif self.data_scaling == DataScaling.AD_input_volts:
            self._scale_factor = self.ad_plus_minus_range / (2 ** 31)
        elif self.data_scaling == DataScaling.instrument_input_volts:
            self._scale_factor = self.input_plusminus_range / (2 ** 31)
        else:
            raise LookupError("Invalid scaling requested")
        # optimization variables
        # Footer layout: low 28 bits = frame index, bits 28-30 = saturation count.
        self.footer_idx_samp_mask = int('0x0fffffff', 16)
        self.footer_sat_mask = int('0x70000000', 16)
    def unpack_header(self):
        """Decode the file header (currently identical to the base-class layout)."""
        super(NativeReader, self).unpack_header()
        # TODO: Implement any specific header unpacking for this particular class below
    def read_frames(self, num_frames):
        """Read num_frames 64-byte frames and return the scaled samples.

        Returns a float array of num_frames * 20 samples, or an empty array
        when the sequence runs out of data.  Skipped frames are reported on
        stdout; saturation counts too, when report_hw_sat is set.
        """
        frames_in_buf = 0
        _idx_buf = 0
        _data_buf = empty([num_frames * 20])  # 20 samples packed in a frame
        while (frames_in_buf < num_frames):
            dataFrame = self.stream.read(64)
            if not dataFrame:
                # End of this file: roll over to the next one in the sequence.
                if not self.open_next():
                    return empty([0])
                dataFrame = self.stream.read(64)
                if not dataFrame:
                    return empty([0])
            dataFooter = unpack_from("I", dataFrame, 60)
            # Check that there are no skipped frames
            frameCount = dataFooter[0] & self.footer_idx_samp_mask
            difCount = frameCount - self.last_frame
            if (difCount != 1):
                print ("Ch [%s] Missing frames at %d [%d]\n" %
                       (self.ch_id, frameCount, difCount))
            self.last_frame = frameCount
            for ptrSamp in range(0, 60, 3):
                # 24-bit big-endian sample padded with a zero byte: unpacking
                # as ">i" yields the value left-justified in 32 bits.
                tmpSampleTupple = unpack(">i", dataFrame[ptrSamp:ptrSamp + 3] + b'\x00')
                _data_buf[_idx_buf] = tmpSampleTupple[0] * self._scale_factor
                _idx_buf += 1
            frames_in_buf += 1
            if self.report_hw_sat:
                satCount = (dataFooter[0] & self.footer_sat_mask) >> 24
                if satCount:
                    print ("Ch [%s] Frame %d has %d saturations" %
                           (self.ch_id, frameCount, satCount))
        return _data_buf
    def skip_frames(self, num_frames):
        """Skip num_frames frames, rolling over files as needed.

        Returns False when the data ran out before the skip completed,
        True otherwise.  last_frame is advanced unconditionally on success.
        """
        bytes_to_skip = int(num_frames * 64)
        # Python is dumb for seek and tell, it cannot tell us if a seek goes
        # past EOF so instead we need to do inefficient reads to skip bytes
        while (bytes_to_skip > 0):
            foo = self.stream.read(bytes_to_skip)
            local_read_size = len(foo)
            # If we ran out of data in this file before finishing the skip,
            # open the next file and return false if there is no next file
            # to indicate that the skip ran out of
            # data before completion
            if local_read_size == 0:
                more_data = self.open_next()
                if not more_data:
                    return False
            else:
                bytes_to_skip -= local_read_size
        # If we reached here we managed to skip all the data requested
        # return true
        self.last_frame += num_frames
        return True
class DecimatedSegmentedReader(_TSReaderBase):
    """Streamer for segmented decimated time series, i.e. ``*.td_24k``.

    Each record consists of a 32-byte sub-header followed by float32 samples.
    """

    # (key, struct format, byte offset) layout of the 32-byte record sub-header.
    _SUBHEADER_FIELDS = (
        ('timestamp', 'I', 0),
        ('samplesInRecord', 'I', 4),
        ('satCount', 'H', 8),
        ('missCount', 'H', 10),
        ('minVal', 'f', 12),
        ('maxVal', 'f', 16),
        ('avgVal', 'f', 20),
    )
    def __init__(self, path, num_files=1, report_hw_sat=False):
        """Open the first file of the sequence and decode its 128-byte header."""
        super().__init__(path, num_files, 128, report_hw_sat)
        self.unpack_header()
        self.subheader = {}
    def unpack_header(self):  # TODO: Work in progress, for now unpacking as raw time series header
        # Only the standard 128-byte header layout is understood so far.
        if self.header_size == 128:
            super().unpack_header()
        # TODO: Implement any specific header unpacking for this particular class below
    def read_subheader(self):
        """Read and decode the next record's 32-byte sub-header."""
        raw = self.stream.read(32)
        if not raw and self.open_next():
            # Record boundary coincided with a file boundary: retry.
            raw = self.stream.read(32)
        if raw and len(raw) >= 32:
            for key, fmt, offset in self._SUBHEADER_FIELDS:
                self.subheader[key] = unpack_from(fmt, raw, offset)[0]
        else:
            # Out of data: mark an empty record.
            self.subheader['timestamp'] = 0
            self.subheader['samplesInRecord'] = 0
    def read_record_data(self):
        """Return the current record's samples as a float32 array."""
        if self.stream is None:
            return empty([0])
        count = self.subheader['samplesInRecord']
        if count is None or count == 0:
            return empty([0])
        data = fromfile(self.stream, dtype=float32, count=count)
        if data.size == 0:
            # The record continues in the next file of the sequence.
            if not self.open_next():
                return empty([0])
            # Array below will contain the data, or will be an empty array if end of series as desired
            data = fromfile(self.stream, dtype=float32, count=count)
        return data
    def read_record(self):
        """Read one full record: sub-header first, then its data."""
        self.read_subheader()
        return self.read_record_data()
class DecimatedContinuousReader(_TSReaderBase):
    """Streamer for continuous decimated time series, i.e. ``*.td_150``, ``*.td_30``."""
    def __init__(self, path, num_files=1, report_hw_sat=False):
        """Open the first file of the sequence and decode its 128-byte header."""
        super().__init__(path, num_files, 128, report_hw_sat)
        self.unpack_header()
        self.subheader = {}
    def unpack_header(self):  # TODO: Work in progress, for now unpacking as raw time series header
        # Only the standard 128-byte header layout is understood so far.
        if self.header_size == 128:
            super().unpack_header()
        # TODO: Implement any specific header unpacking for this particular class below
    def read_data(self, numSamples):
        """Return the next ``numSamples`` float32 values, spanning files as needed.

        Returns an empty array when no stream is open or the series ends
        before the requested number of samples could be read.
        """
        if self.stream is None:
            return empty([0])
        data = fromfile(self.stream, dtype=float32, count=numSamples)
        while data.size < numSamples:
            # Current file exhausted: continue reading from the next one.
            if not self.open_next():
                return empty([0])
            # Array below will contain the data, or will be an empty array if end of series as desired
            data = append(data,
                          fromfile(self.stream,
                                   dtype=float32,
                                   count=(numSamples - data.size)))
        return data
| [
"numpy.fromfile",
"numpy.empty",
"struct.unpack",
"os.path.exists",
"os.path.split",
"struct.unpack_from"
] | [((644, 673), 'os.path.split', 'os.path.split', (['self.base_path'], {}), '(self.base_path)\n', (657, 673), False, 'import os\n'), ((3143, 3167), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (3157, 3167), False, 'import os\n'), ((10298, 10342), 'struct.unpack_from', 'unpack_from', (['"""BBBBBBBB"""', 'self.dataHeader', '(51)'], {}), "('BBBBBBBB', self.dataHeader, 51)\n", (10309, 10342), False, 'from struct import unpack_from, unpack\n'), ((12375, 12414), 'struct.unpack_from', 'unpack_from', (['"""BBH"""', 'self.dataHeader', '(91)'], {}), "('BBH', self.dataHeader, 91)\n", (12386, 12414), False, 'from struct import unpack_from, unpack\n'), ((15444, 15468), 'numpy.empty', 'empty', (['[num_frames * 20]'], {}), '([num_frames * 20])\n', (15449, 15468), False, 'from numpy import empty, fromfile, float32, append\n'), ((19433, 19443), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (19438, 19443), False, 'from numpy import empty, fromfile, float32, append\n'), ((20891, 20901), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (20896, 20901), False, 'from numpy import empty, fromfile, float32, append\n'), ((2493, 2517), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (2507, 2517), False, 'import os\n'), ((8796, 8832), 'struct.unpack_from', 'unpack_from', (['"""B"""', 'self.dataHeader', '(0)'], {}), "('B', self.dataHeader, 0)\n", (8807, 8832), False, 'from struct import unpack_from, unpack\n'), ((8879, 8915), 'struct.unpack_from', 'unpack_from', (['"""B"""', 'self.dataHeader', '(1)'], {}), "('B', self.dataHeader, 1)\n", (8890, 8915), False, 'from struct import unpack_from, unpack\n'), ((8956, 8992), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(2)'], {}), "('H', self.dataHeader, 2)\n", (8967, 8992), False, 'from struct import unpack_from, unpack\n'), ((9267, 9304), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(20)'], {}), "('I', self.dataHeader, 20)\n", (9278, 9304), 
False, 'from struct import unpack_from, unpack\n'), ((9344, 9381), 'struct.unpack_from', 'unpack_from', (['"""B"""', 'self.dataHeader', '(24)'], {}), "('B', self.dataHeader, 24)\n", (9355, 9381), False, 'from struct import unpack_from, unpack\n'), ((9429, 9466), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(25)'], {}), "('I', self.dataHeader, 25)\n", (9440, 9466), False, 'from struct import unpack_from, unpack\n'), ((9512, 9549), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(29)'], {}), "('H', self.dataHeader, 29)\n", (9523, 9549), False, 'from struct import unpack_from, unpack\n'), ((11095, 11132), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(59)'], {}), "('H', self.dataHeader, 59)\n", (11106, 11132), False, 'from struct import unpack_from, unpack\n'), ((11182, 11219), 'struct.unpack_from', 'unpack_from', (['"""b"""', 'self.dataHeader', '(61)'], {}), "('b', self.dataHeader, 61)\n", (11193, 11219), False, 'from struct import unpack_from, unpack\n'), ((11494, 11531), 'struct.unpack_from', 'unpack_from', (['"""B"""', 'self.dataHeader', '(62)'], {}), "('B', self.dataHeader, 62)\n", (11505, 11531), False, 'from struct import unpack_from, unpack\n'), ((11576, 11613), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(63)'], {}), "('I', self.dataHeader, 63)\n", (11587, 11613), False, 'from struct import unpack_from, unpack\n'), ((11797, 11834), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(67)'], {}), "('H', self.dataHeader, 67)\n", (11808, 11834), False, 'from struct import unpack_from, unpack\n'), ((11889, 11926), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(69)'], {}), "('H', self.dataHeader, 69)\n", (11900, 11926), False, 'from struct import unpack_from, unpack\n'), ((11969, 12006), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'self.dataHeader', '(71)'], {}), "('f', self.dataHeader, 71)\n", (11980, 12006), False, 
'from struct import unpack_from, unpack\n'), ((12048, 12085), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'self.dataHeader', '(75)'], {}), "('f', self.dataHeader, 75)\n", (12059, 12085), False, 'from struct import unpack_from, unpack\n'), ((12130, 12167), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'self.dataHeader', '(79)'], {}), "('f', self.dataHeader, 79)\n", (12141, 12167), False, 'from struct import unpack_from, unpack\n'), ((12210, 12247), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(83)'], {}), "('I', self.dataHeader, 83)\n", (12221, 12247), False, 'from struct import unpack_from, unpack\n'), ((12290, 12327), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(87)'], {}), "('I', self.dataHeader, 87)\n", (12301, 12327), False, 'from struct import unpack_from, unpack\n'), ((12701, 12738), 'struct.unpack_from', 'unpack_from', (['"""b"""', 'self.dataHeader', '(95)'], {}), "('b', self.dataHeader, 95)\n", (12712, 12738), False, 'from struct import unpack_from, unpack\n'), ((12780, 12817), 'struct.unpack_from', 'unpack_from', (['"""i"""', 'self.dataHeader', '(97)'], {}), "('i', self.dataHeader, 97)\n", (12791, 12817), False, 'from struct import unpack_from, unpack\n'), ((12868, 12906), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(101)'], {}), "('H', self.dataHeader, 101)\n", (12879, 12906), False, 'from struct import unpack_from, unpack\n'), ((13131, 13169), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(103)'], {}), "('H', self.dataHeader, 103)\n", (13142, 13169), False, 'from struct import unpack_from, unpack\n'), ((13222, 13260), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'self.dataHeader', '(105)'], {}), "('H', self.dataHeader, 105)\n", (13233, 13260), False, 'from struct import unpack_from, unpack\n'), ((13305, 13343), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'self.dataHeader', '(107)'], {}), "('f', self.dataHeader, 107)\n", (13316, 
13343), False, 'from struct import unpack_from, unpack\n'), ((13388, 13426), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'self.dataHeader', '(111)'], {}), "('f', self.dataHeader, 111)\n", (13399, 13426), False, 'from struct import unpack_from, unpack\n'), ((15848, 15879), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'dataFrame', '(60)'], {}), "('I', dataFrame, 60)\n", (15859, 15879), False, 'from struct import unpack_from, unpack\n'), ((19631, 19708), 'numpy.fromfile', 'fromfile', (['self.stream'], {'dtype': 'float32', 'count': "self.subheader['samplesInRecord']"}), "(self.stream, dtype=float32, count=self.subheader['samplesInRecord'])\n", (19639, 19708), False, 'from numpy import empty, fromfile, float32, append\n'), ((20962, 21016), 'numpy.fromfile', 'fromfile', (['self.stream'], {'dtype': 'float32', 'count': 'numSamples'}), '(self.stream, dtype=float32, count=numSamples)\n', (20970, 21016), False, 'from numpy import empty, fromfile, float32, append\n'), ((10236, 10273), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'self.dataHeader', '(47)'], {}), "('I', self.dataHeader, 47)\n", (10247, 10273), False, 'from struct import unpack_from, unpack\n'), ((16327, 16381), 'struct.unpack', 'unpack', (['""">i"""', "(dataFrame[ptrSamp:ptrSamp + 3] + b'\\x00')"], {}), "('>i', dataFrame[ptrSamp:ptrSamp + 3] + b'\\x00')\n", (16333, 16381), False, 'from struct import unpack_from, unpack\n'), ((18855, 18890), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'subheaderBytes', '(0)'], {}), "('I', subheaderBytes, 0)\n", (18866, 18890), False, 'from struct import unpack_from, unpack\n'), ((18942, 18977), 'struct.unpack_from', 'unpack_from', (['"""I"""', 'subheaderBytes', '(4)'], {}), "('I', subheaderBytes, 4)\n", (18953, 18977), False, 'from struct import unpack_from, unpack\n'), ((19022, 19057), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'subheaderBytes', '(8)'], {}), "('H', subheaderBytes, 8)\n", (19033, 19057), False, 'from struct import unpack_from, 
unpack\n'), ((19103, 19139), 'struct.unpack_from', 'unpack_from', (['"""H"""', 'subheaderBytes', '(10)'], {}), "('H', subheaderBytes, 10)\n", (19114, 19139), False, 'from struct import unpack_from, unpack\n'), ((19182, 19218), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'subheaderBytes', '(12)'], {}), "('f', subheaderBytes, 12)\n", (19193, 19218), False, 'from struct import unpack_from, unpack\n'), ((19261, 19297), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'subheaderBytes', '(16)'], {}), "('f', subheaderBytes, 16)\n", (19272, 19297), False, 'from struct import unpack_from, unpack\n'), ((19340, 19376), 'struct.unpack_from', 'unpack_from', (['"""f"""', 'subheaderBytes', '(20)'], {}), "('f', subheaderBytes, 20)\n", (19351, 19376), False, 'from struct import unpack_from, unpack\n'), ((19959, 20036), 'numpy.fromfile', 'fromfile', (['self.stream'], {'dtype': 'float32', 'count': "self.subheader['samplesInRecord']"}), "(self.stream, dtype=float32, count=self.subheader['samplesInRecord'])\n", (19967, 20036), False, 'from numpy import empty, fromfile, float32, append\n'), ((9169, 9213), 'struct.unpack_from', 'unpack_from', (['"""cccccccc"""', 'self.dataHeader', '(12)'], {}), "('cccccccc', self.dataHeader, 12)\n", (9180, 9213), False, 'from struct import unpack_from, unpack\n'), ((15690, 15700), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (15695, 15700), False, 'from numpy import empty, fromfile, float32, append\n'), ((15811, 15821), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (15816, 15821), False, 'from numpy import empty, fromfile, float32, append\n'), ((19813, 19823), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (19818, 19823), False, 'from numpy import empty, fromfile, float32, append\n'), ((21132, 21142), 'numpy.empty', 'empty', (['[0]'], {}), '([0])\n', (21137, 21142), False, 'from numpy import empty, fromfile, float32, append\n'), ((21331, 21402), 'numpy.fromfile', 'fromfile', (['self.stream'], {'dtype': 'float32', 'count': '(numSamples 
- ret_array.size)'}), '(self.stream, dtype=float32, count=numSamples - ret_array.size)\n', (21339, 21402), False, 'from numpy import empty, fromfile, float32, append\n'), ((9590, 9628), 'struct.unpack_from', 'unpack_from', (['"""8s"""', 'self.dataHeader', '(31)'], {}), "('8s', self.dataHeader, 31)\n", (9601, 9628), False, 'from struct import unpack_from, unpack\n'), ((9828, 9866), 'struct.unpack_from', 'unpack_from', (['"""8s"""', 'self.dataHeader', '(39)'], {}), "('8s', self.dataHeader, 39)\n", (9839, 9866), False, 'from struct import unpack_from, unpack\n'), ((9036, 9073), 'struct.unpack_from', 'unpack_from', (['"""8s"""', 'self.dataHeader', '(4)'], {}), "('8s', self.dataHeader, 4)\n", (9047, 9073), False, 'from struct import unpack_from, unpack\n')] |
import pandas as pd
import numpy as np
# Load the training prices; column index 1 is assumed to hold the open price
# (keeping 1:2 preserves the 2-D shape MinMaxScaler expects).
dataset_train = pd.read_csv("tslatrain.csv")
training_set = dataset_train.iloc[:, 1:2].values
from sklearn.preprocessing import MinMaxScaler
# Scale prices into [0, 1]; the SAME fitted scaler is reused on the test
# inputs further below, so train and test share one value range.
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
print(training_set_scaled)
#Creating the timestamp datastructures with the 1 output. could change the 60 hyperparameter
input_train = []
benchmark_train = []
# Sliding window: each training sample is the previous 60 scaled prices and
# the label is the price immediately following the window.
for i in range(60, len(training_set_scaled)):
    input_train.append(training_set_scaled[i-60:i, 0])
    benchmark_train.append(training_set_scaled[i, 0])
input_train, benchmark_train = np.array(input_train), np.array(benchmark_train)
# Part 2 - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
#Reshaping
# Keras LSTM layers expect (samples, timesteps, features); features = 1 here.
input_train = np.reshape(input_train, (input_train.shape[0], input_train.shape[1], 1))
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (input_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
# (last LSTM returns only the final hidden state for the Dense head)
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(input_train, benchmark_train, epochs = 100, batch_size = 32)
# Part 3 - Making the predictions and visualising the results
import matplotlib.pyplot as plt
# Getting the real stock price of 2017
dataset_test = pd.read_csv('tslatest.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
# NOTE(review): the column is 'open' in the train file but 'Open' in the test
# file — confirm the two CSV headers really differ in case; a KeyError here
# means they don't.
dataset_total = pd.concat((dataset_train['open'], dataset_test['Open']), axis = 0)
# Keep the last len(test) + 60 prices so every test sample has a full window.
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
print(inputs)
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
input_test = []
# NOTE(review): the bound 83 hard-codes 60 + 23 test rows — verify it matches
# the actual length of tslatest.csv.
for i in range(60, 83):
    input_test.append(inputs[i-60:i, 0])
input_test = np.array(input_test)
input_test = np.reshape(input_test, (input_test.shape[0], input_test.shape[1], 1))
predicted_stock_price = regressor.predict(input_test)
# Undo the MinMax scaling so the predictions are in price units again.
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# Visualising the results
plt.plot(real_stock_price, color = 'red', label = 'Real Tesla Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Tesla Stock Price')
plt.title('Tesla Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Tesla Stock Price')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"keras.layers.LSTM",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel",
"keras.layers.Dropout",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"... | [((56, 84), 'pandas.read_csv', 'pd.read_csv', (['"""tslatrain.csv"""'], {}), "('tslatrain.csv')\n", (67, 84), True, 'import pandas as pd\n'), ((187, 221), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (199, 221), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((912, 924), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (922, 924), False, 'from keras.models import Sequential\n'), ((951, 1023), 'numpy.reshape', 'np.reshape', (['input_train', '(input_train.shape[0], input_train.shape[1], 1)'], {}), '(input_train, (input_train.shape[0], input_train.shape[1], 1))\n', (961, 1023), True, 'import numpy as np\n'), ((2038, 2065), 'pandas.read_csv', 'pd.read_csv', (['"""tslatest.csv"""'], {}), "('tslatest.csv')\n", (2049, 2065), True, 'import pandas as pd\n'), ((2134, 2198), 'pandas.concat', 'pd.concat', (["(dataset_train['open'], dataset_test['Open'])"], {'axis': '(0)'}), "((dataset_train['open'], dataset_test['Open']), axis=0)\n", (2143, 2198), True, 'import pandas as pd\n'), ((2447, 2467), 'numpy.array', 'np.array', (['input_test'], {}), '(input_test)\n', (2455, 2467), True, 'import numpy as np\n'), ((2481, 2550), 'numpy.reshape', 'np.reshape', (['input_test', '(input_test.shape[0], input_test.shape[1], 1)'], {}), '(input_test, (input_test.shape[0], input_test.shape[1], 1))\n', (2491, 2550), True, 'import numpy as np\n'), ((2700, 2771), 'matplotlib.pyplot.plot', 'plt.plot', (['real_stock_price'], {'color': '"""red"""', 'label': '"""Real Tesla Stock Price"""'}), "(real_stock_price, color='red', label='Real Tesla Stock Price')\n", (2708, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2863), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted_stock_price'], {'color': '"""blue"""', 'label': '"""Predicted Tesla Stock Price"""'}), "(predicted_stock_price, color='blue', label=\n 'Predicted Tesla Stock Price')\n", (2784, 2863), True, 'import matplotlib.pyplot as 
plt\n'), ((2863, 2904), 'matplotlib.pyplot.title', 'plt.title', (['"""Tesla Stock Price Prediction"""'], {}), "('Tesla Stock Price Prediction')\n", (2872, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2923), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2915, 2923), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2955), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tesla Stock Price"""'], {}), "('Tesla Stock Price')\n", (2934, 2955), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2968), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2966, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2977, 2979), True, 'import matplotlib.pyplot as plt\n'), ((622, 643), 'numpy.array', 'np.array', (['input_train'], {}), '(input_train)\n', (630, 643), True, 'import numpy as np\n'), ((645, 670), 'numpy.array', 'np.array', (['benchmark_train'], {}), '(benchmark_train)\n', (653, 670), True, 'import numpy as np\n'), ((1101, 1177), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)', 'input_shape': '(input_train.shape[1], 1)'}), '(units=50, return_sequences=True, input_shape=(input_train.shape[1], 1))\n', (1105, 1177), False, 'from keras.layers import LSTM\n'), ((1199, 1211), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1206, 1211), False, 'from keras.layers import Dropout\n'), ((1289, 1326), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)'}), '(units=50, return_sequences=True)\n', (1293, 1326), False, 'from keras.layers import LSTM\n'), ((1346, 1358), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1353, 1358), False, 'from keras.layers import Dropout\n'), ((1435, 1472), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)'}), '(units=50, return_sequences=True)\n', (1439, 1472), False, 'from keras.layers import LSTM\n'), 
((1492, 1504), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1499, 1504), False, 'from keras.layers import Dropout\n'), ((1582, 1596), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)'}), '(units=50)\n', (1586, 1596), False, 'from keras.layers import LSTM\n'), ((1614, 1626), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1621, 1626), False, 'from keras.layers import Dropout\n'), ((1669, 1683), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (1674, 1683), False, 'from keras.layers import Dense\n')] |
import csv
import re
import numpy as np
import torch
from torch.autograd import Variable
from lyrics_prediction.word_rnn import CharBasedRNN
MAX_SEQ_LEN = 69
class RNNTools:
    """Helpers around a character/word-based RNN tagger: model loading,
    evaluation metrics, and conversion of raw text / CSV files into
    (input, target) training instances."""

    def __init__(self):
        self.lang = None   # vocabulary object, set by load_model()
        self.model = None  # CharBasedRNN instance, set by load_model()

    # --- model utilities ---------------------------------------------------

    def load_model(self, lang, cp='70000', hidden_size=500, n_layers=1, n_tags=2):
        """Load a CharBasedRNN checkpoint for vocabulary *lang* (CPU, eval mode)."""
        self.lang = lang
        self.model = CharBasedRNN(self.lang.word_count, hidden_size, n_tags, n_layers, self.lang)
        self.model.load_state_dict(
            torch.load("./model/model.checkpoint." + cp, map_location=lambda storage, loc: storage))
        self.model.cpu()
        self.model.eval()

    def softmax(self, x):
        """Softmax over the first axis of *x*."""
        return np.exp(x) / np.sum(np.exp(x), axis=0)

    def precision(self, output, target):
        """Precision of the positive class: TP / (TP + FP).

        Returns 0.0 when nothing was predicted positive (the previous
        implementation raised ZeroDivisionError in that case).
        """
        true_pos = 0
        false_pos = 0
        for predicted, actual in zip(output, target):
            if predicted == 1:
                if actual == 1:
                    true_pos += 1
                else:
                    false_pos += 1
        predicted_pos = true_pos + false_pos
        return true_pos / predicted_pos if predicted_pos else 0.0

    def recall(self, output, target):
        """Recall of the positive class: TP / (TP + FN).

        Returns 0.0 when the target contains no positives (the previous
        implementation raised ZeroDivisionError in that case).
        """
        true_pos = 0
        false_neg = 0
        for predicted, actual in zip(output, target):
            if actual == 1:
                if predicted == 1:
                    true_pos += 1
                else:
                    false_neg += 1
        actual_pos = true_pos + false_neg
        return true_pos / actual_pos if actual_pos else 0.0

    def sentence2variable(self, input_vec, output):
        """Wrap token-id and tag lists as column LongTensor Variables."""
        input_var = Variable(torch.LongTensor(input_vec).view(-1, 1))
        target_var = Variable(torch.LongTensor(output).view(-1, 1))
        return (input_var, target_var)

    def format_text(self, text):
        """Tokenise *text*: keep only [A-Za-z0-9:.<>/] characters, strip
        HTML-like tags, and turn a trailing run of dots into a single '.'
        token."""
        new_word_list = []
        for word in text.split():
            if (word == " "):
                continue
            word = re.sub(r"[^A-Za-z0-9:.<>/]", "", word.strip())
            new_word_list += word.split()
        # Compile the tag-stripping regex once instead of per token.
        tag_re = re.compile('<.*?>')
        cleaned_text = []
        for token in new_word_list:
            cleaned_text.append(tag_re.sub('', token))
            if cleaned_text[-1].endswith('.'):
                while cleaned_text[-1].endswith('.'):
                    cleaned_text[-1] = cleaned_text[-1][:-1]
                cleaned_text.append('.')
        return cleaned_text

    def _build_instances(self, text, tags):
        """Shared tail of load_file()/load_text().

        Collapses runs of consecutive '.' tokens, normalises tags to
        {'0', '1'}, encodes tokens through self.lang, and splits the sequence
        into (input, target) Variable pairs at sentence ends / MAX_SEQ_LEN.
        Mutates *text* and *tags* in place, as the original code did.
        """
        dot_count = 0
        for s in range(len(text) - 1, 0, -1):
            if tags[s] not in ('0', '1'):
                tags[s] = '0'
            if text[s] == '.':
                dot_count += 1
            else:
                dot_count = 0
            if dot_count > 1:
                # Drop the duplicate dot that follows this one.
                del text[s + 1]
                del tags[s + 1]
        input_vec = [self.lang.get_token_id(token) for token in text]
        output_vec = [int(tag) for tag in tags]
        # Build instances: close one at a sentence end ('.'), at the last
        # token, or when the maximum sequence length is reached.
        instances = []
        instance_input = []
        instance_output = []
        for i in range(0, len(input_vec)):
            instance_input.append(input_vec[i])
            instance_output.append(output_vec[i])
            if self.lang.ind2word[input_vec[i]] == "." or i == len(input_vec) - 1 or len(
                    instance_input) >= MAX_SEQ_LEN:
                instances.append(self.sentence2variable(instance_input, instance_output))
                instance_input = []
                instance_output = []
        return instances

    def load_file(self, filepath):
        """Read a CSV whose first row is the token list and whose next
        non-empty row is the tag list; return training instances."""
        with open(filepath) as csvDataFile:
            csvReader = csv.reader(csvDataFile)
            text = next(csvReader)
            try:
                tags = next(csvReader)
                while (len(tags) == 0):
                    tags = next(csvReader)
            except StopIteration:
                # No tag row at all: assume everything is untagged.
                tags = [0] * len(text)
        return self._build_instances(text, tags)

    def load_text(self, text):
        """Tokenise raw *text* and return (untagged) training instances."""
        tokens = self.format_text(text)
        tags = [0] * len(tokens)
        return self._build_instances(tokens, tags)

    def build_dataset_csv(self, *args):
        """Build a dataset from an iterable of CSV paths (first positional
        argument); entries not ending in '.csv' are skipped."""
        data = []
        for f in args[0]:
            print(f)
            if not f.endswith('.csv'):
                continue
            print("processing", f)
            data.extend(self.load_file(f))
        return data

    def build_dataset_text(self, text):
        """Build a dataset from a single raw text string."""
        return list(self.load_text(text))
| [
"csv.reader",
"torch.LongTensor",
"torch.load",
"numpy.exp",
"lyrics_prediction.word_rnn.CharBasedRNN",
"re.sub",
"re.compile"
] | [((452, 528), 'lyrics_prediction.word_rnn.CharBasedRNN', 'CharBasedRNN', (['self.lang.word_count', 'hidden_size', 'n_tags', 'n_layers', 'self.lang'], {}), '(self.lang.word_count, hidden_size, n_tags, n_layers, self.lang)\n', (464, 528), False, 'from lyrics_prediction.word_rnn import CharBasedRNN\n'), ((577, 668), 'torch.load', 'torch.load', (["('./model/model.checkpoint.' + cp)"], {'map_location': '(lambda storage, loc: storage)'}), "('./model/model.checkpoint.' + cp, map_location=lambda storage,\n loc: storage)\n", (587, 668), False, 'import torch\n'), ((759, 768), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (765, 768), True, 'import numpy as np\n'), ((2075, 2094), 're.compile', 're.compile', (['"""<.*?>"""'], {}), "('<.*?>')\n", (2085, 2094), False, 'import re\n'), ((2507, 2530), 'csv.reader', 'csv.reader', (['csvDataFile'], {}), '(csvDataFile)\n', (2517, 2530), False, 'import csv\n'), ((778, 787), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (784, 787), True, 'import numpy as np\n'), ((2127, 2166), 're.sub', 're.sub', (['cleanr', '""""""', 'new_word_list[word]'], {}), "(cleanr, '', new_word_list[word])\n", (2133, 2166), False, 'import re\n'), ((1571, 1598), 'torch.LongTensor', 'torch.LongTensor', (['input_vec'], {}), '(input_vec)\n', (1587, 1598), False, 'import torch\n'), ((1642, 1666), 'torch.LongTensor', 'torch.LongTensor', (['output'], {}), '(output)\n', (1658, 1666), False, 'import torch\n')] |
#!/usr/bin/env python
import math, cv2
import numpy as np
from utils import *
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
class ImageEncoder(nn.Module):
    """Projects pre-extracted 2048-d image features into the joint
    embedding space via a single linear layer.

    The CNN backbone (commented out in earlier revisions) is assumed to run
    elsewhere; only the projection lives here.
    """

    def __init__(self, config):
        super(ImageEncoder, self).__init__()
        self.cfg = config
        # Linear projection from backbone features to the embedding space.
        self.fc = nn.Linear(2048, self.cfg.n_feature_dim)
        self.init_weights()

    def init_weights(self):
        """Xavier-uniform initialisation of the projection layer."""
        fan_sum = self.fc.in_features + self.fc.out_features
        bound = np.sqrt(6.) / np.sqrt(fan_sum)
        self.fc.weight.data.uniform_(-bound, bound)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Map a batch of image feature vectors to the embedding space."""
        return self.fc(images)
"numpy.sqrt",
"torch.nn.Linear"
] | [((526, 565), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'self.cfg.n_feature_dim'], {}), '(2048, self.cfg.n_feature_dim)\n', (535, 565), True, 'import torch.nn as nn\n'), ((750, 762), 'numpy.sqrt', 'np.sqrt', (['(6.0)'], {}), '(6.0)\n', (757, 762), True, 'import numpy as np\n'), ((764, 815), 'numpy.sqrt', 'np.sqrt', (['(self.fc.in_features + self.fc.out_features)'], {}), '(self.fc.in_features + self.fc.out_features)\n', (771, 815), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

# Two synthetic 2-D Gaussian clusters used to eyeball class separability:
# one centred at the origin with mild positive feature correlation, one
# shifted to (1, 1) with mild negative correlation.
centre_a, spread_a = [0, 0], [[1, 0.1], [0.1, 1]]
centre_b, spread_b = [1, 1], [[1, -0.1], [-0.1, 1]]

# Sample 100 points from each distribution (same draw order as before).
cluster_a = np.random.multivariate_normal(centre_a, spread_a, 100)
cluster_b = np.random.multivariate_normal(centre_b, spread_b, 100)

# Scatter both clusters in contrasting colours on a single axis.
fig_gen_syn_data = plt.figure()
plot_gsd = fig_gen_syn_data.add_subplot(111)
plot_gsd.scatter(cluster_a[:, 0], cluster_a[:, 1], c='blue')
plot_gsd.scatter(cluster_b[:, 0], cluster_b[:, 1], c='red')
fig_gen_syn_data.show()
| [
"matplotlib.pyplot.figure",
"numpy.random.multivariate_normal"
] | [((305, 352), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean1', 'cov1', '(100)'], {}), '(mean1, cov1, 100)\n', (334, 352), True, 'import numpy as np\n'), ((370, 417), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean2', 'cov2', '(100)'], {}), '(mean2, cov2, 100)\n', (399, 417), True, 'import numpy as np\n'), ((446, 458), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (456, 458), True, 'import matplotlib.pyplot as plt\n')] |
from datetime import timedelta
import cv2
import numpy as np
import os
# i.e if video of duration 30 seconds, saves 10 frame per second = 300 frames saved in total
SAVING_FRAMES_PER_SECOND = 10
def format_timedelta(td):
    """Format a timedelta as a filename-safe string, e.g. "0-00-20.05".

    Colons are replaced with dashes so the result can be used in file
    names; microseconds are reduced to two digits of centiseconds.  A
    timedelta with no fractional part gets an explicit ".00" suffix.
    """
    result = str(td)
    try:
        result, ms = result.split(".")
    except ValueError:
        # No fractional seconds: append ".00" and make the WHOLE string
        # filename-safe.  (Bug fix: the original applied .replace() to the
        # ".00" literal only, leaving the colons in place.)
        return (result + ".00").replace(":", "-")
    ms = int(ms)
    ms = round(ms / 1e4)
    return f"{result}.{ms:02}".replace(":", "-")
def get_saving_frames_durations(cap, saving_fps):
    """Return the timestamps (seconds) at which frames should be saved."""
    # Clip duration = total frame count / native frames per second.
    clip_duration = cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS)
    # np.arange handles the fractional step of 1 / saving_fps.
    return [moment for moment in np.arange(0, clip_duration, 1 / saving_fps)]
def main(video_file):
    """Extract frames from *video_file* into a sibling "<name>-opencv" folder.

    Frames are sampled at min(video FPS, SAVING_FRAMES_PER_SECOND) and saved
    as JPEGs named after their filename-safe timestamp.
    """
    filename, _ = os.path.splitext(video_file)
    filename += "-opencv"
    # make a folder by the name of the video file
    if not os.path.isdir(filename):
        os.mkdir(filename)
    # read the video file
    cap = cv2.VideoCapture(video_file)
    # get the FPS of the video
    fps = cap.get(cv2.CAP_PROP_FPS)
    # if the SAVING_FRAMES_PER_SECOND is above video FPS, then set it to FPS (as maximum)
    saving_frames_per_second = min(fps, SAVING_FRAMES_PER_SECOND)
    # get the list of duration spots to save
    saving_frames_durations = get_saving_frames_durations(cap, saving_frames_per_second)
    # start the loop
    count = 0
    while True:
        is_read, frame = cap.read()
        if not is_read:
            # break out of the loop if there are no frames to read
            break
        # get the duration by dividing the frame count by the FPS
        frame_duration = count / fps
        try:
            # get the earliest duration to save
            closest_duration = saving_frames_durations[0]
        except IndexError:
            # the list is empty, all duration frames were saved
            break
        if frame_duration >= closest_duration:
            # if closest duration is less than or equals the frame duration,
            # then save the frame
            frame_duration_formatted = format_timedelta(timedelta(seconds=frame_duration))
            cv2.imwrite(os.path.join(filename, f"frame{frame_duration_formatted}.jpg"), frame)
            # drop the duration spot from the list, since this duration spot is already saved
            try:
                saving_frames_durations.pop(0)
            except IndexError:
                pass
        # increment the frame count
        count += 1
if __name__ == "__main__":
    import sys
    # Usage: python <script> <video_file>
    video_file = sys.argv[1]
    main(video_file)
"os.mkdir",
"os.path.isdir",
"cv2.VideoCapture",
"numpy.arange",
"os.path.splitext",
"datetime.timedelta",
"os.path.join"
] | [((976, 1019), 'numpy.arange', 'np.arange', (['(0)', 'clip_duration', '(1 / saving_fps)'], {}), '(0, clip_duration, 1 / saving_fps)\n', (985, 1019), True, 'import numpy as np\n'), ((1096, 1124), 'os.path.splitext', 'os.path.splitext', (['video_file'], {}), '(video_file)\n', (1112, 1124), False, 'import os\n'), ((1304, 1332), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (1320, 1332), False, 'import cv2\n'), ((1212, 1235), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (1225, 1235), False, 'import os\n'), ((1245, 1263), 'os.mkdir', 'os.mkdir', (['filename'], {}), '(filename)\n', (1253, 1263), False, 'import os\n'), ((2432, 2465), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'frame_duration'}), '(seconds=frame_duration)\n', (2441, 2465), False, 'from datetime import timedelta\n'), ((2491, 2553), 'os.path.join', 'os.path.join', (['filename', 'f"""frame{frame_duration_formatted}.jpg"""'], {}), "(filename, f'frame{frame_duration_formatted}.jpg')\n", (2503, 2553), False, 'import os\n')] |
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import pickle
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
# NOTE(review): CountVectorizer, pickle, roc_auc_score and train_test_split
# are imported but unused in this script.
print("Loading Data ... ")
# Load the tab-separated train / validation / test splits.
li = ['cate2_id','cate3_id']
column = 'title_words'
train = pd.read_csv('train_a.txt', delimiter='\t')
valid = pd.read_csv('valid_a.txt', delimiter='\t')
test = pd.read_csv('test_a.txt', delimiter='\t')
test_id = test["item_id"].copy()
# Word 1/2-gram TF-IDF features over the title text.
vec = TfidfVectorizer(ngram_range=(1,2),min_df=3, max_df=0.5, use_idf=1, smooth_idf=1, sublinear_tf=1)
trn_term_doc = vec.fit_transform(train[column])
test_term_doc = vec.transform(test[column])
valid_term_doc = vec.transform(valid[column])
train_x, test_x, valid_x = trn_term_doc, test_term_doc, valid_term_doc
# Per-category-level number of leaves for the LightGBM model.
leavesmapping={'cate1_id':500, 'cate2_id':200, 'cate3_id':100}
# Train one multiclass model per category level.
for classes in li:
    # Map raw category ids onto a dense 0..n-1 label space and keep the
    # inverse mapping to translate predictions back.
    list_of_cate = list(set(train[classes]))
    mapping_dict = {list_of_cate[i]:i for i in range(len(list_of_cate))}
    return_dict = dict([(v, k) for (k, v) in mapping_dict.items()])
    train_y = (train[classes].map(mapping_dict)).astype(int)
    valid_y = (valid[classes].map(mapping_dict)).astype(int)
    X_train = train_x
    y_train = train_y
    X_test = valid_x
    y_test = valid_y
    # create dataset for lightgbm
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    # specify your configurations as a dict
    params = {
        'boosting_type': 'dart',
        # 'device': 'gpu',
        'max_depth': -1,
        'max_bin': 300,
        'objective': 'multiclassova',
        'num_class': len(list_of_cate),
        'metric': 'multi_error',
        'num_leaves': leavesmapping[classes],
        'min_data_in_leaf': 20,
        'num_iterations': 1000,
        'learning_rate': 0.15,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'lambda_l1': 0.4,
        'lambda_l2': 0.5,
        'min_gain_to_split': 0.2,
        'verbose': 5,
        'is_unbalance': True
    }
    # train
    print('Start training...')
    gbm = lgb.train(params,
                    lgb_train,
                    num_boost_round=10000,
                    valid_sets=lgb_eval,
                    early_stopping_rounds=8)
    print('Start predicting...')
    preds = gbm.predict(test_x, num_iteration=gbm.best_iteration)  # the output is per-class probabilities
    # Export results: one CSV per category level, argmax class per item.
    fid0 = open(classes + '.csv', 'w')
    fid0.write("id," + classes + "\n")
    i = 0
    for pred in preds:
        result = int(np.argmax(pred))
        fid0.write(str(test['item_id'][i]) + "," + str(return_dict[result]) + "\n")
        i = i + 1
    fid0.close()
    print(classes + ' finished!')
| [
"lightgbm.train",
"numpy.argmax",
"pandas.read_csv",
"sklearn.feature_extraction.text.TfidfVectorizer",
"lightgbm.Dataset"
] | [((344, 386), 'pandas.read_csv', 'pd.read_csv', (['"""train_a.txt"""'], {'delimiter': '"""\t"""'}), "('train_a.txt', delimiter='\\t')\n", (355, 386), True, 'import pandas as pd\n'), ((395, 437), 'pandas.read_csv', 'pd.read_csv', (['"""valid_a.txt"""'], {'delimiter': '"""\t"""'}), "('valid_a.txt', delimiter='\\t')\n", (406, 437), True, 'import pandas as pd\n'), ((445, 486), 'pandas.read_csv', 'pd.read_csv', (['"""test_a.txt"""'], {'delimiter': '"""\t"""'}), "('test_a.txt', delimiter='\\t')\n", (456, 486), True, 'import pandas as pd\n'), ((526, 628), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'min_df': '(3)', 'max_df': '(0.5)', 'use_idf': '(1)', 'smooth_idf': '(1)', 'sublinear_tf': '(1)'}), '(ngram_range=(1, 2), min_df=3, max_df=0.5, use_idf=1,\n smooth_idf=1, sublinear_tf=1)\n', (541, 628), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((1363, 1392), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1374, 1392), True, 'import lightgbm as lgb\n'), ((1408, 1456), 'lightgbm.Dataset', 'lgb.Dataset', (['X_test', 'y_test'], {'reference': 'lgb_train'}), '(X_test, y_test, reference=lgb_train)\n', (1419, 1456), True, 'import lightgbm as lgb\n'), ((2167, 2268), 'lightgbm.train', 'lgb.train', (['params', 'lgb_train'], {'num_boost_round': '(10000)', 'valid_sets': 'lgb_eval', 'early_stopping_rounds': '(8)'}), '(params, lgb_train, num_boost_round=10000, valid_sets=lgb_eval,\n early_stopping_rounds=8)\n', (2176, 2268), True, 'import lightgbm as lgb\n'), ((2602, 2617), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (2611, 2617), True, 'import numpy as np\n')] |
import numpy as np
from pystella.rf import Band
from pystella.rf.rad_func import Flux2MagAB
from pystella.util.phys_var import phys
__author__ = 'bakl'
class Star:
    """A star with an attached spectrum.

    Knows how to redshift, magnify and distance-dilute its spectrum to
    produce observed fluxes, AB / bolometric magnitudes and K-corrections.
    Distances and radii are in cm.
    """

    def __init__(self, name, spec=None, is_flux_eq_luminosity=False):
        """Creates a Star with Spectrum instance. Required parameters: name."""
        self._name = name
        self._sp = spec
        self.is_flux_eq_luminosity = is_flux_eq_luminosity
        self.radius_ph = None
        self._z = None
        self._magnification = 1.
        self.distance = None
        self.Tcol = {}
        self.zeta = {}

    def set_radius_ph(self, radius):
        """Set the photospheric radius [cm]."""
        self.radius_ph = radius

    def set_distance(self, distance):
        """
        Set distance to the star [cm]
        :param distance:
        """
        self.distance = distance

    def set_redshift(self, z):
        """Set the redshift used to shift the spectrum to the observer frame."""
        self._z = z

    def set_magnification(self, m):
        # Fixed copy-pasted comment: this is the (lensing) magnification
        # factor applied to the flux, not a redshift operation.
        self._magnification = m

    def set_Tcol(self, Tcol, bset):
        self.Tcol[bset] = Tcol

    def get_Tcol(self, bset):
        """Color temperature for band set *bset*, or None if not set."""
        if bset in self.Tcol:
            return self.Tcol[bset]
        return None

    def set_zeta(self, zeta, bset):
        self.zeta[bset] = zeta

    def get_zeta(self, bset):
        """Dilution factor for band set *bset*, or None if not set."""
        if bset in self.zeta:
            return self.zeta[bset]
        return None

    @property
    def Name(self):
        return self._name

    @property
    def z(self):
        return self._z

    @property
    def IsRedshift(self):
        return self.z is not None and self.z > 0.

    @property
    def IsRadius(self):
        return self.radius_ph is not None

    @property
    def IsDistance(self):
        return self.distance is not None

    @property
    def IsRadiusDist(self):
        return self.IsRadius and self.IsDistance

    @property
    def Freq(self):
        """Spectrum frequencies, redshifted if a redshift is set."""
        if self._sp is None:
            raise ValueError("Spectrum has not been defined. ")
        if self.IsRedshift:
            return self._sp.Freq / (1. + self.z)  # redshift the frequencies
        else:
            return self._sp.Freq

    @property
    def Wl(self):
        """Wavelengths [cm] corresponding to Freq."""
        if self._sp is None:
            raise ValueError("Spectrum has not been defined. ")
        return phys.c / self.Freq

    @property
    def Flux(self):
        """Magnified (and, if set, redshifted) flux per unit frequency."""
        if self._sp is None:
            raise ValueError("Spectrum has not been defined. ")
        flux = self._sp.Flux * self._magnification
        if self.IsRedshift:
            return Star.flux_to_redshift(flux, self.z)
        else:
            return flux

    @property
    def Flux_wl(self):
        return self.Flux * self.Freq ** 2 / phys.c  # flux [erg/cm^2/cm]

    @property
    def Luminosity(self):
        if self.is_flux_eq_luminosity:
            return self.Flux
        if self.radius_ph is None:
            raise ValueError("Photospheric radius has not been defined. ")
        return 4. * np.pi * self.radius_ph ** 2 * self.Flux

    @property
    def FluxObs(self):
        """Flux as seen by the observer at self.distance."""
        if self.IsRadiusDist:
            return self.Luminosity / (4 * np.pi * self.distance ** 2)
        elif self.IsDistance:
            return self.Flux / (4 * np.pi * self.distance ** 2)
        else:
            return self.Flux

    @property
    def FluxWlObs(self):
        """Observed flux per unit wavelength."""
        if self.IsRadiusDist:
            if self.is_flux_eq_luminosity:
                return self.Flux_wl / (4 * np.pi * self.distance ** 2)
            else:
                return self.Flux_wl * (self.radius_ph / self.distance) ** 2
        elif self.IsDistance:
            return self.Flux_wl / (4 * np.pi * self.distance ** 2)
        else:
            return self.Flux_wl

    @property
    def FluxAB(self):
        return -2.5 * np.log10(self.FluxObs) + phys.ZP_AB

    def _response_lmb(self, band, is_b_spline=True):
        """
        Compute response flux using provided spectral band
        :param band: photometric band
        :param is_b_spline: the method of interpolation
        :return: :raise ValueError:
        """
        from scipy import integrate
        from scipy import interpolate

        wl = self.Wl
        if min(wl) > band.wl[0] or max(wl) < band.wl[-1]:
            raise ValueError("Spectrum must be wider then band: " + str(band))

        flux = self.FluxWlObs / phys.cm_to_angs  # to flux [erg/cm^2/A]
        wl_s = wl * phys.cm_to_angs
        wl_b = band.wl * phys.cm_to_angs
        if is_b_spline:
            tck = interpolate.splrep(wl_s, flux, s=0)
            flux_spline = interpolate.splev(wl_b, tck, der=0)
        else:
            flux_spline = np.interp(wl_b, wl_s, flux, 0, 0)  # one-dimensional linear interpolation

        a = integrate.simps(flux_spline * band.resp_wl * wl_b, wl_b) / (phys.c * phys.cm_to_angs) / phys.h
        return a

    def magAB(self, b, kind='spline'):
        """AB magnitude in band *b* (zero-point corrected)."""
        response = Band.response_nu(self.Freq, self.FluxObs, b)
        if response <= 0:
            raise ValueError("Spectrum should be more 0: %f" % response)
        mag = Flux2MagAB(response / b.Norm) - b.zp
        return mag

    def magBol(self, b, kind='spline'):
        """
        Bolometric magnitude via Luminosity of Sun
        :return:
        """
        lum = Band.response_nu(self.Freq, self.Flux, b, is_freq_norm=False)
        M = phys.Mag_sun + 5. * np.log10(self.distance / phys.pc) - 5
        bol = M - 2.5 * np.log10(np.abs(lum) / phys.L_sun)
        return bol

    def magBolOld(self):
        """
        Bolometric magnitude via Luminosity of Sun
        :return:
        """
        from scipy.integrate import simps
        lum = simps(self.Flux[::-1], self.Freq[::-1])
        M = phys.Mag_sun + 5. * np.log10(self.distance / phys.pc) - 5
        bol = M - 2.5 * np.log10(np.abs(lum) / phys.L_sun)
        return bol

    def k_cor(self, band_r, band_o, z=0.):
        """
        Compute K-correction for observed and rest-frame bands.

        Args:
            band_r: Rest-frame band.
            band_o: Observed band.
            z: redshift

        Returns:
            * K: K-correction
            * If failed return None
        """
        # todo make k-correction with b-spline
        if z > 0:
            self.set_redshift(z)

        z_o = z
        if self.IsRedshift:
            z_o = self.z

        self.set_redshift(0.)
        resp_0 = self._response_lmb(band_r, is_b_spline=False)
        self.set_redshift(z_o)
        resp_z = self._response_lmb(band_o, is_b_spline=False)

        # Bug fix: resp_0 == 0 must also bail out, otherwise the division
        # below raises ZeroDivisionError (the original test was `resp_0 < 0`).
        if resp_0 <= 0 or resp_z <= 0:
            return None
        else:
            kcor = -2.5 * np.log10(resp_z / resp_0 / (1 + z_o)) + band_r.zp - band_o.zp
            return kcor

    @staticmethod
    def flux_to_redshift(flux, z):
        """Transform rest-frame flux to redshift z: F(z) = F * (1 + z)."""
        if z <= 0.:
            return flux
        flux_z = flux * (1. + z)
        return flux_z
| [
"numpy.abs",
"pystella.rf.Band.response_nu",
"numpy.interp",
"pystella.rf.rad_func.Flux2MagAB",
"scipy.interpolate.splev",
"numpy.log10",
"scipy.interpolate.splrep",
"scipy.integrate.simps"
] | [((4938, 4982), 'pystella.rf.Band.response_nu', 'Band.response_nu', (['self.Freq', 'self.FluxObs', 'b'], {}), '(self.Freq, self.FluxObs, b)\n', (4954, 4982), False, 'from pystella.rf import Band\n'), ((5952, 6013), 'pystella.rf.Band.response_nu', 'Band.response_nu', (['self.Freq', 'self.Flux', 'b'], {'is_freq_norm': '(False)'}), '(self.Freq, self.Flux, b, is_freq_norm=False)\n', (5968, 6013), False, 'from pystella.rf import Band\n'), ((6365, 6404), 'scipy.integrate.simps', 'simps', (['self.Flux[::-1]', 'self.Freq[::-1]'], {}), '(self.Flux[::-1], self.Freq[::-1])\n', (6370, 6404), False, 'from scipy.integrate import simps\n'), ((4514, 4549), 'scipy.interpolate.splrep', 'interpolate.splrep', (['wl_s', 'flux'], {'s': '(0)'}), '(wl_s, flux, s=0)\n', (4532, 4549), False, 'from scipy import interpolate\n'), ((4576, 4611), 'scipy.interpolate.splev', 'interpolate.splev', (['wl_b', 'tck'], {'der': '(0)'}), '(wl_b, tck, der=0)\n', (4593, 4611), False, 'from scipy import interpolate\n'), ((4652, 4685), 'numpy.interp', 'np.interp', (['wl_b', 'wl_s', 'flux', '(0)', '(0)'], {}), '(wl_b, wl_s, flux, 0, 0)\n', (4661, 4685), True, 'import numpy as np\n'), ((5158, 5187), 'pystella.rf.rad_func.Flux2MagAB', 'Flux2MagAB', (['(response / b.Norm)'], {}), '(response / b.Norm)\n', (5168, 5187), False, 'from pystella.rf.rad_func import Flux2MagAB\n'), ((3782, 3804), 'numpy.log10', 'np.log10', (['self.FluxObs'], {}), '(self.FluxObs)\n', (3790, 3804), True, 'import numpy as np\n'), ((4740, 4796), 'scipy.integrate.simps', 'integrate.simps', (['(flux_spline * band.resp_wl * wl_b)', 'wl_b'], {}), '(flux_spline * band.resp_wl * wl_b, wl_b)\n', (4755, 4796), False, 'from scipy import integrate\n'), ((6046, 6079), 'numpy.log10', 'np.log10', (['(self.distance / phys.pc)'], {}), '(self.distance / phys.pc)\n', (6054, 6079), True, 'import numpy as np\n'), ((6496, 6529), 'numpy.log10', 'np.log10', (['(self.distance / phys.pc)'], {}), '(self.distance / phys.pc)\n', (6504, 6529), True, 'import numpy 
as np\n'), ((6115, 6126), 'numpy.abs', 'np.abs', (['lum'], {}), '(lum)\n', (6121, 6126), True, 'import numpy as np\n'), ((6565, 6576), 'numpy.abs', 'np.abs', (['lum'], {}), '(lum)\n', (6571, 6576), True, 'import numpy as np\n'), ((7425, 7462), 'numpy.log10', 'np.log10', (['(resp_z / resp_0 / (1 + z_o))'], {}), '(resp_z / resp_0 / (1 + z_o))\n', (7433, 7462), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Pre-activation residual block (GroupNorm + ReLU + two 3x3 convs)
    with optional 2x nearest-neighbour upsampling applied to both the
    residual branch and the skip path."""

    def __init__(self, in_channels, out_channels, group_factor=8, upsample=True):
        super(ResBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.upsample = upsample
        self.gn1 = nn.GroupNorm(in_channels // group_factor, in_channels)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.gn2 = nn.GroupNorm(out_channels // group_factor, out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.activation = nn.ReLU(inplace=True)
        # 1x1 projection on the skip path, only needed on a channel change.
        if self.in_channels != self.out_channels:
            self.conv_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        residual = self.activation(self.gn1(x))
        if self.upsample:
            residual = F.interpolate(residual, scale_factor=2, mode='nearest')
            x = F.interpolate(x, scale_factor=2, mode='nearest')
        residual = self.conv2(self.activation(self.gn2(self.conv1(residual))))
        skip = self.conv_sc(x) if self.in_channels != self.out_channels else x
        return skip + residual
class ResDecoder(nn.Module):
    """Decode a latent vector plus a 2-d pose code into a `size` x `size`
    map with `cout` channels, using a stack of upsampling residual
    blocks."""

    def __init__(self, zdim=256, cout=2, size=128, nf=32, gn_base=8, activation=nn.Sigmoid):
        super(ResDecoder, self).__init__()
        # Extra output doublings beyond the base 64x64 pipeline; the base
        # channel width grows accordingly.
        extra = int(np.log2(size) - 6)
        for _ in range(extra):
            nf *= 2
        self.linear = nn.Linear(zdim, nf * 8)
        # Stem: fuse the 2 pose channels, then grow 1x1 -> 4x4 -> 8x8.
        layers = [
            nn.Conv2d(nf * 8 + 2, nf * 8, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(nf * 8, nf * 8, kernel_size=4, stride=1, padding=0, bias=False),  # 1x1 -> 4x4
            nn.ReLU(inplace=True),
            nn.Conv2d(nf * 8, nf * 8, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(nf * 8, nf * 8, kernel_size=4, stride=2, padding=1, bias=False),  # 4x4 -> 8x8
            nn.GroupNorm(nf * 2, nf * 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf * 8, nf * 8, kernel_size=3, stride=1, padding=1, bias=False),
        ]
        # Residual upsampling tower: 8 -> 16 -> 32 -> 64 (plus any extras).
        layers += [
            ResBlock(nf * 8, nf * 4, upsample=True),
            ResBlock(nf * 4, nf * 2, upsample=True),
            ResBlock(nf * 2, nf, upsample=True),
        ]
        for _ in range(extra):
            nf = nf // 2
            layers.append(ResBlock(nf * 2, nf, upsample=True))
        # Head: normalise and project to the requested channel count.
        layers += [
            nn.GroupNorm(nf // 4, nf),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf, cout, kernel_size=5, stride=1, padding=2, bias=False),
        ]
        if activation is not None:
            layers.append(activation())
        self.network = nn.Sequential(*layers)

    def forward(self, input, pose):
        """Return the decoded map for latent `input` (B, zdim) and pose (B, 2)."""
        latent = self.linear(input)
        latent = torch.cat([latent, pose], dim=-1)
        return self.network(latent[..., None, None])
| [
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Sequential",
"numpy.log2",
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.GroupNorm",
"torch.nn.Linear",
"torch.nn.functional.interpolate"
] | [((373, 427), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(in_channels // group_factor)', 'in_channels'], {}), '(in_channels // group_factor, in_channels)\n', (385, 427), True, 'import torch.nn as nn\n'), ((449, 537), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1,\n bias=False)\n', (458, 537), True, 'import torch.nn as nn\n'), ((553, 609), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(out_channels // group_factor)', 'out_channels'], {}), '(out_channels // group_factor, out_channels)\n', (565, 609), True, 'import torch.nn as nn\n'), ((631, 720), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(out_channels, out_channels, kernel_size=3, stride=1, padding=1,\n bias=False)\n', (640, 720), True, 'import torch.nn as nn\n'), ((743, 764), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (750, 764), True, 'import torch.nn as nn\n'), ((1593, 1616), 'torch.nn.Linear', 'nn.Linear', (['zdim', '(nf * 8)'], {}), '(zdim, nf * 8)\n', (1602, 1616), True, 'import torch.nn as nn\n'), ((2845, 2868), 'torch.nn.Sequential', 'nn.Sequential', (['*network'], {}), '(*network)\n', (2858, 2868), True, 'import torch.nn as nn\n'), ((2949, 2977), 'torch.cat', 'torch.cat', (['[x, pose]'], {'dim': '(-1)'}), '([x, pose], dim=-1)\n', (2958, 2977), False, 'import torch\n'), ((842, 893), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)'}), '(in_channels, out_channels, kernel_size=1)\n', (851, 893), True, 'import torch.nn as nn\n'), ((1004, 1052), 'torch.nn.functional.interpolate', 'F.interpolate', (['h'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(h, scale_factor=2, mode='nearest')\n", (1017, 1052), True, 'import torch.nn.functional as F\n'), ((1069, 1117), 
'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(x, scale_factor=2, mode='nearest')\n", (1082, 1117), True, 'import torch.nn.functional as F\n'), ((1669, 1746), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 8 + 2)', '(nf * 8)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(nf * 8 + 2, nf * 8, kernel_size=1, stride=1, padding=0, bias=False)\n', (1678, 1746), True, 'import torch.nn as nn\n'), ((1754, 1775), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1761, 1775), True, 'import torch.nn as nn\n'), ((1789, 1876), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 8)', '(nf * 8)'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(nf * 8, nf * 8, kernel_size=4, stride=1, padding=0, bias\n =False)\n', (1807, 1876), True, 'import torch.nn as nn\n'), ((1895, 1916), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1902, 1916), True, 'import torch.nn as nn\n'), ((1930, 2003), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 8)', '(nf * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(nf * 8, nf * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (1939, 2003), True, 'import torch.nn as nn\n'), ((2013, 2034), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2020, 2034), True, 'import torch.nn as nn\n'), ((2048, 2135), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 8)', '(nf * 8)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(nf * 8, nf * 8, kernel_size=4, stride=2, padding=1, bias\n =False)\n', (2066, 2135), True, 'import torch.nn as nn\n'), ((2154, 2182), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(nf * 2)', '(nf * 8)'], {}), '(nf * 2, nf * 8)\n', (2166, 2182), True, 'import torch.nn as nn\n'), ((2192, 2213), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (2199, 2213), True, 'import torch.nn as nn\n'), ((2227, 2300), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 8)', '(nf * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(nf * 8, nf * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (2236, 2300), True, 'import torch.nn as nn\n'), ((2606, 2631), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(nf // 4)', 'nf'], {}), '(nf // 4, nf)\n', (2618, 2631), True, 'import torch.nn as nn\n'), ((2645, 2666), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2652, 2666), True, 'import torch.nn as nn\n'), ((2680, 2747), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'cout'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': '(False)'}), '(nf, cout, kernel_size=5, stride=1, padding=2, bias=False)\n', (2689, 2747), True, 'import torch.nn as nn\n'), ((1501, 1514), 'numpy.log2', 'np.log2', (['size'], {}), '(size)\n', (1508, 1514), True, 'import numpy as np\n')] |
import functools
import itertools
import operator
import numpy as np
from qecsim.model import StabilizerCode, cli_description
from qecsim.models.rotatedtoric import RotatedToricPauli
@cli_description('Rotated toric (rows INT even >= 2, cols INT even >= 2)')
class RotatedToricCode(StabilizerCode):
    r"""
    Implements a rotated toric code defined by its lattice size.
    In addition to the members defined in :class:`qecsim.model.StabilizerCode`, it provides several lattice methods as
    described below.
    Lattice methods:
    * Get size: :meth:`size`.
    * Get plaquette type: :meth:`is_x_plaquette`, :meth:`is_z_plaquette`.
    * Get and test bounds: :meth:`bounds`, :meth:`is_in_bounds`.
    * Resolve a syndrome to plaquettes: :meth:`syndrome_to_plaquette_indices`.
    * Find shortest translation between plaquettes: :meth:`translation`.
    * Construct a Pauli operator on the lattice: :meth:`new_pauli`.
    Indices:
    * Indices are in the format (x, y).
    * Qubit sites (vertices) are indexed by (x, y) coordinates with the origin at the lower left qubit.
    * Stabilizer plaquettes are indexed by (x, y) coordinates such that the lower left corner of the plaquette is on the
    qubit site at (x, y).
    * X-type stabilizer plaquette indices satisfy (x-y) % 2 == 1.
    * Z-type stabilizer plaquette indices satisfy (x-y) % 2 == 0.
    For example, qubit site indices on a 4 x 4 lattice:
    ::
          |         |         |         |
          |         |         |         |
          |         |         |         |
        (0,3)-----(1,3)-----(2,3)-----(3,3)-----
          |         |         |         |
          |         |         |         |
          |         |         |         |
        (0,2)-----(1,2)-----(2,2)-----(3,2)-----
          |         |         |         |
          |         |         |         |
          |         |         |         |
        (0,1)-----(1,1)-----(2,1)-----(3,1)-----
          |         |         |         |
          |         |         |         |
          |         |         |         |
        (0,0)-----(1,0)-----(2,0)-----(3,0)-----
    For example, stabilizer plaquette types and indices on a 4 x 4 lattice:
    ::
          |    X    |    Z    |    X    |    Z
          |  (0,3)  |  (1,3)  |  (2,3)  |  (3,3)
          |         |         |         |
        --+---------+---------+---------+-------
          |    Z    |    X    |    Z    |    X
          |  (0,2)  |  (1,2)  |  (2,2)  |  (3,2)
          |         |         |         |
        --+---------+---------+---------+-------
          |    X    |    Z    |    X    |    Z
          |  (0,1)  |  (1,1)  |  (2,1)  |  (3,1)
          |         |         |         |
        --+---------+---------+---------+-------
          |    Z    |    X    |    Z    |    X
          |  (0,0)  |  (1,0)  |  (2,0)  |  (3,0)
          |         |         |         |
        --+---------+---------+---------+-------
    """
    # Smallest allowed lattice: 2 rows x 2 columns (both dimensions must be even).
    MIN_SIZE = (2, 2)
    def __init__(self, rows, columns):
        """
        Initialise new rotated toric code.
        :param rows: Number of rows in lattice.
        :type rows: int
        :param columns: Number of columns in lattice.
        :type columns: int
        :raises ValueError: if (rows, columns) smaller than (2, 2) in either dimension.
        :raises ValueError: if rows or columns are odd.
        :raises TypeError: if any parameter is of an invalid type.
        """
        min_rows, min_cols = self.MIN_SIZE
        try: # paranoid checking for CLI. (operator.index ensures the parameter can be treated as an int)
            if operator.index(rows) < min_rows or operator.index(columns) < min_cols:
                raise ValueError('{} minimum size is {}.'.format(type(self).__name__, self.MIN_SIZE))
            if rows % 2 or columns % 2:
                raise ValueError('{} dimensions must be even.'.format(type(self).__name__))
        except TypeError as ex:
            raise TypeError('{} invalid parameter type'.format(type(self).__name__)) from ex
        self._size = rows, columns
    # < StabilizerCode interface methods >
    @property
    # NOTE(review): lru_cache on an instance method caches keyed on self and keeps
    # instances alive for the cache's lifetime; tolerable here since code objects
    # are few and long-lived, but cached_property would be the safer idiom.
    @functools.lru_cache()
    def n_k_d(self):
        """See :meth:`qecsim.model.StabilizerCode.n_k_d`"""
        # n = r*c, k = 2 (two logical qubits on a torus), d = min(r, c)
        rows, cols = self.size
        return rows * cols, 2, min(rows, cols)
    @property
    def label(self):
        """See :meth:`qecsim.model.StabilizerCode.label`"""
        return 'Rotated toric {}x{}'.format(*self.size)
    @property
    @functools.lru_cache()
    def stabilizers(self):
        """See :meth:`qecsim.model.StabilizerCode.stabilizers`"""
        # One stabilizer per plaquette, in binary symplectic form.
        return np.array([self.new_pauli().plaquette(i).to_bsf() for i in self._plaquette_indices])
    @property
    @functools.lru_cache()
    def logical_xs(self):
        """See :meth:`qecsim.model.StabilizerCode.logical_xs`"""
        return np.array([self.new_pauli().logical_x1().to_bsf(), self.new_pauli().logical_x2().to_bsf()])
    @property
    @functools.lru_cache()
    def logical_zs(self):
        """See :meth:`qecsim.model.StabilizerCode.logical_zs`"""
        return np.array([self.new_pauli().logical_z1().to_bsf(), self.new_pauli().logical_z2().to_bsf()])
    # </ StabilizerCode interface methods >
    @property
    def size(self):
        """
        Size of the lattice in format (rows, columns), e.g. (4, 4).
        :rtype: 2-tuple of int
        """
        return self._size
    @classmethod
    def is_x_plaquette(cls, index):
        """
        Return True if the plaquette index specifies an X-type plaquette, irrespective of lattice bounds.
        :param index: Index in the format (x, y).
        :type index: 2-tuple of int
        :return: If the index specifies an X-type plaquette.
        :rtype: bool
        """
        x, y = index
        return (x - y) % 2 == 1
    @classmethod
    def is_z_plaquette(cls, index):
        """
        Return True if the plaquette index specifies a Z-type plaquette, irrespective of lattice bounds.
        :param index: Index in the format (x, y).
        :type index: 2-tuple of int
        :return: If the index specifies a Z-type plaquette.
        :rtype: bool
        """
        return not cls.is_x_plaquette(index)
    @property
    def bounds(self):
        """
        Maximum x and y value that an index coordinate can take.
        :rtype: 2-tuple of int
        """
        # max_row, max_col
        rows, cols = self.size
        return cols - 1, rows - 1  # max_x, max_y
    def is_in_bounds(self, index):
        """
        Return True if the index is within lattice bounds inclusive.
        :param index: Index in the format (x, y).
        :type index: 2-tuple of int
        :return: If the index is within lattice bounds inclusive.
        :rtype: bool
        """
        x, y = index
        max_x, max_y = self.bounds
        return 0 <= x <= max_x and 0 <= y <= max_y
    @property
    @functools.lru_cache()
    def _plaquette_indices(self):
        """
        Return a list of the plaquette indices of the lattice.
        Notes:
        * Each index is in the format (x, y).
        * Indices are in order of increasing type, y, x. (Z-type first)
        :return: List of indices in the format (x, y).
        :rtype: list of 2-tuple of int
        """
        max_x, max_y = self.bounds
        z_plaquette_indices, x_plaquette_indices = [], []
        for y in range(max_y + 1):
            for x in range(max_x + 1):
                index = x, y
                if self.is_z_plaquette(index):
                    z_plaquette_indices.append(index)
                else:
                    x_plaquette_indices.append(index)
        # Z-type plaquettes first, then X-type, each in raster order.
        return list(itertools.chain(z_plaquette_indices, x_plaquette_indices))
    def syndrome_to_plaquette_indices(self, syndrome):
        """
        Returns the indices of the plaquettes associated with the non-commuting stabilizers identified by the syndrome.
        :param syndrome: Binary vector identifying commuting and non-commuting stabilizers by 0 and 1 respectively.
        :type syndrome: numpy.array (1d)
        :return: Set of plaquette indices.
        :rtype: set of 2-tuple of int
        """
        # syndrome bits align with _plaquette_indices ordering (see stabilizers).
        return set(tuple(index) for index in np.array(self._plaquette_indices)[syndrome.nonzero()])
    def translation(self, a_index, b_index):
        """
        Evaluate the shortest taxi-cab translation from plaquette A to plaquette B in format (x_steps, y_steps).
        Notes:
        * Indices are in the format (x, y).
        * Indices are modulo lattice dimensions, i.e. on a (2, 2) lattice, (2, -1) indexes the same site as (0, 1).
        * Both plaquettes must be of the same type, i.e. X or Z.
        * Negative x_steps / y_steps indicate steps in the direction of decreasing index.
        :param a_index: Plaquette index as (x, y).
        :type a_index: (int, int)
        :param b_index: Plaquette index as (x, y).
        :type b_index: (int, int)
        :return: Taxi-cab translation between plaquettes.
        :rtype: 2-tuple of int
        :raises IndexError: If plaquettes are not of the same type (i.e. X or Z).
        """
        # check both plaquettes are the same type
        if self.is_z_plaquette(a_index) != self.is_z_plaquette(b_index):
            raise IndexError('Path undefined between plaquettes of different types: {}, {}.'.format(a_index, b_index))
        # dimensions (size is (rows, columns) so dim_x is the column count)
        dim_y, dim_x = self.size
        # indices modulo dimensions
        a_x, a_y = np.mod(a_index, (dim_x, dim_y))
        b_x, b_y = np.mod(b_index, (dim_x, dim_y))
        # cardinal steps from A to B (periodic boundaries, hence the modulo)
        steps_north = (b_y - a_y) % dim_y
        steps_south = (a_y - b_y) % dim_y
        steps_east = (b_x - a_x) % dim_x
        steps_west = (a_x - b_x) % dim_x
        # translation steps from A to B: pick the shorter direction on each axis
        x_steps = steps_east if steps_east <= steps_west else -steps_west
        y_steps = steps_north if steps_north <= steps_south else -steps_south
        return x_steps, y_steps
    def __eq__(self, other):
        if type(other) is type(self):
            return self._size == other._size
        return NotImplemented
    def __hash__(self):
        return hash(self._size)
    def __repr__(self):
        return '{}({!r}, {!r})'.format(type(self).__name__, *self.size)
    def ascii_art(self, syndrome=None, pauli=None, plaquette_labels=None, site_labels=None):
        """
        Return ASCII art style lattice showing primal lattice lines with syndrome bits and Pauli operators as given.
        Notes:
        * Optional plaquette_labels override syndrome. (Out of bound indices are ignored.)
        * Optional site_labels override pauli. (Out of bound indices are ignored.)
        :param syndrome: Syndrome (optional) as binary vector.
        :type syndrome: numpy.array (1d)
        :param pauli: Rotated toric Pauli (optional)
        :type pauli: RotatedToricPauli
        :param plaquette_labels: Dictionary of plaquette indices as (x, y) to single-character labels (optional).
        :type plaquette_labels: dict of (int, int) to char
        :param site_labels: Dictionary of site indices as (x, y) to single-character labels (optional).
        :type site_labels: dict of (int, int) to char
        :return: ASCII art style lattice.
        :rtype: str
        """
        # See https://unicode-table.com/en/blocks/box-drawing/ for box-drawing unicode characters
        max_x, max_y = self.bounds
        syndrome_indices = set() if syndrome is None else self.syndrome_to_plaquette_indices(syndrome)
        pauli = self.new_pauli() if pauli is None else pauli
        plaquette_labels = {} if plaquette_labels is None else plaquette_labels
        site_labels = {} if site_labels is None else site_labels
        # Build row templates
        # e.g. (where @=plaquette, o=site):
        #
        # |#@#| @ |#@#| @   :plaquette_row_odd
        # o---o---o---o---  :site_row
        # | @ |#@#| @ |#@#  :plaquette_row_even
        # o---o---o---o---  :site_row
        # |#@#| @ |#@#| @   :plaquette_row_odd
        # o---o---o---o---  :site_row
        # | @ |#@#| @ |#@#  :plaquette_row_even
        # o---o---o---o---  :site_row
        #
        # Common chars
        c_dot = chr(0x00B7)
        c_dash = chr(0x2500)
        c_bar = chr(0x2502)
        c_shade = chr(0x2591)
        # Common char sequences
        cs_px = c_bar + c_shade + '{}' + c_shade  # '|#{}#'
        cs_pz = c_bar + ' {} '  # '| {} '
        cs_s = '{}' + c_dash * 3  # '{}---'
        # |#@#| @ |#@#| @
        t_plaquette_row_odd = ''.join(([cs_px, cs_pz] * (max_x + 1))[:max_x + 1])
        # o---o---o---o---
        t_site_row = cs_s * (max_x + 1)
        # | @ |#@#| @ |#@#
        t_plaquette_row_even = ''.join(([cs_pz, cs_px] * (max_x + 1))[:max_x + 1])
        # Parameter extraction functions
        def _site_parameters(y):
            # Site characters for row y: explicit label, else Pauli operator, else a dot.
            indices = [i for i in ((x, y) for x in range(max_x + 1))]
            parameters = []
            for i in indices:
                if i in site_labels:
                    parameters.append(site_labels[i])
                else:
                    op = pauli.operator(i)
                    parameters.append(c_dot if op == 'I' else op)
            return parameters
        def _plaquette_parameters(y):
            # Plaquette characters for row y: explicit label, else syndrome marker, else fill.
            indices = [i for i in ((x, y) for x in range(0, max_x + 1))]
            parameters = []
            for i in indices:
                is_z_plaquette = self.is_z_plaquette(i)
                if i in plaquette_labels:
                    parameters.append(plaquette_labels[i])
                elif i in syndrome_indices:
                    parameters.append('Z' if is_z_plaquette else 'X')
                else:
                    parameters.append(' ' if is_z_plaquette else c_shade)
            return parameters
        # Append templates to text with parameters (render top row first)
        text = []
        for y in range(max_y, -1, -1):
            if y % 2 == 0:
                text.append(t_plaquette_row_even.format(*_plaquette_parameters(y)))
            else:
                text.append(t_plaquette_row_odd.format(*_plaquette_parameters(y)))
            text.append(t_site_row.format(*_site_parameters(y)))
        return '\n'.join(text)
    def new_pauli(self, bsf=None):
        """
        Convenience constructor of rotated toric Pauli for this code.
        Notes:
        * For performance reasons, the new Pauli is a view of the given bsf. Modifying one will modify the other.
        :param bsf: Binary symplectic representation of Pauli. (Optional. Defaults to identity.)
        :type bsf: numpy.array (1d)
        :return: Rotated toric Pauli
        :rtype: RotatedToricPauli
        """
        return RotatedToricPauli(self, bsf)
| [
"qecsim.model.cli_description",
"operator.index",
"numpy.mod",
"numpy.array",
"functools.lru_cache",
"itertools.chain",
"qecsim.models.rotatedtoric.RotatedToricPauli"
] | [((188, 261), 'qecsim.model.cli_description', 'cli_description', (['"""Rotated toric (rows INT even >= 2, cols INT even >= 2)"""'], {}), "('Rotated toric (rows INT even >= 2, cols INT even >= 2)')\n", (203, 261), False, 'from qecsim.model import StabilizerCode, cli_description\n'), ((4165, 4186), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4184, 4186), False, 'import functools\n'), ((4558, 4579), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4577, 4579), False, 'import functools\n'), ((4792, 4813), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4811, 4813), False, 'import functools\n'), ((5031, 5052), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (5050, 5052), False, 'import functools\n'), ((6980, 7001), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (6999, 7001), False, 'import functools\n'), ((9552, 9583), 'numpy.mod', 'np.mod', (['a_index', '(dim_x, dim_y)'], {}), '(a_index, (dim_x, dim_y))\n', (9558, 9583), True, 'import numpy as np\n'), ((9603, 9634), 'numpy.mod', 'np.mod', (['b_index', '(dim_x, dim_y)'], {}), '(b_index, (dim_x, dim_y))\n', (9609, 9634), True, 'import numpy as np\n'), ((14776, 14804), 'qecsim.models.rotatedtoric.RotatedToricPauli', 'RotatedToricPauli', (['self', 'bsf'], {}), '(self, bsf)\n', (14793, 14804), False, 'from qecsim.models.rotatedtoric import RotatedToricPauli\n'), ((7746, 7803), 'itertools.chain', 'itertools.chain', (['z_plaquette_indices', 'x_plaquette_indices'], {}), '(z_plaquette_indices, x_plaquette_indices)\n', (7761, 7803), False, 'import itertools\n'), ((3636, 3656), 'operator.index', 'operator.index', (['rows'], {}), '(rows)\n', (3650, 3656), False, 'import operator\n'), ((3671, 3694), 'operator.index', 'operator.index', (['columns'], {}), '(columns)\n', (3685, 3694), False, 'import operator\n'), ((8289, 8322), 'numpy.array', 'np.array', (['self._plaquette_indices'], {}), '(self._plaquette_indices)\n', (8297, 8322), True, 
'import numpy as np\n')] |
# bat_can.py
"""
BatCan - Battery Modeling in Cantera
This file reads in the user input, runs the simulation, and then produces any requested output (saved data file, preliminary plots, etc.)
"""
# Import modules
import importlib # allows us to import from user input string.
import numpy as np
from bat_can_init import initialize
# This is the main function that runs the model. We define it this way so it
# is called by "main," below:
def bat_can(input = None):
    """Read user inputs, build the battery model elements, and run all simulations.

    :param input: Name of the input file located in 'inputs/', with or without
        the '.yaml' extension. Defaults to a single-particle graphite/LCO model.
        (Note: the parameter name shadows the builtin ``input``; kept for
        backward compatibility with existing callers.)
    """
    if input is None:
        # Default is a single-particle model of graphite/LCO
        input_file = 'inputs/spmGraphite_PorousSep_spmLCO_input.yaml'
    else:
        if input[-5:] == '.yaml':
            input_file = 'inputs/'+input
            # Strip the file extension ('.yaml' is 5 characters).
            # BUG FIX: the original sliced off only 4 characters ([:-4]),
            # leaving a trailing '.' in the stored input name.
            input = input[:-5]
        else:
            input_file = 'inputs/'+input+'.yaml'
    #===========================================================================
    #   READ IN USER INPUTS
    #===========================================================================
    an_inputs, sep_inputs, ca_inputs, parameters = initialize(input_file)
    # Save name of input file, without path or extension:
    parameters['input'] = input
    #===========================================================================
    #   CREATE ELEMENT CLASSES AND INITIAL SOLUTION VECTOR SV_0
    #===========================================================================
    # For each element (anode 'an', separator 'sep', cathode 'ca') the 'class'
    # variable from the inputs tells what kind of anode, separator, or cathode
    # it is, and points to a '.py' file in this directory.  We import that
    # module, and then run its 'initialize' routine to create an intial
    # solution vector and an object that stores needed parameters.
    an_module = importlib.import_module('electrode_models.'
        + an_inputs['class'])
    an = an_module.electrode(input_file, an_inputs, sep_inputs, ca_inputs,
            'anode', parameters, offset=0)
    sep_module = importlib.import_module('separator_models.'
        + sep_inputs['class'])
    sep = sep_module.separator(input_file, sep_inputs, parameters,
            offset=an.n_vars)
    # Check to see if the anode object needs to adjust the separator properties:
    sep = an.adjust_separator(sep)
    ca_module = importlib.import_module('electrode_models.'
        + ca_inputs['class'])
    ca = ca_module.electrode(input_file, ca_inputs, sep_inputs, an_inputs,
            'cathode', parameters, offset= an.n_vars+sep.n_vars*sep.n_points)
    # Check to see if the cathode object needs to adjust the separator
    # properties:
    sep = ca.adjust_separator(sep)
    # Initialize the solution vector:
    SV_an_0 = an.initialize(an_inputs, sep_inputs)
    SV_sep_0 = sep.initialize(sep_inputs)
    SV_ca_0 = ca.initialize(ca_inputs, sep_inputs)
    # Stack the three initial solution vectors into a single vector:
    SV_0 = np.hstack([SV_an_0, SV_sep_0, SV_ca_0])
    # Ditto for the algebraic variable indices:
    algvars = np.hstack([an.algvars, sep.algvars, ca.algvars])
    #===========================================================================
    #   RUN THE SIMULATION
    #===========================================================================
    # The inputs tell us what type of experiment we will simulate.  Load the
    # module, then call its 'run' function:
    for sim in parameters['simulations']:
        model = importlib.import_module('.'+sim['type'], package='simulations')
        solution = model.run(SV_0, an, sep, ca, algvars, parameters, sim)
        #=======================================================================
        #   CREATE FIGURES AND SAVE ALL OUTPUTS
        #=======================================================================
        # Call any output routines related to the simulation type:
        model.output(solution, an, sep, ca, parameters, sim)
#===========================================================================
#   FUNCTIONALITY TO RUN FROM THE COMMAND LINE
#===========================================================================
if __name__ == '__main__':
    import argparse
    # Command-line interface: the only supported keyword is --input, which
    # names the input file to load.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument('--input')
    cli_args = cli_parser.parse_args()
    bat_can(cli_args.input)
| [
"bat_can_init.initialize",
"argparse.ArgumentParser",
"importlib.import_module",
"numpy.hstack"
] | [((1095, 1117), 'bat_can_init.initialize', 'initialize', (['input_file'], {}), '(input_file)\n', (1105, 1117), False, 'from bat_can_init import initialize\n'), ((1881, 1946), 'importlib.import_module', 'importlib.import_module', (["('electrode_models.' + an_inputs['class'])"], {}), "('electrode_models.' + an_inputs['class'])\n", (1904, 1946), False, 'import importlib\n'), ((2097, 2163), 'importlib.import_module', 'importlib.import_module', (["('separator_models.' + sep_inputs['class'])"], {}), "('separator_models.' + sep_inputs['class'])\n", (2120, 2163), False, 'import importlib\n'), ((2408, 2473), 'importlib.import_module', 'importlib.import_module', (["('electrode_models.' + ca_inputs['class'])"], {}), "('electrode_models.' + ca_inputs['class'])\n", (2431, 2473), False, 'import importlib\n'), ((3027, 3066), 'numpy.hstack', 'np.hstack', (['[SV_an_0, SV_sep_0, SV_ca_0]'], {}), '([SV_an_0, SV_sep_0, SV_ca_0])\n', (3036, 3066), True, 'import numpy as np\n'), ((3129, 3177), 'numpy.hstack', 'np.hstack', (['[an.algvars, sep.algvars, ca.algvars]'], {}), '([an.algvars, sep.algvars, ca.algvars])\n', (3138, 3177), True, 'import numpy as np\n'), ((4400, 4425), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4423, 4425), False, 'import argparse\n'), ((3548, 3613), 'importlib.import_module', 'importlib.import_module', (["('.' + sim['type'])"], {'package': '"""simulations"""'}), "('.' + sim['type'], package='simulations')\n", (3571, 3613), False, 'import importlib\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 20:56:53 2019
@author: minghao
"""
import cv2
import numpy as np
def warpFrame(frame, ratio, src_points, dst_points):
    """Apply a perspective (bird's-eye) warp to a frame.

    :param frame: input image (numpy array).
    :param ratio: (width_ratio, height_ratio) scaling of the output size.
    :param src_points: four source points (float32) in the input image.
    :param dst_points: four destination points (float32) in the warped image.
    :return: (warped image, forward transform M, inverse transform Minv).
    """
    out_w = int(ratio[0] * frame.shape[1])
    out_h = int(ratio[1] * frame.shape[0])
    # Forward and inverse homographies between the two point sets.
    forward = cv2.getPerspectiveTransform(src_points, dst_points)
    inverse = cv2.getPerspectiveTransform(dst_points, src_points)
    warped = cv2.warpPerspective(frame, forward, (out_w, out_h), flags=cv2.INTER_LINEAR)
    return warped, forward, inverse
# Directory holding the road-scene test images for this demo.
imgs_path = '/Users/mac/Documents/University/Github/Lane-detection-learning/imgs/'
# Load one test frame; the trailing comment lists other frame numbers tried.
img = cv2.imread(imgs_path+'um_000032.png')  #00,32,63,81
def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
    """Mouse callback: on left click, mark the clicked point on the module-level
    image and annotate it with its (x, y) pixel coordinates."""
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    coord_text = "%d,%d" % (x, y)
    # Draw a small marker at the click position, then label it.
    cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
    cv2.putText(img, coord_text, (x, y), cv2.FONT_HERSHEY_PLAIN,
                1.0, (0, 0, 0), thickness=1)
    cv2.imshow("image", img)
# Display the raw frame and attach the click-to-annotate callback.
cv2.namedWindow("image")
cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
cv2.imshow("image", img)
# Source quadrilateral on the road (picked manually) and its rectangular
# destination for the bird's-eye view.
src = np.float32([ [553,234], [679,234], [848,374], [420,374] ])
dst = np.float32([ [420,0], [848,0], [848,750], [420,750] ])
# Warp to a top-down view twice the original height.
test_warp_image, M, Minv = warpFrame(img, (1,2), src, dst)
cv2.imshow('warp',test_warp_image)
def absSobelThreshold(img, orient='x', min_thre=60, max_thre=255):
    """Binary-threshold the absolute Sobel gradient of a BGR image.

    :param img: BGR input image.
    :param orient: 'x' or 'y' — axis of the first derivative.
    :param min_thre: lower threshold on the scaled gradient (0-255).
    :param max_thre: upper threshold on the scaled gradient (0-255).
    :return: binary mask (0/1) where the scaled gradient lies in range.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Absolute first derivative along the requested axis.
    if orient == 'x':
        gradient = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        gradient = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale to 8-bit so the thresholds are image-independent.
    scaled = np.uint8(255 * gradient / np.max(gradient))
    return cv2.inRange(scaled, min_thre, max_thre) / 255
c = absSobelThreshold(test_warp_image)
cv2.imshow('c',c) | [
"cv2.warpPerspective",
"cv2.circle",
"cv2.putText",
"cv2.getPerspectiveTransform",
"cv2.cvtColor",
"numpy.float32",
"cv2.imread",
"numpy.max",
"cv2.setMouseCallback",
"cv2.imshow",
"cv2.inRange",
"cv2.Sobel",
"cv2.namedWindow"
] | [((610, 649), 'cv2.imread', 'cv2.imread', (["(imgs_path + 'um_000032.png')"], {}), "(imgs_path + 'um_000032.png')\n", (620, 649), False, 'import cv2\n'), ((991, 1015), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (1006, 1015), False, 'import cv2\n'), ((1016, 1067), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'on_EVENT_LBUTTONDOWN'], {}), "('image', on_EVENT_LBUTTONDOWN)\n", (1036, 1067), False, 'import cv2\n'), ((1068, 1092), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1078, 1092), False, 'import cv2\n'), ((1101, 1161), 'numpy.float32', 'np.float32', (['[[553, 234], [679, 234], [848, 374], [420, 374]]'], {}), '([[553, 234], [679, 234], [848, 374], [420, 374]])\n', (1111, 1161), True, 'import numpy as np\n'), ((1166, 1222), 'numpy.float32', 'np.float32', (['[[420, 0], [848, 0], [848, 750], [420, 750]]'], {}), '([[420, 0], [848, 0], [848, 750], [420, 750]])\n', (1176, 1222), True, 'import numpy as np\n'), ((1280, 1315), 'cv2.imshow', 'cv2.imshow', (['"""warp"""', 'test_warp_image'], {}), "('warp', test_warp_image)\n", (1290, 1315), False, 'import cv2\n'), ((1807, 1825), 'cv2.imshow', 'cv2.imshow', (['"""c"""', 'c'], {}), "('c', c)\n", (1817, 1825), False, 'import cv2\n'), ((281, 332), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src_points', 'dst_points'], {}), '(src_points, dst_points)\n', (308, 332), False, 'import cv2\n'), ((344, 395), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst_points', 'src_points'], {}), '(dst_points, src_points)\n', (371, 395), False, 'import cv2\n'), ((415, 480), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'M', 'frame_size'], {'flags': 'cv2.INTER_LINEAR'}), '(frame, M, frame_size, flags=cv2.INTER_LINEAR)\n', (434, 480), False, 'import cv2\n'), ((1393, 1430), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1405, 1430), False, 'import cv2\n'), 
((792, 845), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(1)', '(255, 0, 0)'], {'thickness': '(-1)'}), '(img, (x, y), 1, (255, 0, 0), thickness=-1)\n', (802, 845), False, 'import cv2\n'), ((854, 939), 'cv2.putText', 'cv2.putText', (['img', 'xy', '(x, y)', 'cv2.FONT_HERSHEY_PLAIN', '(1.0)', '(0, 0, 0)'], {'thickness': '(1)'}), '(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0),\n thickness=1)\n', (865, 939), False, 'import cv2\n'), ((964, 988), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (974, 988), False, 'import cv2\n'), ((1692, 1737), 'cv2.inRange', 'cv2.inRange', (['scaled_sobel', 'min_thre', 'max_thre'], {}), '(scaled_sobel, min_thre, max_thre)\n', (1703, 1737), False, 'import cv2\n'), ((1486, 1519), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(1)', '(0)'], {}), '(gray, cv2.CV_64F, 1, 0)\n', (1495, 1519), False, 'import cv2\n'), ((1576, 1609), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(0)', '(1)'], {}), '(gray, cv2.CV_64F, 0, 1)\n', (1585, 1609), False, 'import cv2\n'), ((1653, 1670), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (1659, 1670), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
import nolds
import seaborn as sns
# Non-interactive matplotlib: figures are saved to disk, never shown.
plt.ioff()
# Consistent seaborn styling for all generated plots.
sns.set(font_scale=1, font="arial")
def dfa(nn=None, rpeaks=None, short=None, long=None, show=True, figsize=None, legend=True):
    """Run Detrended Fluctuation Analysis (DFA) on an NNI series and save the plot.

    Computes the short-term (alpha1) and long-term (alpha2) scaling exponents
    with nolds.dfa and writes a log-log fluctuation plot to
    'static/Temporary/DFA_Temporary.png'.

    :param nn: NN intervals; used directly if given.
    :param rpeaks: R-peak locations; only used when nn is None.
    :param short: (lower, upper) box sizes for the short-term fit (default (4, 16)).
    :param long: (lower, upper) box sizes for the long-term fit (default (17, 64)).
        (The name shadows no builtin but note ``long`` was a Python 2 type.)
    :param show: currently unused — interactive display is disabled (plt.ioff)
        and the figure is always saved instead.
    :param figsize: matplotlib figure size; defaults to (10, 10).
    :param legend: if True, draw the alpha1/alpha2 legend.
    :return: status string on success.
    """
    # Check input values
    nn = Check_Input(nn, rpeaks)
    # Check intervals (fall back to standard HRV box-size ranges)
    short = Check_Interval(short, default=(4, 16))
    long = Check_Interval(long, default=(17, 64))
    # Create arrays of box sizes (upper bound inclusive)
    short = range(short[0], short[1] + 1)
    long = range(long[0], long[1] + 1)
    # Prepare plot
    if figsize is None:
        figsize = (10, 10)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    ax.set_title('Detrended Fluctuation Analysis (DFA)', fontsize=25)
    ax.set_xlabel('log n [beats]',fontsize=20)
    ax.set_ylabel('log F(n)',fontsize=20)
    # try:
    # Compute alpha values
    try:
        alpha1, dfa_short = nolds.dfa(nn, short, debug_data=True, overlap=False)
        alpha2, dfa_long = nolds.dfa(nn, long, debug_data=True, overlap=False)
    except ValueError:
        # If DFA could not be conducted due to insufficient number of NNIs, return an empty graph and 'nan' for alpha1/2
        warnings.warn("Not enough NNI samples for Detrended Fluctuations Analysis.")
        ax.axis([0, 1, 0, 1])
        ax.text(0.5, 0.5, '[Insufficient number of NNI samples for DFA]', horizontalalignment='center',
                verticalalignment='center')
        alpha1, alpha2 = 'nan', 'nan'
    else:
        # Plot DFA results if number of NNI were sufficent to conduct DFA
        # Plot short term DFA: scatter of fluctuations plus the fitted line
        vals, flucts, poly = dfa_short[0], dfa_short[1], np.polyval(dfa_short[2], dfa_short[0])
        label = r'$ \alpha_{1}: %0.2f$' % alpha1
        ax.plot(vals, flucts, 'bo', markersize=1)
        ax.plot(vals, poly, 'b', label=label, alpha=0.7)
        # Plot long term DFA
        vals, flucts, poly = dfa_long[0], dfa_long[1], np.polyval(dfa_long[2], dfa_long[0])
        label = r'$ \alpha_{2}: %0.2f$' % alpha2
        ax.plot(vals, flucts, 'go', markersize=1)
        ax.plot(vals, poly, 'g', label=label, alpha=0.7)
        # Add legend
        if legend:
            ax.legend()
        ax.grid()
    # Plot axis
    # if show:
    #     plt.show()
    # Output
    # args = (fig, alpha1, alpha2, short, long)
    # names = ('dfa_plot', 'dfa_alpha1', 'dfa_alpha2', 'dfa_alpha1_beats', 'dfa_alpha2_beats')
    fig.savefig("static/Temporary/DFA_Temporary.png")
    # Detrended_Fluctuation_Calculations = dict(zip(args, names))
    plt.close()
    # NOTE(review): the returned status string contains a typo ("Succesfully");
    # left unchanged in case callers match on the exact text.
    return "Succesfully Saved DFA Plot"
def Check_Interval(interval=None, limits=None, default=None):
    """Validate an interval against optional boundary limits and a default.

    :param interval: candidate (lower, upper) interval, or None.
    :param limits: maximum allowed (lower, upper) range, or None.
    :param default: fallback (lower, upper) interval, or None.
    :return: a validated [lower, upper] list.
    :raises TypeError: if all three arguments are None.
    """
    if interval is None and limits is None and default is None:
        raise TypeError("No input data specified. Please verify your input data.")
    # Work on a copy so the caller's sequence is never mutated.
    interval = None if interval is None else list(interval)
    # Normalise the fallback interval first.
    if default is not None:
        default = _check_limits(default, 'default')
    # Normalise limits, falling back to the default range when absent.
    if limits is not None:
        limits = _check_limits(limits, 'limits')
    elif default is not None:
        limits = default
    # No interval given: hand back the default (preferred) or the limits.
    if interval is None:
        return default if default is not None else limits
    # Interval given without any bounding range: just normalise it.
    if limits is None:
        return _check_limits(interval, 'interval')
    # Interval and limits both given: clamp the interval into the limits.
    interval = _check_limits(interval, 'interval')
    if interval[0] < limits[0]:
        interval[0] = limits[0]
        warnings.warn("Interval limits out of boundaries. Interval set to: %s" % interval, stacklevel=2)
    if interval[1] > limits[1]:
        interval[1] = limits[1]
        warnings.warn("Interval limits out of boundaries. Interval set to: %s" % interval, stacklevel=2)
    return interval
def _check_limits(interval, name):
# upper limit < 0 or upper limit > max interval -> set upper limit to max
if interval[0] > interval[1]:
interval[0], interval[1] = interval[1], interval[0]
vals = (name, name, interval[0], interval[1])
warnings.warn("Corrected invalid '%s' limits (lower limit > upper limit).'%s' set to: %s" % vals)
if interval[0] == interval[1]:
raise ValueError("'%f': Invalid interval limits as they are equal." % name)
return interval
def Check_Input(nni=None, rpeaks=None):
    """Resolve the input data to a validated NN-interval series.

    NN intervals take precedence when both arguments are supplied.

    :param nni: NN intervals, used directly if given.
    :param rpeaks: R-peak locations, used to derive NN intervals when nni is None.
    :return: validated NNI numpy array.
    :raises TypeError: if neither argument is provided.
    """
    if nni is not None:
        # NN intervals supplied directly; just normalise the format.
        return nn_format(nni)
    if rpeaks is not None:
        # Derive NN intervals from the R-peak locations.
        return nn_intervals(rpeaks)
    raise TypeError("No R-peak data or NN intervals provided. Please specify input data.")
def nn_intervals(rpeaks=None):
    """Compute NN/RR intervals as successive differences of R-peak locations.

    :param rpeaks: R-peak locations (list, tuple or numpy array).
    :return: validated NNI numpy array (via nn_format).
    :raises TypeError: if rpeaks is None or not a supported sequence type.
    """
    # Check input signal
    if rpeaks is None:
        raise TypeError("No data for R-peak locations provided. Please specify input data.")
    # BUG FIX: the original condition `type(rpeaks) is not list and not np.ndarray`
    # was always False (`not np.ndarray` evaluates a truthy class object), so
    # the type check never fired.
    if not isinstance(rpeaks, (list, tuple, np.ndarray)):
        raise TypeError("List, tuple or numpy array expected, received %s" % type(rpeaks))
    # Confirm numpy arrays & compute NN intervals as successive differences.
    rpeaks = np.asarray(rpeaks)
    nn_int = np.diff(rpeaks).astype('float64')
    return nn_format(nn_int)
def nn_format(nni=None):
    """Normalise an NNI series to a float64 numpy array in milliseconds.

    Series whose maximum is below 10 are assumed to be in seconds and are
    converted to integer milliseconds -- TODO confirm this heuristic holds
    for all callers.

    :param nni: NN intervals (list, tuple or numpy array).
    :raises TypeError: if nni is None.
    """
    # Check input
    if nni is None:
        raise TypeError("No input data provided for 'nn'. Please specify input data")
    nn_ = np.asarray(nni, dtype='float64')
    # Convert if data has been identified in [s], else proceed with ensuring the NumPy array format
    if np.max(nn_) < 10:
        nn_ = [int(x * 1000) for x in nn_]
return np.asarray(nn_) | [
"matplotlib.pyplot.ioff",
"numpy.polyval",
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.zeros",
"nolds.dfa",
"matplotlib.pyplot.figure",
"numpy.max",
"warnings.warn",
"seaborn.set"
] | [((128, 138), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (136, 138), True, 'import matplotlib.pyplot as plt\n'), ((140, 175), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)', 'font': '"""arial"""'}), "(font_scale=1, font='arial')\n", (147, 175), True, 'import seaborn as sns\n'), ((635, 662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (645, 662), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2578), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2576, 2578), True, 'import matplotlib.pyplot as plt\n'), ((5424, 5442), 'numpy.asarray', 'np.asarray', (['rpeaks'], {}), '(rpeaks)\n', (5434, 5442), True, 'import numpy as np\n'), ((5453, 5478), 'numpy.zeros', 'np.zeros', (['(rpeaks.size - 1)'], {}), '(rpeaks.size - 1)\n', (5461, 5478), True, 'import numpy as np\n'), ((5723, 5755), 'numpy.asarray', 'np.asarray', (['nni'], {'dtype': '"""float64"""'}), "(nni, dtype='float64')\n", (5733, 5755), True, 'import numpy as np\n'), ((5922, 5937), 'numpy.asarray', 'np.asarray', (['nn_'], {}), '(nn_)\n', (5932, 5937), True, 'import numpy as np\n'), ((928, 980), 'nolds.dfa', 'nolds.dfa', (['nn', 'short'], {'debug_data': '(True)', 'overlap': '(False)'}), '(nn, short, debug_data=True, overlap=False)\n', (937, 980), False, 'import nolds\n'), ((1008, 1059), 'nolds.dfa', 'nolds.dfa', (['nn', 'long'], {'debug_data': '(True)', 'overlap': '(False)'}), '(nn, long, debug_data=True, overlap=False)\n', (1017, 1059), False, 'import nolds\n'), ((4250, 4357), 'warnings.warn', 'warnings.warn', (['("Corrected invalid \'%s\' limits (lower limit > upper limit).\'%s\' set to: %s" %\n vals)'], {}), '(\n "Corrected invalid \'%s\' limits (lower limit > upper limit).\'%s\' set to: %s"\n % vals)\n', (4263, 4357), False, 'import warnings\n'), ((5858, 5869), 'numpy.max', 'np.max', (['nn_'], {}), '(nn_)\n', (5864, 5869), True, 'import numpy as np\n'), ((1212, 1288), 'warnings.warn', 'warnings.warn', (['"""Not enough NNI 
samples for Detrended Fluctuations Analysis."""'], {}), "('Not enough NNI samples for Detrended Fluctuations Analysis.')\n", (1225, 1288), False, 'import warnings\n'), ((1676, 1714), 'numpy.polyval', 'np.polyval', (['dfa_short[2]', 'dfa_short[0]'], {}), '(dfa_short[2], dfa_short[0])\n', (1686, 1714), True, 'import numpy as np\n'), ((1956, 1992), 'numpy.polyval', 'np.polyval', (['dfa_long[2]', 'dfa_long[0]'], {}), '(dfa_long[2], dfa_long[0])\n', (1966, 1992), True, 'import numpy as np\n'), ((3726, 3826), 'warnings.warn', 'warnings.warn', (["('Interval limits out of boundaries. Interval set to: %s' % interval)"], {'stacklevel': '(2)'}), "('Interval limits out of boundaries. Interval set to: %s' %\n interval, stacklevel=2)\n", (3739, 3826), False, 'import warnings\n'), ((3888, 3988), 'warnings.warn', 'warnings.warn', (["('Interval limits out of boundaries. Interval set to: %s' % interval)"], {'stacklevel': '(2)'}), "('Interval limits out of boundaries. Interval set to: %s' %\n interval, stacklevel=2)\n", (3901, 3988), False, 'import warnings\n')] |
import sys
import os
import boto3
import random
import datetime
import math
import time
import numpy as np
from concurrent import futures
import sagemaker
from sagemaker import get_execution_role
from sagemaker.serializers import NumpySerializer
def one_thread(endpoint_name, feed_data):
    """Worker loop: repeatedly send `feed_data` to a SageMaker endpoint via the
    SageMaker Python SDK until the global `live` flag is cleared.

    Updates the shared module globals `latency_list`, `num_infer` and
    `num_error`, which `current_performance` reads for reporting.
    NOTE(review): `+=` on the shared counters is not atomic, so counts may be
    slightly off under heavy thread contention -- acceptable for a load test.
    """
    global latency_list
    global num_infer
    global live
    global num_error
    sagemaker_session = sagemaker.Session()
    role = get_execution_role()
    pred = sagemaker.predictor.Predictor(endpoint_name)
    pred.serializer = NumpySerializer()
    # Warm up: let the endpoint scale/initialize before latencies are recorded.
    for i in range(100):
        pred.predict(feed_data)
        feed_data.seek(0)
    time.sleep(3)
    # Measured predictions: loop until current_performance() flips `live`.
    while True:
        start = time.time()
        try:
            pred.predict(feed_data)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the thread; failed requests are only counted.
            num_error += 1
        latency = time.time() - start
        latency_list.append(latency * 1000 / throughput_interval)
        feed_data.seek(0)
        num_infer += batch_size
        if not live:
            break
def one_thread_boto3(endpoint_name, feed_data):
    """Worker loop: repeatedly send `feed_data` to a SageMaker endpoint through
    the raw boto3 runtime client until the global `live` flag is cleared.

    Mirrors `one_thread` but bypasses the SageMaker SDK layer, so the two
    request paths can be benchmarked against each other.
    """
    global latency_list
    global num_infer
    global live
    global num_error
    client = boto3.client('sagemaker-runtime')
    # Warm up: let the endpoint scale/initialize before latencies are recorded.
    for i in range(100):
        client.invoke_endpoint(EndpointName=endpoint_name, Body=feed_data)
        feed_data.seek(0)
    time.sleep(3)
    # Measured predictions: loop until current_performance() flips `live`.
    while True:
        start = time.time()
        try:
            client.invoke_endpoint(EndpointName=endpoint_name, Body=feed_data)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the thread; failed requests are only counted.
            num_error += 1
        latency = time.time() - start
        latency_list.append(latency * 1000 / throughput_interval)
        feed_data.seek(0)
        num_infer += batch_size
        if not live:
            break
def current_performance():
    """Reporter loop: once per `throughput_interval` seconds print throughput
    (TPS) and client-side latency percentiles over the most recent
    `latency_window_size` samples; after `throughput_time` seconds, clear the
    global `live` flag so the worker threads exit.
    """
    last_num_infer = num_infer
    print('  TPS  |  P50  |  P90  |  P95  |  P99  | err ')
    for _ in range(throughput_time // throughput_interval):
        current_num_infer = num_infer
        # Throughput = delta of the shared inference counter over one interval.
        throughput = (current_num_infer - last_num_infer) / throughput_interval
        client_avg = 0.0  # NOTE(review): computed below but never printed
        client_p50 = 0.0
        client_p90 = 0.0
        client_p95 = 0.0
        client_p99 = 0.0
        if latency_list:
            # Percentiles over a sliding window of the most recent samples.
            client_avg = np.mean(latency_list[-latency_window_size:])
            client_p50 = np.percentile(latency_list[-latency_window_size:], 50)
            client_p90 = np.percentile(latency_list[-latency_window_size:], 90)
            client_p95 = np.percentile(latency_list[-latency_window_size:], 95)
            client_p99 = np.percentile(latency_list[-latency_window_size:], 99)
        print('{:5.3f}|{:.5f}|{:.5f}|{:.5f}|{:.5f} |{:4d}'.format(throughput, client_p50, client_p90, client_p95, client_p99, int(num_error) ))
        last_num_infer = current_num_infer
        time.sleep(throughput_interval)
    # Signal the worker threads (one_thread / one_thread_boto3) to stop.
    global live
    live = False
def check_endpoint_exists(endpoint_name):
    """Return True iff `endpoint_name` exists and is in the 'InService' state.

    Any lookup failure (missing endpoint, bad credentials, network error) is
    treated as "not available" and yields False.
    """
    try:
        client = boto3.client("sagemaker")
        status = client.describe_endpoint(EndpointName=endpoint_name)['EndpointStatus']
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate to the caller.
        return False
    # Replaces the original raise-inside-its-own-try trick (raising just to
    # reach the except clause) with a direct comparison.
    return status == 'InService'
def load_tester(num_thread, endpoint_name, filename, request_type):
    """Run a multi-threaded load test against a SageMaker endpoint.

    Spawns `num_thread` worker threads (SageMaker-SDK workers when
    request_type == 'sm', raw boto3 workers otherwise) plus one reporter
    thread; all of them communicate through the module globals set up below.

    Args:
        num_thread: number of concurrent client threads.
        endpoint_name: name of the deployed SageMaker endpoint.
        filename: path of the request payload; opened once per worker thread.
            NOTE(review): the file handles are never closed explicitly; they
            live until process exit.
        request_type: 'sm' for the SageMaker Python SDK, anything else boto3.
    """
    # Shared test configuration, published as module globals for the workers
    # and the reporter thread.
    global throughput_interval
    throughput_interval = 10
    global throughput_time
    throughput_time = 200
    global latency_window_size
    latency_window_size = 1000
    global batch_size
    batch_size = 1
    global live
    live = True
    global num_infer
    num_infer = 0
    global latency_list
    latency_list = []
    global num_error
    num_error = 0
    try:
        assert check_endpoint_exists(endpoint_name)
    except AssertionError:
        print(f'The endpoint {endpoint_name} does not exist or is not available.')
    else:
        if request_type == 'sm':
            print('Using SageMaker Python SDK for requests.')
        else:
            print('Using boto3 for requests.')
        # +1 worker for the current_performance reporter thread.
        executor = futures.ThreadPoolExecutor(max_workers=num_thread+1)
        executor.submit(current_performance)
        if request_type == 'sm':
            for pred in range(num_thread):
                executor.submit(one_thread, endpoint_name, open(filename, 'rb'))
        else:
            for pred in range(num_thread):
                executor.submit(one_thread_boto3, endpoint_name, open(filename, 'rb'))
        # Blocks until the reporter clears `live` and all workers return.
        executor.shutdown(wait=True)
if __name__ == '__main__':
num_thread = int(sys.argv[1]) # First cmd line argument: number of concurrent client threads (int)
endpoint_name = sys.argv[2] # Second command line argument: SageMaker Endpoint Name (str)
filename = sys.argv[3]
request_type = sys.argv[4]
load_tester(num_thread, endpoint_name, filename, request_type) | [
"sagemaker.predictor.Predictor",
"boto3.client",
"sagemaker.get_execution_role",
"time.sleep",
"sagemaker.serializers.NumpySerializer",
"time.time",
"numpy.percentile",
"numpy.mean",
"sagemaker.Session",
"concurrent.futures.ThreadPoolExecutor"
] | [((404, 423), 'sagemaker.Session', 'sagemaker.Session', ([], {}), '()\n', (421, 423), False, 'import sagemaker\n'), ((441, 461), 'sagemaker.get_execution_role', 'get_execution_role', ([], {}), '()\n', (459, 461), False, 'from sagemaker import get_execution_role\n'), ((473, 517), 'sagemaker.predictor.Predictor', 'sagemaker.predictor.Predictor', (['endpoint_name'], {}), '(endpoint_name)\n', (502, 517), False, 'import sagemaker\n'), ((540, 557), 'sagemaker.serializers.NumpySerializer', 'NumpySerializer', ([], {}), '()\n', (555, 557), False, 'from sagemaker.serializers import NumpySerializer\n'), ((673, 686), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (683, 686), False, 'import time\n'), ((1211, 1244), 'boto3.client', 'boto3.client', (['"""sagemaker-runtime"""'], {}), "('sagemaker-runtime')\n", (1223, 1244), False, 'import boto3\n'), ((1394, 1407), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1404, 1407), False, 'import time\n'), ((742, 753), 'time.time', 'time.time', ([], {}), '()\n', (751, 753), False, 'import time\n'), ((1463, 1474), 'time.time', 'time.time', ([], {}), '()\n', (1472, 1474), False, 'import time\n'), ((2881, 2912), 'time.sleep', 'time.sleep', (['throughput_interval'], {}), '(throughput_interval)\n', (2891, 2912), False, 'import time\n'), ((3019, 3044), 'boto3.client', 'boto3.client', (['"""sagemaker"""'], {}), "('sagemaker')\n", (3031, 3044), False, 'import boto3\n'), ((4067, 4121), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(num_thread + 1)'}), '(max_workers=num_thread + 1)\n', (4093, 4121), False, 'from concurrent import futures\n'), ((878, 889), 'time.time', 'time.time', ([], {}), '()\n', (887, 889), False, 'import time\n'), ((1642, 1653), 'time.time', 'time.time', ([], {}), '()\n', (1651, 1653), False, 'import time\n'), ((2315, 2359), 'numpy.mean', 'np.mean', (['latency_list[-latency_window_size:]'], {}), '(latency_list[-latency_window_size:])\n', (2322, 2359), True, 'import 
numpy as np\n'), ((2385, 2439), 'numpy.percentile', 'np.percentile', (['latency_list[-latency_window_size:]', '(50)'], {}), '(latency_list[-latency_window_size:], 50)\n', (2398, 2439), True, 'import numpy as np\n'), ((2465, 2519), 'numpy.percentile', 'np.percentile', (['latency_list[-latency_window_size:]', '(90)'], {}), '(latency_list[-latency_window_size:], 90)\n', (2478, 2519), True, 'import numpy as np\n'), ((2545, 2599), 'numpy.percentile', 'np.percentile', (['latency_list[-latency_window_size:]', '(95)'], {}), '(latency_list[-latency_window_size:], 95)\n', (2558, 2599), True, 'import numpy as np\n'), ((2625, 2679), 'numpy.percentile', 'np.percentile', (['latency_list[-latency_window_size:]', '(99)'], {}), '(latency_list[-latency_window_size:], 99)\n', (2638, 2679), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
sys.path.append('../../')
from Glucose.utils.utils import *
# Ensure the output directory exists and the raw CSV is in place.
if not os.path.exists('../../dataset/'):
    os.mkdir('../../dataset')
assert os.path.isfile('../../dataset/GLUCOSE.csv'), (
    'Please download the Glucose Dataset and Rename it to '
    'GLUCOSE.csv, and then move it to the ../dataset/ folder.')
glucose = pd.read_csv('../../dataset/GLUCOSE.csv')
# Map every row index to its sentence, every distinct sentence to a stable
# unique id, and compose the two into an index -> unique-id map.
glucose_index2sentence = {idx: glucose.loc[idx, 'selected_sentence'] for idx in glucose.index}
glucose_unique_sentences = glucose['selected_sentence'].unique()
glucose_sentence2uniqueID = {sent: uid for uid, sent in enumerate(glucose_unique_sentences)}
glucose_index2uniqueID = {idx: glucose_sentence2uniqueID[glucose_index2sentence[idx]] for idx in glucose.index}
np.save('../../dataset/glucose_index2sentence.npy', glucose_index2sentence)
np.save('../../dataset/glucose_sentence2uniqueID.npy', glucose_sentence2uniqueID)
np.save('../../dataset/glucose_index2uniqueID.npy', glucose_index2uniqueID)
# One (head, tail, head_id, tail_id) bucket per structured column 1..10.
parsed_stru_dict = {'head': {}, 'tail': {}, 'head_id': {}, 'tail_id': {}}
for col in range(1, 11):
    for key in parsed_stru_dict.keys():
        parsed_stru_dict[key][col] = []
for idx in tqdm(glucose.index):
    for col in range(1, 11):
        cell = glucose.loc[idx, '{}_generalStructured'.format(col)]
        if cell != 'escaped':
            # Structured cells look like "head>relation>tail".
            parts = cell.split('>')
            parsed_stru_dict['head'][col].append(trim(parts[0]))
            parsed_stru_dict['tail'][col].append(trim(parts[2]))
            uid = glucose_sentence2uniqueID[glucose.loc[idx, 'selected_sentence']]
            parsed_stru_dict['head_id'][col].append(uid)
            parsed_stru_dict['tail_id'][col].append(uid)
np.save('../../dataset/Glucose_parsed_stru_dict', parsed_stru_dict)
print("Glucose is ready to use at ../../dataset/")
for i in range(1, 11):
    print("List [{}]: Total Knowledge: {}\tUnique Knowledge: {}".format(i, len(parsed_stru_dict['head'][i]),
                                                                       len(np.unique(parsed_stru_dict['head_id'][i]))))
| [
"sys.path.append",
"os.mkdir",
"tqdm.tqdm",
"numpy.save",
"pandas.read_csv",
"os.path.exists",
"os.path.isfile",
"numpy.unique"
] | [((84, 109), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (99, 109), False, 'import sys\n'), ((224, 267), 'os.path.isfile', 'os.path.isfile', (['"""../../dataset/GLUCOSE.csv"""'], {}), "('../../dataset/GLUCOSE.csv')\n", (238, 267), False, 'import os\n'), ((436, 476), 'pandas.read_csv', 'pd.read_csv', (['"""../../dataset/GLUCOSE.csv"""'], {}), "('../../dataset/GLUCOSE.csv')\n", (447, 476), True, 'import pandas as pd\n'), ((842, 917), 'numpy.save', 'np.save', (['"""../../dataset/glucose_index2sentence.npy"""', 'glucose_index2sentence'], {}), "('../../dataset/glucose_index2sentence.npy', glucose_index2sentence)\n", (849, 917), True, 'import numpy as np\n'), ((918, 1003), 'numpy.save', 'np.save', (['"""../../dataset/glucose_sentence2uniqueID.npy"""', 'glucose_sentence2uniqueID'], {}), "('../../dataset/glucose_sentence2uniqueID.npy',\n glucose_sentence2uniqueID)\n", (925, 1003), True, 'import numpy as np\n'), ((1000, 1075), 'numpy.save', 'np.save', (['"""../../dataset/glucose_index2uniqueID.npy"""', 'glucose_index2uniqueID'], {}), "('../../dataset/glucose_index2uniqueID.npy', glucose_index2uniqueID)\n", (1007, 1075), True, 'import numpy as np\n'), ((1258, 1277), 'tqdm.tqdm', 'tqdm', (['glucose.index'], {}), '(glucose.index)\n', (1262, 1277), False, 'from tqdm import tqdm\n'), ((1815, 1882), 'numpy.save', 'np.save', (['"""../../dataset/Glucose_parsed_stru_dict"""', 'parsed_stru_dict'], {}), "('../../dataset/Glucose_parsed_stru_dict', parsed_stru_dict)\n", (1822, 1882), True, 'import numpy as np\n'), ((152, 184), 'os.path.exists', 'os.path.exists', (['"""../../dataset/"""'], {}), "('../../dataset/')\n", (166, 184), False, 'import os\n'), ((190, 215), 'os.mkdir', 'os.mkdir', (['"""../../dataset"""'], {}), "('../../dataset')\n", (198, 215), False, 'import os\n'), ((2143, 2184), 'numpy.unique', 'np.unique', (["parsed_stru_dict['head_id'][i]"], {}), "(parsed_stru_dict['head_id'][i])\n", (2152, 2184), True, 'import numpy as np\n')] |
from typing import Any, Awaitable, Callable, Mapping, Optional, Protocol, Sequence, Tuple, Type, TypeVar
import argparse
import asyncio
import itertools
import logging
import logging.handlers
import math
import traceback
import httpx
import numpy as np
import toml
from binancebot import binance, trader, server
logger = logging.getLogger(__name__)
T = TypeVar("T")
async def suppress(exc_class: Type[BaseException], task: Awaitable[T]) -> Optional[T]:
    """Await *task*; if it raises *exc_class*, log the traceback and return
    None instead of letting the exception propagate."""
    try:
        result = await task
    except exc_class:
        logger.error(traceback.format_exc())
        return None
    return result
class Task(Protocol):
    """Structural type of jobs schedulable by `periodic`: a callable taking the
    current loop time, the sleep delta, and the step index, returning an
    awaitable."""
    def __call__(self, time: float, time_delta: float, step_count: int) -> Awaitable:
        ...
async def periodic(period: float, task: Task):
    """Run `task` forever, once every `period` seconds of loop time.

    Each step awaits the task (with all exceptions suppressed and logged)
    concurrently with a sleep that realigns the schedule to the absolute
    tick `start + i * period`, so slow steps do not accumulate drift.
    """
    loop = asyncio.get_running_loop()
    start = loop.time()
    for i in itertools.count(1):
        now = loop.time()
        # Time remaining until the i-th tick; clamped to 0 if we are late.
        delta = max(start + i * period - now, 0.0)
        await asyncio.gather(
            suppress(BaseException, task(
                time=now,
                time_delta=delta,
                step_count=i,
            )), asyncio.sleep(delta)
        )
def linear_least_squares(
    phis: Sequence[Callable[[float], float]],
    data: Sequence[Tuple[float, float]],
) -> Sequence[float]:
    """Fit coefficients minimizing sum((y - sum_j(beta_j * phi_j(x)))^2).

    Args:
        phis: basis functions evaluated on each x.
        data: (x, y) sample pairs.

    Returns:
        Flat array of fitted coefficients, one per basis function.
    """
    xs, ys = zip(*data)
    X = np.array([[phi(x) for phi in phis] for x in xs], dtype=np.float64)
    Y = np.array(ys, dtype=np.float64).reshape(-1, 1)
    # np.linalg.lstsq is numerically more stable than forming the normal
    # equations X.T @ X (which squares the condition number) and, unlike
    # np.linalg.solve, does not raise on rank-deficient design matrices.
    betas, *_ = np.linalg.lstsq(X, Y, rcond=None)
    return betas.ravel()
async def target_distribution(
    assets: Sequence[trader.Asset],
    value_asset: trader.Asset,
    client: binance.BinanceClient,
    period_ms: int,
    window: binance.Interval,
    beta: float,
) -> Mapping[trader.Asset, float]:
    """Weights assets higher based on the relative rate of change of their price.

    Fetches candlesticks for each asset quoted in `value_asset`, estimates the
    log-price growth rate with a least-squares line fit in log space, and maps
    the rates to weights (summing to 1) with a softmax of sharpness `beta`.
    """
    async def get_price_change(asset: trader.Asset) -> float:
        # The value asset is the numeraire: its own change is zero by definition.
        if asset == value_asset:
            return 0.0
        candlesticks = await client.get_candlesticks(
            base=asset, quote=value_asset, period_ms=period_ms, interval=window,
        )
        def average_price(candlestick: binance.Candlestick) -> float:
            # "Typical price": mean of high, low and close.
            estimates = (
                candlestick.high,
                candlestick.low,
                candlestick.close,
            )
            return float(sum(estimates) / len(estimates))
        def t(candlestick: binance.Candlestick) -> float:
            # Candlestick time in days since the first sample.
            return (
                (candlestick.close_time - candlesticks[0].close_time)
                / binance.Interval.ONE_DAY.milliseconds
            )
        # linear least squares fit in log space
        # i.e. y = e^(mx + c) = da^x
        phis = [lambda x: x, lambda x: 1.0]
        betas = linear_least_squares(
            phis, [(t(c), np.log(float(average_price(c)))) for c in candlesticks]
        )
        # log rate of change
        return betas[0]
        # def model(x):
        #     return np.exp(sum(beta * phi(x) for beta, phi in zip(betas, phis)))
        # # candlesticks are sorted so -1 is most recent
        # now = t(candlesticks[-1])
        # then = now - binance.Interval.ONE_DAY.milliseconds
        # change = model(now) / model(then)
        # return change - 1.0
    price_changes = await asyncio.gather(*map(get_price_change, assets))
    # Penalise falling prices five times harder than rising ones are rewarded.
    inputs = [5 * x if x < 0 else x for x in price_changes]
    # beta defines the sharpness of the softmax
    numerators = tuple(math.exp(beta * x) for x in inputs)
    denominator = sum(numerators)
    return {asset: numerator / denominator for asset, numerator in zip(assets, numerators)}
async def do_trading(
    assets: Sequence[trader.Asset],
    minima: Mapping[trader.Asset, trader.Quantity],
    value_asset: trader.Asset,
    quote_asset: trader.Asset,
    period_ms: int,
    window: binance.Interval,
    beta: float,
    threshold: float,
    client: binance.BinanceClient,
    time: float = 0.0,  # timestamp in seconds
    time_delta: float = 0.0,  # time between this run and the last
) -> None:
    """Run one trading step: compute the target weights and rebalance.

    Asks `target_distribution` for the desired portfolio weights, then
    rebalances towards them, respecting per-asset `minima` and only trading
    when the drift exceeds `threshold`.

    Bug fix: the rebalance call now uses the `client` parameter instead of
    silently reaching for the module-level global `trader_client`, so the
    function works with whatever client the caller injects.
    """
    target = await target_distribution(
        assets=assets,
        value_asset=value_asset,
        client=client,
        period_ms=period_ms,
        window=window,
        beta=beta,
    )
    # Log the target at INFO roughly once per hour (when the hour-phase
    # crosses the 1200 s mark between two consecutive runs); DEBUG otherwise.
    if time % 3600.0 < 1200.0 and (time - time_delta) % 3600.0 > 1200.0:
        logger.info("Target distribution:\n{}".format("\n".join(f"{k}:\t{v:.4f}" for k, v in target.items())))
    else:
        logger.debug("Target distribution:\n{}".format("\n".join(f"{k}:\t{v:.4f}" for k, v in target.items())))
    await trader.rebalance(
        target=target,
        minima=minima,
        value_asset=value_asset,
        quote_asset=quote_asset,
        threshold=threshold,
        client=client,
    )
# CLI: the only flag is the path of the TOML configuration file.
parser = argparse.ArgumentParser()
parser.add_argument("--config-file", default="config.toml")
args = parser.parse_args()
with open(args.config_file) as f:
    config = toml.load(f)
# Root logger fans out to three handlers: a midnight-rotating file, the
# console, and (added further below) the in-memory buffer of the debug server.
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s\t%(levelname)s\t%(name)s\t%(message)s")
file_handler = logging.handlers.TimedRotatingFileHandler(
    config.get("logging", {}).get("file", "binancebot.log"),
    when="midnight",
    backupCount=7,
)
file_handler.setLevel(getattr(logging, config.get("logging", {}).get("file_level", "debug").upper()))
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setLevel(getattr(logging, config.get("logging", {}).get("console_level", "info").upper()))
console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
class ServerHandler(logging.handlers.QueueHandler):
    """QueueHandler variant that pushes digested log records into the
    in-memory RingBuffer exposed by the debug-info HTTP server."""
    def __init__(self, queue: server.RingBuffer):
        logging.handlers.QueueHandler.__init__(self, queue)  # type: ignore
    def prepare(self, record):
        # Reduce the LogRecord to a JSON-friendly dict before buffering.
        return dict(
            time=record.asctime,
            level=record.levelname,
            name=record.name,
            message=record.message,
        )
    def enqueue(self, record):
        # RingBuffer exposes push() rather than the Queue put_nowait() API.
        self.queue.push(record)
server_buffer: server.RingBuffer[Any]
# Ring buffer backing the debug server's recent-logs view; size from config.
server_buffer = server.RingBuffer(size=config.get("logging", {}).get("server_num", 100))
server_handler = ServerHandler(server_buffer)
server_handler.setLevel(getattr(logging, config.get("logging", {}).get("server_level", "info").upper()))
server_handler.setFormatter(formatter)
root_logger.addHandler(server_handler)
root_logger.info("(re)started")
# One shared HTTP client, reused by the exchange client and closed at exit.
http_client = httpx.AsyncClient()
trader_client = binance.BinanceClient(
    api_base=config["binance"]["api_base"],
    api_key=config["binance"]["api_key"],
    secret_key=config["binance"]["secret_key"].encode("ascii"),
    http_client=http_client,
)
loop = asyncio.get_event_loop()
debug_server = server.DebugInfoServer(trader_client, server_buffer)
loop.create_task(debug_server.start(config["server"]["host"], config["server"]["port"]))
# Main loop: execute one do_trading step every update_period_s seconds, forever.
main = loop.create_task(
    periodic(
        config["trader"]["update_period_s"],
        lambda time, time_delta, step_count: do_trading(
            assets=config["trader"]["traded_assets"],
            minima={k: trader.Quantity(v) for k, v in config["trader"]["minima"].items()},
            value_asset=config["trader"]["value_asset"],
            quote_asset=config["trader"]["quote_asset"],
            threshold=config["trader"]["threshold"],
            period_ms=config["trader"]["history_window_ms"],
            window=binance.Interval(config["trader"]["history_resolution"]),
            beta=config["trader"]["beta"],
            client=trader_client,
            time=time,
            time_delta=time_delta,
        ),
    ),
)
loop.run_until_complete(main)
# On shutdown, close the HTTP client and stop the debug server cleanly.
cleanup = loop.create_task(asyncio.gather(http_client.aclose(), debug_server.stop()))
loop.run_until_complete(cleanup)
| [
"argparse.ArgumentParser",
"binancebot.trader.Quantity",
"logging.Formatter",
"numpy.linalg.solve",
"toml.load",
"traceback.format_exc",
"typing.TypeVar",
"binancebot.server.DebugInfoServer",
"asyncio.get_event_loop",
"asyncio.sleep",
"logging.StreamHandler",
"itertools.count",
"httpx.AsyncC... | [((326, 353), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (343, 353), False, 'import logging\n'), ((360, 372), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (367, 372), False, 'from typing import Any, Awaitable, Callable, Mapping, Optional, Protocol, Sequence, Tuple, Type, TypeVar\n'), ((4789, 4814), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4812, 4814), False, 'import argparse\n'), ((4978, 4997), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4995, 4997), False, 'import logging\n'), ((5047, 5117), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s\t%(levelname)s\t%(name)s\t%(message)s"""'], {}), "('%(asctime)s\\t%(levelname)s\\t%(name)s\\t%(message)s')\n", (5064, 5117), False, 'import logging\n'), ((5475, 5498), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5496, 5498), False, 'import logging\n'), ((6534, 6553), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (6551, 6553), False, 'import httpx\n'), ((6782, 6806), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6804, 6806), False, 'import asyncio\n'), ((6823, 6875), 'binancebot.server.DebugInfoServer', 'server.DebugInfoServer', (['trader_client', 'server_buffer'], {}), '(trader_client, server_buffer)\n', (6845, 6875), False, 'from binancebot import binance, trader, server\n'), ((766, 792), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (790, 792), False, 'import asyncio\n'), ((831, 849), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (846, 849), False, 'import itertools\n'), ((4950, 4962), 'toml.load', 'toml.load', (['f'], {}), '(f)\n', (4959, 4962), False, 'import toml\n'), ((4583, 4726), 'binancebot.trader.rebalance', 'trader.rebalance', ([], {'target': 'target', 'minima': 'minima', 'value_asset': 'value_asset', 'quote_asset': 'quote_asset', 'threshold': 'threshold', 'client': 'trader_client'}), 
'(target=target, minima=minima, value_asset=value_asset,\n quote_asset=quote_asset, threshold=threshold, client=trader_client)\n', (4599, 4726), False, 'from binancebot import binance, trader, server\n'), ((5798, 5849), 'logging.handlers.QueueHandler.__init__', 'logging.handlers.QueueHandler.__init__', (['self', 'queue'], {}), '(self, queue)\n', (5836, 5849), False, 'import logging\n'), ((1381, 1411), 'numpy.array', 'np.array', (['ys'], {'dtype': 'np.float64'}), '(ys, dtype=np.float64)\n', (1389, 1411), True, 'import numpy as np\n'), ((1509, 1542), 'numpy.linalg.solve', 'np.linalg.solve', (['(X.T @ X)', '(X.T @ Y)'], {}), '(X.T @ X, X.T @ Y)\n', (1524, 1542), True, 'import numpy as np\n'), ((3484, 3502), 'math.exp', 'math.exp', (['(beta * x)'], {}), '(beta * x)\n', (3492, 3502), False, 'import math\n'), ((540, 562), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (560, 562), False, 'import traceback\n'), ((1106, 1126), 'asyncio.sleep', 'asyncio.sleep', (['delta'], {}), '(delta)\n', (1119, 1126), False, 'import asyncio\n'), ((7499, 7555), 'binancebot.binance.Interval', 'binance.Interval', (["config['trader']['history_resolution']"], {}), "(config['trader']['history_resolution'])\n", (7515, 7555), False, 'from binancebot import binance, trader, server\n'), ((7184, 7202), 'binancebot.trader.Quantity', 'trader.Quantity', (['v'], {}), '(v)\n', (7199, 7202), False, 'from binancebot import binance, trader, server\n')] |
import tensorflow as tf
import numpy as np
import math
import matplotlib.pyplot as PLT
import tflowtools as TFT
import random
random.seed(123)
np.random.seed(123)
tf.set_random_seed(123)
# *********** CASE MANAGER ********
# This is a simple class for organizing the cases (training, validation and test) for a
# a machine-learning system
class Caseman():
    def __init__(self,cfunc,vfrac=0,tfrac=0,cfrac=1.0):
        """Build a case manager.

        cfunc: zero-argument case generator returning a list of
            [input-vector, target-vector] pairs.
        vfrac / tfrac: fractions of the cases reserved for validation/testing.
        cfrac: fraction of all generated cases actually used.
        """
        # Re-seed all RNG sources so every Caseman yields the same shuffle order.
        random.seed(123)
        np.random.seed(123)
        tf.set_random_seed(123)
        self.casefunc = cfunc
        self.validation_fraction = vfrac
        self.test_fraction = tfrac
        self.cfrac = cfrac
        # Whatever is not validation/testing is training data.
        self.training_fraction = 1 - (vfrac + tfrac)
        self.generate_cases()
        self.organize_cases()
def generate_cases(self):
self.cases = self.casefunc() # Run the case generator. Case = [input-vector, target-vector]
def organize_cases(self):
ca = np.array(self.cases)
new_len = len(ca) * self.cfrac
ca = ca[0:int(new_len)]
np.random.shuffle(ca) # Randomly shuffle all cases
separator1 = round(len(self.cases) * self.training_fraction)
separator2 = separator1 + round(len(self.cases)*self.validation_fraction)
self.training_cases = ca[0:separator1]
self.validation_cases = ca[separator1:separator2] if self.validation_fraction>0 else ca
self.testing_cases = ca[separator2:] if self.test_fraction>0 else ca
def get_training_cases(self):
np.random.shuffle(self.training_cases)
return self.training_cases
def get_validation_cases(self): return self.validation_cases
def get_testing_cases(self): return self.testing_cases | [
"numpy.random.seed",
"tensorflow.set_random_seed",
"numpy.array",
"random.seed",
"numpy.random.shuffle"
] | [((127, 143), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (138, 143), False, 'import random\n'), ((144, 163), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (158, 163), True, 'import numpy as np\n'), ((164, 187), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(123)'], {}), '(123)\n', (182, 187), True, 'import tensorflow as tf\n'), ((424, 440), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (435, 440), False, 'import random\n'), ((449, 468), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (463, 468), True, 'import numpy as np\n'), ((477, 500), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(123)'], {}), '(123)\n', (495, 500), True, 'import tensorflow as tf\n'), ((924, 944), 'numpy.array', 'np.array', (['self.cases'], {}), '(self.cases)\n', (932, 944), True, 'import numpy as np\n'), ((1024, 1045), 'numpy.random.shuffle', 'np.random.shuffle', (['ca'], {}), '(ca)\n', (1041, 1045), True, 'import numpy as np\n'), ((1489, 1527), 'numpy.random.shuffle', 'np.random.shuffle', (['self.training_cases'], {}), '(self.training_cases)\n', (1506, 1527), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import time
from tensorflow.keras.callbacks import TensorBoard
from keras.utils import to_categorical
#DATA COLLECTION AND CATEGORIES DEFINITION
Datadir = "data"
Categories = ["Benign130" , "Non-cancer130" , "Malignant130"]
#print(img_array)
#print(img_array.shape)
#DATA PRE-PROCESSING
img_size = 150
training_data = []
def create_training_data():
    """Fill the module-global `training_data` list with
    [grayscale-image-array, class-index] pairs read from Datadir/<category>.
    """
    for category in Categories:
        path = os.path.join(Datadir, category)
        # Label = the category's position in the Categories list.
        class_num = Categories.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (img_size, img_size))
                training_data.append([new_array,class_num])
            except Exception as e:
                # Unreadable/corrupt image files are skipped silently.
                pass
create_training_data()
import random
# Shuffle so classes are interleaved before the train/validation split.
random.shuffle(training_data)
X = []
Y = []
for features,label in training_data:
    X.append(features)
    Y.append(label)
# Shape: (n_samples, img_size, img_size, 1) -- single grayscale channel.
X = np.array(X).reshape(-1,img_size,img_size,1)
Y = np.array(Y)
Y= to_categorical(Y)  # one-hot encode the 3 class labels
#MODELS BUILDING
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten,Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import time
X = X/255.0
batch_size = [32, 64, 128]
epochs = [10, 15, 20, 50]
dense_layers = [0,1,2]
layer_sizes = [32,64,128]
conv_layers = [1,2,3]
#------------------------------------------------------------------------------------------------------------
# Grid search over architecture hyperparameters; each run logs to TensorBoard
# under a name encoding its configuration.
for dense_layer in dense_layers:  # look for the best combination
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME4 = "{}-conv-{}-nodes-{}dense-{}".format(conv_layer,layer_size,dense_layer,int(time.time( )))
            tensorboard = TensorBoard(log_dir="logsmulti/{}".format(NAME4))
            print(NAME4)
            model = Sequential()
            model.add(Conv2D(layer_size, (3, 3), input_shape=(img_size, img_size, 1), activation="relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            # Additional conv blocks beyond the first one added above.
            for l in range(conv_layer-1):
                model.add(Conv2D(layer_size, (3,3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2,2)))
            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))
                model.add(Dropout(0.2))
            model.add(Flatten())
            model.add(Dense(3))
            model.add(Activation("softmax"))
            model.compile(loss="categorical_crossentropy", optimizer = "adam", metrics= ["accuracy"])
            model.fit(X, Y, batch_size=32, epochs=20, validation_split=0.2, callbacks= [tensorboard])
#------------------------------------------------------------------------------------------------------------------------
#FIT THE MODEL WITH THE BEST VALUES OBTAINED
dense_layers = [1]
layer_sizes = [64]
conv_layers = [2]
# Re-train with the best hyperparameters found above (2 conv, 64 nodes, 1 dense).
for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME4 = "{}-conv-{}-nodes-{}dense-{}".format(conv_layer,layer_size,dense_layer,int(time.time( )))
            tensorboard = TensorBoard(log_dir="logsmultib/{}".format(NAME4))
            model = Sequential()
            model.add(Conv2D(layer_size, (3, 3), input_shape=(img_size, img_size, 1), activation="relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            for l in range(conv_layer-1):  # one conv block was already added above
                model.add(Conv2D(layer_size, (3,3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2,2)))
            for l in range(dense_layer):  # the first dense needs a flatten layer
                model.add(Dense(layer_size))
                model.add(Activation("relu"))
                model.add(Dropout(0.2))  # helps against overfitting
            model.add(Flatten())  # helps prevent overfitting
            model.add(Dense(3))
            model.add(Activation("softmax"))
            model.compile(loss="categorical_crossentropy", optimizer = "adam", metrics= ["accuracy"])
            # NOTE(review): these loops iterate over batch_size/epochs but fit
            # with fixed batch_size=32, epochs=15 each time -- the model is
            # redundantly re-fit len(batch_size)*len(epochs) times.
            for batch in batch_size:
                for epoch in epochs:
                    #model.fit(X, Y, batch_size=batch, epochs=epoch, validation_split=0.2) #Hypertuning of epochs and batch_size
                    model.fit(X, Y, batch_size=32, epochs=15, validation_split=0.2, callbacks = [tensorboard])
model.save("Cancer_prediction")
#82-86% accuracy con batch 32 e 15 epoche , come previsto
# val_loss 0.73
model2 = tf.keras.models.load_model("Cancer_prediction")
print(model2.summary())
# 2 convolutional layers , with 64 filters 3x3
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"tensorflow.keras.layers.Dropout",
"time.time",
"tensorflow.keras.layers.Activation",
"numpy.array",... | [((919, 948), 'random.shuffle', 'random.shuffle', (['training_data'], {}), '(training_data)\n', (933, 948), False, 'import random\n'), ((1098, 1109), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1106, 1109), True, 'import numpy as np\n'), ((1114, 1131), 'keras.utils.to_categorical', 'to_categorical', (['Y'], {}), '(Y)\n', (1128, 1131), False, 'from keras.utils import to_categorical\n'), ((4713, 4760), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""Cancer_prediction"""'], {}), "('Cancer_prediction')\n", (4739, 4760), True, 'import tensorflow as tf\n'), ((474, 505), 'os.path.join', 'os.path.join', (['Datadir', 'category'], {}), '(Datadir, category)\n', (486, 505), False, 'import os\n'), ((572, 588), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (582, 588), False, 'import os\n'), ((1050, 1061), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1058, 1061), True, 'import numpy as np\n'), ((2005, 2017), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2015, 2017), False, 'from tensorflow.keras.models import Sequential\n'), ((3410, 3422), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3420, 3422), False, 'from tensorflow.keras.models import Sequential\n'), ((721, 764), 'cv2.resize', 'cv2.resize', (['img_array', '(img_size, img_size)'], {}), '(img_array, (img_size, img_size))\n', (731, 764), False, 'import cv2\n'), ((2040, 2127), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {'input_shape': '(img_size, img_size, 1)', 'activation': '"""relu"""'}), "(layer_size, (3, 3), input_shape=(img_size, img_size, 1), activation=\n 'relu')\n", (2046, 2127), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2146, 2176), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2158, 2176), False, 'from tensorflow.keras.layers import Dense, 
Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2573, 2582), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2580, 2582), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2608, 2616), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (2613, 2616), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2640, 2661), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2650, 2661), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3445, 3532), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {'input_shape': '(img_size, img_size, 1)', 'activation': '"""relu"""'}), "(layer_size, (3, 3), input_shape=(img_size, img_size, 1), activation=\n 'relu')\n", (3451, 3532), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3551, 3581), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3563, 3581), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((4059, 4068), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4066, 4068), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((4119, 4127), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (4124, 4127), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((4151, 4172), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (4161, 4172), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((646, 669), 'os.path.join', 'os.path.join', (['path', 'img'], {}), 
'(path, img)\n', (658, 669), False, 'import os\n'), ((1868, 1879), 'time.time', 'time.time', ([], {}), '()\n', (1877, 1879), False, 'import time\n'), ((2248, 2274), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {}), '(layer_size, (3, 3))\n', (2254, 2274), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2301, 2319), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2311, 2319), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2347, 2377), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2359, 2377), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2446, 2463), 'tensorflow.keras.layers.Dense', 'Dense', (['layer_size'], {}), '(layer_size)\n', (2451, 2463), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2491, 2509), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2501, 2509), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((2537, 2549), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2544, 2549), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3296, 3307), 'time.time', 'time.time', ([], {}), '()\n', (3305, 3307), False, 'import time\n'), ((3672, 3698), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {}), '(layer_size, (3, 3))\n', (3678, 3698), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3725, 3743), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3735, 3743), False, 'from 
tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3771, 3801), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3783, 3801), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3910, 3927), 'tensorflow.keras.layers.Dense', 'Dense', (['layer_size'], {}), '(layer_size)\n', (3915, 3927), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((3955, 3973), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3965, 3973), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((4001, 4013), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4008, 4013), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n')] |
# (c) 2019-2020, <NAME> @ ETH Zurich
# Computer-assisted Applications in Medicine (CAiM) Group, Prof. <NAME>
# Based on https://github.com/higgsfield/RL-Adventure-2
from network import ActorCritic
import torch
import numpy as np
from multiprocessing_env import SubprocVecEnv
from tensorboardX import SummaryWriter
from datetime import datetime
import os
import pickle
import torch.nn as nn
import random
import time
import pdb
import logging
import sys
# Module-wide logger: timestamped INFO-level messages streamed to stdout.
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s', level=logging.INFO, stream=sys.stdout)
logger.setLevel(logging.INFO)
class PPO(object):
    """Proximal Policy Optimization (clipped surrogate objective).

    Trains an ActorCritic policy/value network on several parallel copies of
    the environment (SubprocVecEnv) and periodically evaluates on a single
    test environment. Based on https://github.com/higgsfield/RL-Adventure-2.
    """
    def __init__(self, args):
        """Constructor which allows the PPO class to initialize the attributes of the class.

        Args:
            args : configuration object (hyper-parameters, env factory, optimizer, ...)
        """
        self.args = args
        self.random_seed()
        # Check if GPU is available via CUDA driver
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if self.use_cuda else "cpu")
        # Initialize the actor critic network
        self.actor_critic = ActorCritic(self.args.nb_states,
                                        self.args.nb_actions,
                                        self.args.hidden_layer_size).to(self.device)
        # Define the optimizer used for the optimization of the surrogate loss
        self.optimizer = self.args.optimizer(self.actor_critic.parameters(), self.args.lr)
        # For training, multiple instances of the env are needed (Shoulder model)
        self.envs = [self.make_env() for i in range(self.args.num_envs)]
        self.envs = SubprocVecEnv(self.envs)
        # To validate the intermediate learning process one test env is needed
        self.env_test = self.args.env
        self.env_test.seed(self.args.seed)
        self.env_test.set_scaling(self.args.output_scaling)
        # Lists for Tensorboard to visualize the learning process
        self.test_rewards = []
        self.loss = []
        self.lr = []
        self.actor_grad_weight = []
        self.action_bang_bang = []
        self.lr.append(self.args.lr)
        # Dump bin files (only when training, not when playing back)
        if self.args.play is False:
            self.output_path = "trained_models" + '/PPO_{}'.format(datetime.now().strftime('%Y%b%d_%H%M%S')) + "/"
            os.mkdir(self.output_path)
            self.writer = SummaryWriter(self.output_path)
        #self.delta = (self.args.lr-self.args.lr_end)/1e6
    def train(self):
        """Main training function: collect rollouts, compute GAE returns, run PPO updates."""
        frame_idx = 0
        state = self.envs.reset()
        mean_100_reward = -np.inf
        self.info()
        while frame_idx < self.args.max_frames:
            # Rollout storage for one horizon of nb_steps
            log_probs = []
            values = []
            states = []
            actions = []
            rewards = []
            masks = []
            # NOTE: this accumulator is not used by the loss; the entropy bonus
            # is recomputed per mini-batch inside ppo_update().
            entropy = self.args.entropy
            for _ in range(self.args.nb_steps):
                state = torch.FloatTensor(state).to(self.device)
                dist, value = self.actor_critic(state)
                action = dist.sample()
                # Make sure action is loaded to CPU (not GPU)
                next_state, reward, done, _ = self.envs.step(action.cpu().numpy())
                log_prob = dist.log_prob(action)
                entropy += dist.entropy().mean()
                log_probs.append(log_prob)
                values.append(value)
                rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(self.device))
                # mask = 0 at episode end cuts the bootstrapped return
                masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(self.device))
                states.append(state)
                actions.append(action)
                state = next_state
                frame_idx += 1
                #self.scheduler()
                # Evaluate training process and write data to tensorboard
                if frame_idx % 1000 == 0:
                    test_reward = np.mean([self.test_env(self.args.vis) for _ in range(10)])
                    self.test_rewards.append(test_reward)
                    if self.args.play is False:
                        print("Mean reward: ", np.round(np.mean(self.test_rewards[-101:-1]), 0))
                        # Checkpoint whenever the running-mean reward improves
                        if mean_100_reward < np.round(np.mean(self.test_rewards[-101:-1]), 0):
                            mean_100_reward = np.round(np.mean(self.test_rewards[-101:-1]), 0)
                            self.save_network(mean_100_reward)
                        if len(self.test_rewards) >= 10:
                            self.writer.add_scalar('data/reward', np.mean(self.test_rewards[-11:-1]),
                                                   frame_idx*self.args.num_envs)
                            self.writer.add_scalar('data/ppo_loss', np.mean(self.loss[-11:-1]),
                                                   frame_idx*self.args.num_envs)
                            self.writer.add_scalar('data/nb_actions_outside_range', np.mean(self.action_bang_bang[-11:-1]),
                                                   frame_idx*self.args.num_envs)
                    # if test_reward > threshold_reward: early_stop = True
            # Bootstrap the value of the last state for GAE
            next_state = torch.FloatTensor(next_state).to(self.device)
            _, next_value = self.actor_critic(next_state)
            returns = self.calc_gae(next_value, rewards, masks, values, self.args.gamma, self.args.tau)
            # detach() to take it away from the graph i.e. this operations are ignored for gradient calculations
            returns = torch.cat(returns).detach()
            log_probs = torch.cat(log_probs).detach()
            values = torch.cat(values).detach()
            states = torch.cat(states)
            actions = torch.cat(actions)
            advantage = returns - values
            self.ppo_update(self.args.ppo_epochs, self.args.mini_batch_size, states, actions, log_probs, returns,
                            advantage, self.args.clip)
    def make_env(self):
        """Return a thunk that builds one seeded environment instance (for SubprocVecEnv)."""
        # Private trunk function for calling the SubprocVecEnv class
        def _trunk():
            env = self.args.env  # in this simple case the class TestEnv() is called (see openAI for more envs)
            env.seed(self.args.seed)
            env.set_scaling(self.args.output_scaling)
            return env
        return _trunk
    def test_env(self, vis=False):
        """Roll out one full episode on the test environment.

        Args:
            vis : if True, render the environment
        Returns:
            total (undiscounted) episode reward
        """
        state = self.env_test.reset()
        if vis:
            self.env_test.render()
        done = False
        total_reward = 0
        action_bang_bang = 0
        step = 0
        while not done:
            step += 1
            state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            dist, _ = self.actor_critic(state)
            action = dist.sample().cpu().numpy()[0]
            force = action * self.args.output_scaling
            next_state, reward, done, _ = self.env_test.step(action)
            # Count actions whose scaled force saturates outside [-0.5, 0.5]
            if force > 0.5 or force < -0.5:
                action_bang_bang += 1
            state = next_state
            if vis:
                self.env_test.render()
            total_reward += reward
        self.action_bang_bang.append(action_bang_bang/step)
        return total_reward
    # Plain functions except that one can call them from an instance or the class
    @staticmethod
    def calc_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
        """Generalized Advantage Estimation (GAE) returns, computed backwards in time.

        Args:
            next_value : bootstrapped value of the state after the last step
            rewards    : per-step rewards
            masks      : per-step continuation masks (0 at episode end)
            values     : per-step value estimates
            gamma      : discount factor
            tau        : GAE smoothing parameter (lambda)
        Returns:
            list of per-step returns (advantage + value)
        """
        values = values + [next_value]
        gae = 0
        returns = []
        for step in reversed(range(len(rewards))):
            delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
            gae = delta + gamma * tau * masks[step] * gae
            returns.insert(0, gae + values[step])
        return returns
    @staticmethod
    def ppo_iter(mini_batch_size, states, actions, log_probs, returns, advantage):
        """Yield random mini-batches over one rollout buffer."""
        batch_size = states.size(0)
        for _ in range(batch_size // mini_batch_size):
            rand_ids = np.random.randint(0, batch_size, mini_batch_size)
            yield states[rand_ids, :], actions[rand_ids, :], log_probs[rand_ids, :], returns[rand_ids, :], advantage[
                rand_ids, :]
    def ppo_update(self, ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantages, clip_param=0.2):
        """Run the clipped-surrogate PPO optimization over the collected rollout."""
        for _ in range(ppo_epochs):
            for state, action, old_log_probs, return_, advantage in self.ppo_iter(mini_batch_size,
                                                                                  states,
                                                                                  actions,
                                                                                  log_probs,
                                                                                  returns,
                                                                                  advantages):
                dist, value = self.actor_critic(state)
                entropy = dist.entropy().mean()
                new_log_probs = dist.log_prob(action)
                # Probability ratio pi_new / pi_old and the clipped surrogate objective
                ratio = (new_log_probs - old_log_probs).exp()
                surr1 = ratio * advantage
                surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantage
                actor_loss = - torch.min(surr1, surr2).mean()
                critic_loss = (return_ - value).pow(2).mean()
                loss = 0.5 * critic_loss + actor_loss - 0.001 * entropy
                self.loss.append(loss.item())
                # Important step:
                self.optimizer.zero_grad()
                #pdb.set_trace()
                loss.backward()
                if self.args.grad_norm is not None:
                    nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.args.grad_norm)
                self.optimizer.step()
    def save_network(self, reward):
        """Serialize the actor-critic state dict to <output_path>/network<reward>.

        FIX: use a context manager so the file handle is always closed
        (the original left the handle returned by open() unclosed).
        """
        network_path = self.output_path + "/network" + str(reward)
        with open(network_path, "wb") as f:
            pickle.dump(self.actor_critic.state_dict(), f)
    def load_network(self, path):
        """Load an actor-critic state dict from the pickle file at `path`."""
        # FIX: context manager closes the file handle (was left open before)
        with open(path, "rb") as f:
            network_new = pickle.load(f)
        self.actor_critic.load_state_dict(network_new)
    def random_seed(self):
        """Seed torch, random and numpy for reproducibility."""
        torch.manual_seed(self.args.seed)
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
    def scheduler(self):
        """Linear learning-rate decay down to args.lr_end.

        NOTE(review): relies on self.delta, whose initialization is currently
        commented out in __init__ (this method is itself only called from a
        commented-out line in train()).
        """
        for g in self.optimizer.param_groups:
            lr = g["lr"]
            if self.args.lr_end > lr:
                lr = self.args.lr_end
            else:
                lr -= self.delta
            self.lr.append(lr)
            g["lr"] = lr
    def info(self):
        """Attach a file handler under output_path and log the run configuration."""
        fhandler = logging.FileHandler(filename=self.output_path + '/mylog.log', mode='a')
        logger.addHandler(fhandler)
        logger.info("--- INFO ---")
        logger.info("args: {}".format(self.args))
| [
"os.mkdir",
"numpy.random.seed",
"torch.cat",
"numpy.random.randint",
"numpy.mean",
"torch.device",
"network.ActorCritic",
"multiprocessing_env.SubprocVecEnv",
"logging.FileHandler",
"torch.FloatTensor",
"random.seed",
"datetime.datetime.now",
"torch.manual_seed",
"torch.clamp",
"torch.c... | [((468, 495), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (485, 495), False, 'import logging\n'), ((496, 610), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s | %(levelname)s : %(message)s"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format='%(asctime)s | %(levelname)s : %(message)s',\n level=logging.INFO, stream=sys.stdout)\n", (515, 610), False, 'import logging\n'), ((937, 962), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (960, 962), False, 'import torch\n'), ((985, 1033), 'torch.device', 'torch.device', (["('cuda' if self.use_cuda else 'cpu')"], {}), "('cuda' if self.use_cuda else 'cpu')\n", (997, 1033), False, 'import torch\n'), ((1631, 1655), 'multiprocessing_env.SubprocVecEnv', 'SubprocVecEnv', (['self.envs'], {}), '(self.envs)\n', (1644, 1655), False, 'from multiprocessing_env import SubprocVecEnv\n'), ((10165, 10198), 'torch.manual_seed', 'torch.manual_seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10182, 10198), False, 'import torch\n'), ((10207, 10234), 'random.seed', 'random.seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10218, 10234), False, 'import random\n'), ((10243, 10273), 'numpy.random.seed', 'np.random.seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10257, 10273), True, 'import numpy as np\n'), ((10606, 10677), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': "(self.output_path + '/mylog.log')", 'mode': '"""a"""'}), "(filename=self.output_path + '/mylog.log', mode='a')\n", (10625, 10677), False, 'import logging\n'), ((2329, 2355), 'os.mkdir', 'os.mkdir', (['self.output_path'], {}), '(self.output_path)\n', (2337, 2355), False, 'import os\n'), ((2382, 2413), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['self.output_path'], {}), '(self.output_path)\n', (2395, 2413), False, 'from tensorboardX import SummaryWriter\n'), ((5649, 5666), 'torch.cat', 'torch.cat', (['states'], {}), '(states)\n', 
(5658, 5666), False, 'import torch\n'), ((5689, 5707), 'torch.cat', 'torch.cat', (['actions'], {}), '(actions)\n', (5698, 5707), False, 'import torch\n'), ((7873, 7922), 'numpy.random.randint', 'np.random.randint', (['(0)', 'batch_size', 'mini_batch_size'], {}), '(0, batch_size, mini_batch_size)\n', (7890, 7922), True, 'import numpy as np\n'), ((1106, 1194), 'network.ActorCritic', 'ActorCritic', (['self.args.nb_states', 'self.args.nb_actions', 'self.args.hidden_layer_size'], {}), '(self.args.nb_states, self.args.nb_actions, self.args.\n hidden_layer_size)\n', (1117, 1194), False, 'from network import ActorCritic\n'), ((5154, 5183), 'torch.FloatTensor', 'torch.FloatTensor', (['next_state'], {}), '(next_state)\n', (5171, 5183), False, 'import torch\n'), ((5498, 5516), 'torch.cat', 'torch.cat', (['returns'], {}), '(returns)\n', (5507, 5516), False, 'import torch\n'), ((5550, 5570), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (5559, 5570), False, 'import torch\n'), ((5601, 5618), 'torch.cat', 'torch.cat', (['values'], {}), '(values)\n', (5610, 5618), False, 'import torch\n'), ((9163, 9217), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - clip_param)', '(1.0 + clip_param)'], {}), '(ratio, 1.0 - clip_param, 1.0 + clip_param)\n', (9174, 9217), False, 'import torch\n'), ((2952, 2976), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (2969, 2976), False, 'import torch\n'), ((6562, 6586), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (6579, 6586), False, 'import torch\n'), ((9263, 9286), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (9272, 9286), False, 'import torch\n'), ((2269, 2283), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2281, 2283), False, 'from datetime import datetime\n'), ((4136, 4171), 'numpy.mean', 'np.mean', (['self.test_rewards[-101:-1]'], {}), '(self.test_rewards[-101:-1])\n', (4143, 4171), True, 'import numpy as np\n'), ((4231, 4266), 
'numpy.mean', 'np.mean', (['self.test_rewards[-101:-1]'], {}), '(self.test_rewards[-101:-1])\n', (4238, 4266), True, 'import numpy as np\n'), ((4327, 4362), 'numpy.mean', 'np.mean', (['self.test_rewards[-101:-1]'], {}), '(self.test_rewards[-101:-1])\n', (4334, 4362), True, 'import numpy as np\n'), ((4553, 4587), 'numpy.mean', 'np.mean', (['self.test_rewards[-11:-1]'], {}), '(self.test_rewards[-11:-1])\n', (4560, 4587), True, 'import numpy as np\n'), ((4738, 4764), 'numpy.mean', 'np.mean', (['self.loss[-11:-1]'], {}), '(self.loss[-11:-1])\n', (4745, 4764), True, 'import numpy as np\n'), ((4931, 4969), 'numpy.mean', 'np.mean', (['self.action_bang_bang[-11:-1]'], {}), '(self.action_bang_bang[-11:-1])\n', (4938, 4969), True, 'import numpy as np\n'), ((3443, 3468), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (3460, 3468), False, 'import torch\n'), ((3528, 3555), 'torch.FloatTensor', 'torch.FloatTensor', (['(1 - done)'], {}), '(1 - done)\n', (3545, 3555), False, 'import torch\n')] |
# Common input & data reading routines for the electron ID
#
# <NAME>, 2020
# <EMAIL>
import copy
import math
import argparse
import pprint
import psutil
import os
import datetime
import json
import pickle
import sys
import yaml
import numpy as np
import torch
import uproot
from concurrent.futures import ThreadPoolExecutor
from termcolor import colored, cprint
from tqdm import tqdm
from icenet.tools import io
from icenet.tools import aux
from icenet.tools import plots
from icenet.tools import prints
from configs.eid.mctargets import *
from configs.eid.mcfilter import *
from configs.eid.mvavars import *
from configs.eid.cuts import *
def read_config():
    """Parse the command line and load the YAML steering card.

    Returns:
        args     : configuration dictionary read from ./configs/eid/<config>.yml
        cli      : parsed command-line namespace
        features : imputation variable list resolved from module globals
    """
    parser = argparse.ArgumentParser()
    for flag, default in (("--config", 'tune0'), ("--datapath", "."), ("--datasets", "0")):
        parser.add_argument(flag, type=str, default=default)
    cli = parser.parse_args()

    # Input is [0,1,2,..]
    cli.datasets = cli.datasets.split(',')

    ## Read configuration
    args = {}
    config_yaml_file = cli.config + '.yml'
    with open('./configs/eid/' + config_yaml_file, 'r') as stream:
        try:
            args = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    args['config'] = cli.config
    print(args)
    print('')
    print('torch.__version__: ' + torch.__version__)

    # Resolve the variable list named in the yaml from this module's globals
    features = globals()[args['imputation_param']['var']]
    return args, cli, features
def init(MAXEVENTS=None):
    """ Initialize electron ID data.
    
    Args:
        Implicit commandline and yaml file input.
        MAXEVENTS : optional event-count cap; overrides the yaml 'MAXEVENTS' value
    
    Returns:
        jagged array data, arguments
    """
    args, cli, features = read_config()
    # --------------------------------------------------------------------
    ### SET GLOBALS (used only in this file)
    global ARGS
    ARGS = args
    if MAXEVENTS is not None:
        ARGS['MAXEVENTS'] = MAXEVENTS
    print(__name__ + f'.init: inputvar = {args["inputvar"]}')
    print(__name__ + f'.init: cutfunc = {args["cutfunc"]}')
    print(__name__ + f'.init: targetfunc = {args["targetfunc"]}')
    # --------------------------------------------------------------------
    ### Load data
    # One ROOT file per requested dataset index
    paths = []
    for i in cli.datasets:
        paths.append(cli.datapath + '/output_' + str(i) + '.root')
    # Background (0) and signal (1)
    class_id = [0,1]
    data = io.DATASET(func_loader=load_root_file_new, files=paths, class_id=class_id, frac=args['frac'], rngseed=args['rngseed'])
    # @@ Imputation @@
    if args['imputation_param']['active']:
        special_values = args['imputation_param']['values'] # possible special values
        print(__name__ + f': Imputing data for special values {special_values} for variables in <{args["imputation_param"]["var"]}>')
        # Choose active dimensions (columns listed in the imputation variable set)
        dim = np.array([i for i in range(len(data.VARS)) if data.VARS[i] in features], dtype=int)
        # Parameters
        param = {
            "dim": dim,
            "values": special_values,
            "labels": data.VARS,
            "algorithm": args['imputation_param']['algorithm'],
            "fill_value": args['imputation_param']['fill_value'],
            "knn_k": args['imputation_param']['knn_k']
        }
        # NOTE, UPDATE NEEDED: one should save here 'imputer_trn' to a disk -> can be used with data
        # The imputer is fitted on the training split, then re-used on test/validation
        data.trn.x, imputer_trn = io.impute_data(X=data.trn.x, imputer=None, **param)
        data.tst.x, _ = io.impute_data(X=data.tst.x, imputer=imputer_trn, **param)
        data.val.x, _ = io.impute_data(X=data.val.x, imputer=imputer_trn, **param)
    else:
        # No imputation, but fix spurious NaN / Inf
        data.trn.x[np.logical_not(np.isfinite(data.trn.x))] = 0
        data.val.x[np.logical_not(np.isfinite(data.val.x))] = 0
        data.tst.x[np.logical_not(np.isfinite(data.tst.x))] = 0
    cprint(__name__ + f""".common: Process RAM usage: {io.process_memory_use():0.2f} GB 
            [total RAM in use: {psutil.virtual_memory()[2]} %]""", 'red')
    return data, args, features
def compute_reweights(data, args):
    """Compute 2D (pt, eta) histogram based re-weighting coefficients.

    Args:
        data : training data object
        args : arguments object
    Returns:
        trn_weights : array of per-event training re-weights
    """
    # Kinematic re-weighting variables
    pt_values  = data.trn.x[:, data.VARS.index('trk_pt')]
    eta_values = data.trn.x[:, data.VARS.index('trk_eta')]

    def _edges(key):
        # Binning specification in the yaml is [min, max, number of points]
        lo, hi, num = args['reweight_param'][key]
        return np.linspace(lo, hi, num)

    pt_binedges  = _edges('bins_pt')
    eta_binedges = _edges('bins_eta')

    print(__name__ + f".compute_reweights: reference_class: <{args['reweight_param']['reference_class']}>")

    ### Compute 2D-pdfs for each class
    N_class = 2
    pdf = {c: aux.pdf_2D_hist(X_A=pt_values[data.trn.y == c], X_B=eta_values[data.trn.y == c],
                              binedges_A=pt_binedges, binedges_B=eta_binedges)
           for c in range(N_class)}
    pdf['binedges_A'] = pt_binedges
    pdf['binedges_B'] = eta_binedges

    # Compute event-by-event weights
    if args['reweight_param']['reference_class'] != -1:
        trn_weights = aux.reweightcoeff2D(
            X_A=pt_values, X_B=eta_values, pdf=pdf, y=data.trn.y, N_class=N_class,
            equal_frac=args['reweight_param']['equal_frac'],
            reference_class=args['reweight_param']['reference_class'],
            max_reg=args['reweight_param']['max_reg'])
    else:
        # Re-weighting turned off: unit weight per event
        weights_doublet = np.zeros((data.trn.x.shape[0], N_class))
        for c in range(N_class):
            weights_doublet[data.trn.y == c, c] = 1
        trn_weights = np.sum(weights_doublet, axis=1)

    # Per-class event counts and weight sums for the bookkeeping printout
    frac = np.zeros(N_class)
    sums = np.zeros(N_class)
    for c in range(N_class):
        frac[c] = np.sum(data.trn.y == c)
        sums[c] = np.sum(trn_weights[data.trn.y == c])

    print(__name__ + f'.compute_reweights: sum(trn.y==c): {frac}')
    print(__name__ + f'.compute_reweights: sum(trn_weights[trn.y==c]): {sums}')
    print(__name__ + f'.compute_reweights: [done]\n')

    return trn_weights
def splitfactor(data, args):
    """
    Split electron ID data into different datatypes.
    
    Args:
        data: jagged arrays
        args: arguments dictionary
    
    Returns:
        data        : scalar (vector) data
        data_tensor : tensor data (images)
        data_kin    : kinematic data
    
    Raises:
        Exception : if args['image_param']['channels'] is not 1 or 2
    """
    ### Pick kinematic variables out
    k_ind, k_vars = io.pick_vars(data, KINEMATIC_ID)
    
    data_kin = copy.deepcopy(data)
    # FIX: np.float was a deprecated (now removed) alias of the builtin float;
    # astype(float) gives the same float64 dtype on all NumPy versions.
    data_kin.trn.x = data.trn.x[:, k_ind].astype(float)
    data_kin.val.x = data.val.x[:, k_ind].astype(float)
    data_kin.tst.x = data.tst.x[:, k_ind].astype(float)
    data_kin.VARS = k_vars

    ### Pick active jagged array / "image" variables out
    j_ind, j_vars = io.pick_vars(data, globals()['CMSSW_MVA_ID_IMAGE'])
    
    data_image = copy.deepcopy(data)
    data_image.trn.x = data.trn.x[:, j_ind]
    data_image.val.x = data.val.x[:, j_ind]
    data_image.tst.x = data.tst.x[:, j_ind]
    data_image.VARS = j_vars

    # Image channel layout: one (x, y, value) triplet of branch names per channel
    # Use single channel tensors
    if args['image_param']['channels'] == 1:
        xyz = [['image_clu_eta', 'image_clu_phi', 'image_clu_e']]
    # Use multichannel tensors
    elif args['image_param']['channels'] == 2:
        xyz = [['image_clu_eta', 'image_clu_phi', 'image_clu_e'],
               ['image_pf_eta', 'image_pf_phi', 'image_pf_p']]
    else:
        # FIX: 'Except' was an undefined name (raised NameError instead of the
        # intended configuration error)
        raise Exception(__name__ + f'.splitfactor: Unknown [image_param][channels] parameter')

    eta_binedges = args['image_param']['eta_bins']
    phi_binedges = args['image_param']['phi_bins']

    # Pick tensor data out
    cprint(__name__ + f'.splitfactor: jagged2tensor processing ...', 'yellow')
    data_tensor = {}
    data_tensor['trn'] = aux.jagged2tensor(X=data_image.trn.x, VARS=j_vars, xyz=xyz, x_binedges=eta_binedges, y_binedges=phi_binedges)
    data_tensor['val'] = aux.jagged2tensor(X=data_image.val.x, VARS=j_vars, xyz=xyz, x_binedges=eta_binedges, y_binedges=phi_binedges)
    data_tensor['tst'] = aux.jagged2tensor(X=data_image.tst.x, VARS=j_vars, xyz=xyz, x_binedges=eta_binedges, y_binedges=phi_binedges)

    ### Pick active scalar variables out
    s_ind, s_vars = io.pick_vars(data, globals()[args['inputvar']])
    data.trn.x = data.trn.x[:, s_ind].astype(float)
    data.val.x = data.val.x[:, s_ind].astype(float)
    data.tst.x = data.tst.x[:, s_ind].astype(float)
    data.VARS = s_vars

    return data, data_tensor, data_kin
def load_root_file_new(root_path, VARS=None, entrystart=0, entrystop=None, class_id = [], args=None):
    """ Loads the root file.
    
    Args:
        root_path  : path to the root file
        VARS       : branch names to read (None -> all branches of the tree)
        entrystart : first entry to read
        entrystop  : last entry to read (None -> args['MAXEVENTS'])
        class_id   : class ids
        args       : steering dictionary (None -> module-global ARGS)
    
    Returns:
        X,Y  : input, output matrices (Y is None for non-MC data)
        VARS : variable names
    
    NOTE(review): relies on the uproot 3.x API (events.numentries, byte-string
    branch keys, arrays(..., namedecode=...)) -- not compatible with uproot 4+.
    """
    # -----------------------------------------------
    # ** GLOBALS **
    if args is None:
        args = ARGS
    # Resolve cut/target/filter functions by name from the module globals
    # (they are star-imported from configs.eid.*)
    CUTFUNC = globals()[args['cutfunc']]
    TARFUNC = globals()[args['targetfunc']]
    FILTERFUNC = globals()[args['filterfunc']]
    if entrystop is None:
        entrystop = args['MAXEVENTS']
    # -----------------------------------------------
    def showmem():
        # Print current process + system memory usage
        cprint(__name__ + f""".load_root_file: Process RAM usage: {io.process_memory_use():0.2f} GB 
        [total RAM in use {psutil.virtual_memory()[2]} %]""", 'red')
    ### From root trees
    print('\n')
    cprint( __name__ + f'.load_root_file: Loading with uproot from file ' + root_path, 'yellow')
    cprint( __name__ + f'.load_root_file: entrystart = {entrystart}, entrystop = {entrystop}')
    file = uproot.open(root_path)
    events = file["ntuplizer"]["tree"]
    print(events.name)
    print(events.title)
    cprint(__name__ + f'.load_root_file: events.numentries = {events.numentries}', 'green')
    ### All variables
    # uproot 3 returns branch names as bytes -> decode to str
    if VARS is None:
        VARS = [x.decode() for x in events.keys()]
    #VARS_scalar = [x.decode() for x in events.keys() if b'image_' not in x]
    # Turn into dictionaries
    executor = ThreadPoolExecutor(4)
    # Check is it MC (based on the first event)
    X_test = events.arrays('is_mc', outputtype=list, executor=executor, namedecode = "utf-8", entrystart=entrystart, entrystop=entrystop)
    isMC = bool(X_test[0][0])
    N = len(X_test)
    print(__name__ + f'.load_root_file: isMC: {isMC}')
    # Now read the data
    print(__name__ + '.load_root_file: Loading root file ...')
    # Rows = events, columns = variables (hence the transpose below)
    X = np.array(events.arrays(VARS, outputtype=list, executor=executor, namedecode = "utf-8", entrystart=entrystart, entrystop=entrystop))
    X = X.T
    Y = None
    print(f'X.shape = {X.shape}')
    showmem()
    prints.printbar()
    # =================================================================
    # *** MC ONLY ***
    if isMC:
        # @@ MC target definition here @@
        cprint(__name__ + f'.load_root_file: Computing MC <targetfunc> ...', 'yellow')
        Y = TARFUNC(events, entrystart=entrystart, entrystop=entrystop)
        # For info
        labels1 = ['is_e', 'is_egamma']
        aux.count_targets(events=events, names=labels1, entrystart=entrystart, entrystop=entrystop)
        prints.printbar()
        # @@ MC filtering done here @@
        cprint(__name__ + f'.load_root_file: Computing MC <filterfunc> ...', 'yellow')
        indmc = FILTERFUNC(X=X, VARS=VARS, xcorr_flow=args['xcorr_flow'])
        cprint(__name__ + f'.load_root_file: Prior MC <filterfunc>: {len(X)} events', 'green')
        cprint(__name__ + f'.load_root_file: After MC <filterfunc>: {sum(indmc)} events ', 'green')
        prints.printbar()
        # Apply the MC filter mask to both targets and inputs
        Y = Y[indmc]
        X = X[indmc]
    # =================================================================
    # -----------------------------------------------------------------
    # @@ Observable cut selections done here @@
    cprint(colored(__name__ + f'.load_root_file: Computing <cutfunc> ...'), 'yellow')
    cind = CUTFUNC(X=X, VARS=VARS, xcorr_flow=args['xcorr_flow'])
    # -----------------------------------------------------------------
    N_before = X.shape[0]
    ### Select events
    X = X[cind]
    if isMC: Y = Y[cind]
    N_after = X.shape[0]
    cprint(__name__ + f".load_root_file: Prior <cutfunc> selections: {N_before} events ", 'green')
    cprint(__name__ + f".load_root_file: Post <cutfunc> selections: {N_after} events ({N_after / N_before:.3f})", 'green')
    print('')
    showmem()
    prints.printbar()
    return X, Y, VARS
| [
"psutil.virtual_memory",
"numpy.sum",
"argparse.ArgumentParser",
"icenet.tools.aux.jagged2tensor",
"icenet.tools.io.impute_data",
"yaml.safe_load",
"icenet.tools.io.process_memory_use",
"icenet.tools.aux.pdf_2D_hist",
"icenet.tools.io.pick_vars",
"numpy.isfinite",
"numpy.linspace",
"uproot.ope... | [((683, 708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (706, 708), False, 'import argparse\n'), ((2414, 2536), 'icenet.tools.io.DATASET', 'io.DATASET', ([], {'func_loader': 'load_root_file_new', 'files': 'paths', 'class_id': 'class_id', 'frac': "args['frac']", 'rngseed': "args['rngseed']"}), "(func_loader=load_root_file_new, files=paths, class_id=class_id,\n frac=args['frac'], rngseed=args['rngseed'])\n", (2424, 2536), False, 'from icenet.tools import io\n'), ((4559, 4689), 'numpy.linspace', 'np.linspace', (["args['reweight_param']['bins_pt'][0]", "args['reweight_param']['bins_pt'][1]", "args['reweight_param']['bins_pt'][2]"], {}), "(args['reweight_param']['bins_pt'][0], args['reweight_param'][\n 'bins_pt'][1], args['reweight_param']['bins_pt'][2])\n", (4570, 4689), True, 'import numpy as np\n'), ((4780, 4913), 'numpy.linspace', 'np.linspace', (["args['reweight_param']['bins_eta'][0]", "args['reweight_param']['bins_eta'][1]", "args['reweight_param']['bins_eta'][2]"], {}), "(args['reweight_param']['bins_eta'][0], args['reweight_param'][\n 'bins_eta'][1], args['reweight_param']['bins_eta'][2])\n", (4791, 4913), True, 'import numpy as np\n'), ((6153, 6170), 'numpy.zeros', 'np.zeros', (['N_class'], {}), '(N_class)\n', (6161, 6170), True, 'import numpy as np\n'), ((6182, 6199), 'numpy.zeros', 'np.zeros', (['N_class'], {}), '(N_class)\n', (6190, 6199), True, 'import numpy as np\n'), ((6900, 6932), 'icenet.tools.io.pick_vars', 'io.pick_vars', (['data', 'KINEMATIC_ID'], {}), '(data, KINEMATIC_ID)\n', (6912, 6932), False, 'from icenet.tools import io\n'), ((6961, 6980), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (6974, 6980), False, 'import copy\n'), ((7359, 7378), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (7372, 7378), False, 'import copy\n'), ((8143, 8217), 'termcolor.cprint', 'cprint', (["(__name__ + f'.splitfactor: jagged2tensor processing ...')", '"""yellow"""'], {}), "(__name__ + 
f'.splitfactor: jagged2tensor processing ...', 'yellow')\n", (8149, 8217), False, 'from termcolor import colored, cprint\n'), ((8264, 8378), 'icenet.tools.aux.jagged2tensor', 'aux.jagged2tensor', ([], {'X': 'data_image.trn.x', 'VARS': 'j_vars', 'xyz': 'xyz', 'x_binedges': 'eta_binedges', 'y_binedges': 'phi_binedges'}), '(X=data_image.trn.x, VARS=j_vars, xyz=xyz, x_binedges=\n eta_binedges, y_binedges=phi_binedges)\n', (8281, 8378), False, 'from icenet.tools import aux\n'), ((8399, 8513), 'icenet.tools.aux.jagged2tensor', 'aux.jagged2tensor', ([], {'X': 'data_image.val.x', 'VARS': 'j_vars', 'xyz': 'xyz', 'x_binedges': 'eta_binedges', 'y_binedges': 'phi_binedges'}), '(X=data_image.val.x, VARS=j_vars, xyz=xyz, x_binedges=\n eta_binedges, y_binedges=phi_binedges)\n', (8416, 8513), False, 'from icenet.tools import aux\n'), ((8534, 8648), 'icenet.tools.aux.jagged2tensor', 'aux.jagged2tensor', ([], {'X': 'data_image.tst.x', 'VARS': 'j_vars', 'xyz': 'xyz', 'x_binedges': 'eta_binedges', 'y_binedges': 'phi_binedges'}), '(X=data_image.tst.x, VARS=j_vars, xyz=xyz, x_binedges=\n eta_binedges, y_binedges=phi_binedges)\n', (8551, 8648), False, 'from icenet.tools import aux\n'), ((9944, 10039), 'termcolor.cprint', 'cprint', (["(__name__ + f'.load_root_file: Loading with uproot from file ' + root_path)", '"""yellow"""'], {}), "(__name__ + f'.load_root_file: Loading with uproot from file ' +\n root_path, 'yellow')\n", (9950, 10039), False, 'from termcolor import colored, cprint\n'), ((10041, 10134), 'termcolor.cprint', 'cprint', (["(__name__ +\n f'.load_root_file: entrystart = {entrystart}, entrystop = {entrystop}')"], {}), "(__name__ +\n f'.load_root_file: entrystart = {entrystart}, entrystop = {entrystop}')\n", (10047, 10134), False, 'from termcolor import colored, cprint\n'), ((10144, 10166), 'uproot.open', 'uproot.open', (['root_path'], {}), '(root_path)\n', (10155, 10166), False, 'import uproot\n'), ((10258, 10349), 'termcolor.cprint', 'cprint', (["(__name__ + 
f'.load_root_file: events.numentries = {events.numentries}')", '"""green"""'], {}), "(__name__ +\n f'.load_root_file: events.numentries = {events.numentries}', 'green')\n", (10264, 10349), False, 'from termcolor import colored, cprint\n'), ((10565, 10586), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(4)'], {}), '(4)\n', (10583, 10586), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((11194, 11211), 'icenet.tools.prints.printbar', 'prints.printbar', ([], {}), '()\n', (11209, 11211), False, 'from icenet.tools import prints\n'), ((12722, 12825), 'termcolor.cprint', 'cprint', (["(__name__ + f'.load_root_file: Prior <cutfunc> selections: {N_before} events ')", '"""green"""'], {}), "(__name__ +\n f'.load_root_file: Prior <cutfunc> selections: {N_before} events ', 'green'\n )\n", (12728, 12825), False, 'from termcolor import colored, cprint\n'), ((12821, 12949), 'termcolor.cprint', 'cprint', (["(__name__ +\n f'.load_root_file: Post <cutfunc> selections: {N_after} events ({N_after / N_before:.3f})'\n )", '"""green"""'], {}), "(__name__ +\n f'.load_root_file: Post <cutfunc> selections: {N_after} events ({N_after / N_before:.3f})'\n , 'green')\n", (12827, 12949), False, 'from termcolor import colored, cprint\n'), ((12974, 12991), 'icenet.tools.prints.printbar', 'prints.printbar', ([], {}), '()\n', (12989, 12991), False, 'from icenet.tools import prints\n'), ((3454, 3505), 'icenet.tools.io.impute_data', 'io.impute_data', ([], {'X': 'data.trn.x', 'imputer': 'None'}), '(X=data.trn.x, imputer=None, **param)\n', (3468, 3505), False, 'from icenet.tools import io\n'), ((3547, 3605), 'icenet.tools.io.impute_data', 'io.impute_data', ([], {'X': 'data.tst.x', 'imputer': 'imputer_trn'}), '(X=data.tst.x, imputer=imputer_trn, **param)\n', (3561, 3605), False, 'from icenet.tools import io\n'), ((3640, 3698), 'icenet.tools.io.impute_data', 'io.impute_data', ([], {'X': 'data.val.x', 'imputer': 'imputer_trn'}), '(X=data.val.x, imputer=imputer_trn, 
**param)\n', (3654, 3698), False, 'from icenet.tools import io\n'), ((5213, 5332), 'icenet.tools.aux.pdf_2D_hist', 'aux.pdf_2D_hist', ([], {'X_A': 'PT[data.trn.y == c]', 'X_B': 'ETA[data.trn.y == c]', 'binedges_A': 'pt_binedges', 'binedges_B': 'eta_binedges'}), '(X_A=PT[data.trn.y == c], X_B=ETA[data.trn.y == c],\n binedges_A=pt_binedges, binedges_B=eta_binedges)\n', (5228, 5332), False, 'from icenet.tools import aux\n'), ((5524, 5765), 'icenet.tools.aux.reweightcoeff2D', 'aux.reweightcoeff2D', ([], {'X_A': 'PT', 'X_B': 'ETA', 'pdf': 'pdf', 'y': 'data.trn.y', 'N_class': 'N_class', 'equal_frac': "args['reweight_param']['equal_frac']", 'reference_class': "args['reweight_param']['reference_class']", 'max_reg': "args['reweight_param']['max_reg']"}), "(X_A=PT, X_B=ETA, pdf=pdf, y=data.trn.y, N_class=N_class,\n equal_frac=args['reweight_param']['equal_frac'], reference_class=args[\n 'reweight_param']['reference_class'], max_reg=args['reweight_param'][\n 'max_reg'])\n", (5543, 5765), False, 'from icenet.tools import aux\n'), ((5893, 5933), 'numpy.zeros', 'np.zeros', (['(data.trn.x.shape[0], N_class)'], {}), '((data.trn.x.shape[0], N_class))\n', (5901, 5933), True, 'import numpy as np\n'), ((6045, 6076), 'numpy.sum', 'np.sum', (['weights_doublet'], {'axis': '(1)'}), '(weights_doublet, axis=1)\n', (6051, 6076), True, 'import numpy as np\n'), ((6247, 6270), 'numpy.sum', 'np.sum', (['(data.trn.y == c)'], {}), '(data.trn.y == c)\n', (6253, 6270), True, 'import numpy as np\n'), ((6289, 6325), 'numpy.sum', 'np.sum', (['trn_weights[data.trn.y == c]'], {}), '(trn_weights[data.trn.y == c])\n', (6295, 6325), True, 'import numpy as np\n'), ((11372, 11450), 'termcolor.cprint', 'cprint', (["(__name__ + f'.load_root_file: Computing MC <targetfunc> ...')", '"""yellow"""'], {}), "(__name__ + f'.load_root_file: Computing MC <targetfunc> ...', 'yellow')\n", (11378, 11450), False, 'from termcolor import colored, cprint\n'), ((11591, 11686), 'icenet.tools.aux.count_targets', 
'aux.count_targets', ([], {'events': 'events', 'names': 'labels1', 'entrystart': 'entrystart', 'entrystop': 'entrystop'}), '(events=events, names=labels1, entrystart=entrystart,\n entrystop=entrystop)\n', (11608, 11686), False, 'from icenet.tools import aux\n'), ((11692, 11709), 'icenet.tools.prints.printbar', 'prints.printbar', ([], {}), '()\n', (11707, 11709), False, 'from icenet.tools import prints\n'), ((11758, 11836), 'termcolor.cprint', 'cprint', (["(__name__ + f'.load_root_file: Computing MC <filterfunc> ...')", '"""yellow"""'], {}), "(__name__ + f'.load_root_file: Computing MC <filterfunc> ...', 'yellow')\n", (11764, 11836), False, 'from termcolor import colored, cprint\n'), ((12115, 12132), 'icenet.tools.prints.printbar', 'prints.printbar', ([], {}), '()\n', (12130, 12132), False, 'from icenet.tools import prints\n'), ((12384, 12447), 'termcolor.colored', 'colored', (["(__name__ + f'.load_root_file: Computing <cutfunc> ...')"], {}), "(__name__ + f'.load_root_file: Computing <cutfunc> ...')\n", (12391, 12447), False, 'from termcolor import colored, cprint\n'), ((1190, 1212), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1204, 1212), False, 'import yaml\n'), ((3804, 3827), 'numpy.isfinite', 'np.isfinite', (['data.trn.x'], {}), '(data.trn.x)\n', (3815, 3827), True, 'import numpy as np\n'), ((3868, 3891), 'numpy.isfinite', 'np.isfinite', (['data.val.x'], {}), '(data.val.x)\n', (3879, 3891), True, 'import numpy as np\n'), ((3932, 3955), 'numpy.isfinite', 'np.isfinite', (['data.tst.x'], {}), '(data.tst.x)\n', (3943, 3955), True, 'import numpy as np\n'), ((4018, 4041), 'icenet.tools.io.process_memory_use', 'io.process_memory_use', ([], {}), '()\n', (4039, 4041), False, 'from icenet.tools import io\n'), ((4103, 4126), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (4124, 4126), False, 'import psutil\n'), ((9788, 9811), 'icenet.tools.io.process_memory_use', 'io.process_memory_use', ([], {}), '()\n', (9809, 9811), False, 
'from icenet.tools import io\n'), ((9880, 9903), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (9901, 9903), False, 'import psutil\n')] |
import numpy as np
import os
from pipeline import newPipe
from helpers import *
from joblib import Parallel, delayed
from sklearn.utils import resample
# Experiment hyper-parameters: logistic-regression settings plus which
# cached dataset variant ("small"/"full") and feature mode to use.
hParams = {"iters":10,
           "max_iter":4000,
           "regularization":"l2",
           "multi_class":"multinomial",
           "subsample":10000,
           "mode":"tf-idf",
           "data":"small"
           }
# Load the cached tf-idf features (stored as a 0-d object array, hence
# the .item() call) and the matching genre labels.
# NOTE(review): np.load defaults to allow_pickle=False on NumPy >= 1.16.3,
# which would make these object-array loads raise — confirm the pinned NumPy.
if hParams["data"]=="full":
    allfeatures = np.load("../data/nik/tfidf.npy").item()
    allgenres = np.load("../data/nik/genre.npy")
else:
    allfeatures = np.load("../data/nik/smalltfidf.npy").item()
    allgenres = np.load("../data/nik/smallword2VecGenresEncoded.npy")
# Optionally subsample (without replacement) to keep training tractable.
if hParams["subsample"]=="all":
    features = allfeatures
    genres = allgenres
else:
    features, genres = resample(allfeatures,
                                allgenres,
                                replace=False,
                                n_samples=hParams["subsample"])
# In[6]:
def runExp(features):
    """Run one logistic-regression experiment and persist its artifacts.

    Saves the result dict, the hyper-parameter dict (as .npy) and a
    human-readable hParams.txt into a fresh experiment directory.
    """
    exp_dir = makeExpDir()
    results = newPipe(
        features,
        genres,
        iters=hParams["iters"],
        max_iter=hParams["max_iter"],
        regularization=hParams["regularization"],
        multi_class=hParams["multi_class"],
    )
    np.save(os.path.join(exp_dir, "LogisticRegressionDict.npy"), results)
    np.save(os.path.join(exp_dir, "hParams.npy"), hParams)
    with open(os.path.join(exp_dir, "hParams.txt"), "w") as f:
        f.writelines("{}:{}\n".format(k, v) for k, v in hParams.items())
# Launch a single experiment with the prepared (possibly subsampled) features.
runExp(features)
| [
"numpy.load",
"os.path.join",
"sklearn.utils.resample",
"pipeline.newPipe"
] | [((401, 433), 'numpy.load', 'np.load', (['"""../data/nik/genre.npy"""'], {}), "('../data/nik/genre.npy')\n", (408, 433), True, 'import numpy as np\n'), ((519, 572), 'numpy.load', 'np.load', (['"""../data/nik/smallword2VecGenresEncoded.npy"""'], {}), "('../data/nik/smallword2VecGenresEncoded.npy')\n", (526, 572), True, 'import numpy as np\n'), ((684, 763), 'sklearn.utils.resample', 'resample', (['allfeatures', 'allgenres'], {'replace': '(False)', 'n_samples': "hParams['subsample']"}), "(allfeatures, allgenres, replace=False, n_samples=hParams['subsample'])\n", (692, 763), False, 'from sklearn.utils import resample\n'), ((869, 1036), 'pipeline.newPipe', 'newPipe', (['features', 'genres'], {'iters': "hParams['iters']", 'max_iter': "hParams['max_iter']", 'regularization': "hParams['regularization']", 'multi_class': "hParams['multi_class']"}), "(features, genres, iters=hParams['iters'], max_iter=hParams[\n 'max_iter'], regularization=hParams['regularization'], multi_class=\n hParams['multi_class'])\n", (876, 1036), False, 'from pipeline import newPipe\n'), ((1080, 1131), 'os.path.join', 'os.path.join', (['expPath', '"""LogisticRegressionDict.npy"""'], {}), "(expPath, 'LogisticRegressionDict.npy')\n", (1092, 1131), False, 'import os\n'), ((1161, 1197), 'os.path.join', 'os.path.join', (['expPath', '"""hParams.npy"""'], {}), "(expPath, 'hParams.npy')\n", (1173, 1197), False, 'import os\n'), ((345, 377), 'numpy.load', 'np.load', (['"""../data/nik/tfidf.npy"""'], {}), "('../data/nik/tfidf.npy')\n", (352, 377), True, 'import numpy as np\n'), ((458, 495), 'numpy.load', 'np.load', (['"""../data/nik/smalltfidf.npy"""'], {}), "('../data/nik/smalltfidf.npy')\n", (465, 495), True, 'import numpy as np\n'), ((1222, 1258), 'os.path.join', 'os.path.join', (['expPath', '"""hParams.txt"""'], {}), "(expPath, 'hParams.txt')\n", (1234, 1258), False, 'import os\n')] |
"""
Produce integration test data, which is tested by the `test_integration_features.py`
tests. One thing to note here is that all redshifts are reasonably high.
This is necessary, because low redshifts mean that neutral fractions are small,
and then numerical noise gets relatively more important, and can make the comparison
fail at the tens-of-percent level.
"""
import glob
import h5py
import numpy as np
import os
import sys
import tempfile
from powerbox import get_power
from py21cmfast import (
AstroParams,
CosmoParams,
FlagOptions,
UserParams,
config,
determine_halo_list,
global_params,
initial_conditions,
perturb_field,
perturb_halo_list,
run_coeval,
run_lightcone,
)
# Fixed seed so regenerated reference data is reproducible.
SEED = 12345
# Reference files live next to the tests.
DATA_PATH = os.path.join(os.path.dirname(__file__), "test_data")
# Simulation setup shared by every regression case below; small boxes keep
# runtimes manageable.
DEFAULT_USER_PARAMS = {
    "HII_DIM": 50,
    "DIM": 150,
    "BOX_LEN": 100,
    "NO_RNG": True,
    "USE_INTERPOLATION_TABLES": True,
}
DEFAULT_ZPRIME_STEP_FACTOR = 1.04
# Each entry is [redshift, option-overrides]; one reference file is produced
# per entry. Redshifts are kept reasonably high (see module docstring).
OPTIONS = (
    [12, {}],
    [12, {"PERTURB_ON_HIGH_RES": True}],
    [11, {"zprime_step_factor": 1.02}],
    [30, {"z_heat_max": 40}],
    [13, {"zprime_step_factor": 1.05, "z_heat_max": 25, "HMF": 0}],
    [16, {"interp_perturb_field": True}],
    [14, {"USE_MASS_DEPENDENT_ZETA": True}],
    [9, {"SUBCELL_RSD": True}],
    [10, {"INHOMO_RECO": True}],
    [16, {"HMF": 3, "USE_TS_FLUCT": True}],
    [20, {"z_heat_max": 45, "M_MIN_in_Mass": True, "HMF": 2}],
    [35, {"USE_FFTW_WISDOM": True}],
    [
        18,
        {
            "z_heat_max": 25,
            "USE_MINI_HALOS": True,
            "USE_MASS_DEPENDENT_ZETA": True,
            "INHOMO_RECO": True,
            "USE_TS_FLUCT": True,
            "zprime_step_factor": 1.2,
            "N_THREADS": 4,
            "USE_FFTW_WISDOM": True,
            "NUM_FILTER_STEPS_FOR_Ts": 8,
        },
    ],
    [8, {"N_THREADS": 2}],
    [10, {"PHOTON_CONS": True}],
    [
        8.5,
        {
            "USE_MASS_DEPENDENT_ZETA": True,
            "PHOTON_CONS": True,
            "z_heat_max": 25,
            "zprime_step_factor": 1.1,
        },
    ],
    [
        9,
        {
            "USE_MASS_DEPENDENT_ZETA": True,
            "USE_TS_FLUCT": True,
            "INHOMO_RECO": True,
            "PHOTON_CONS": True,
            "z_heat_max": 25,
            "zprime_step_factor": 1.1,
        },
    ],
    [
        8.5,
        {
            "N_THREADS": 2,
            "USE_FFTW_WISDOM": True,
            "USE_MASS_DEPENDENT_ZETA": True,
            "INHOMO_RECO": True,
            "USE_TS_FLUCT": True,
            "PHOTON_CONS": True,
            "z_heat_max": 25,
            "zprime_step_factor": 1.1,
        },
    ],
    [9, {"USE_HALO_FIELD": True}],
    [
        8.5,
        {
            "USE_MASS_DEPENDENT_ZETA": True,
            "USE_HALO_FIELD": True,
            "USE_TS_FLUCT": True,
            "z_heat_max": 25,
            "zprime_step_factor": 1.1,
        },
    ],
    [
        8.5,
        {
            "USE_MASS_DEPENDENT_ZETA": True,
            "USE_HALO_FIELD": True,
            "USE_TS_FLUCT": False,
            "PERTURB_ON_HIGH_RES": True,
            "N_THREADS": 4,
            "z_heat_max": 25,
            "zprime_step_factor": 1.1,
        },
    ],
    [
        12.0,
        {
            "USE_MASS_DEPENDENT_ZETA": True,
            "USE_TS_FLUCT": True,
            "PERTURB_ON_HIGH_RES": False,
            "N_THREADS": 4,
            "z_heat_max": 25,
            "zprime_step_factor": 1.2,
            "NUM_FILTER_STEPS_FOR_Ts": 4,
            "USE_INTERPOLATION_TABLES": False,
        },
    ],
    [
        12.0,
        {
            "USE_TS_FLUCT": True,
            "N_THREADS": 4,
            "z_heat_max": 25,
            "zprime_step_factor": 1.2,
            "NUM_FILTER_STEPS_FOR_Ts": 4,
            "USE_INTERPOLATION_TABLES": False,
        },
    ],
    [
        12.0,
        {
            "USE_MINI_HALOS": True,
            "USE_MASS_DEPENDENT_ZETA": True,
            "USE_TS_FLUCT": True,
            "N_THREADS": 4,
            "z_heat_max": 25,
            "zprime_step_factor": 1.2,
            "NUM_FILTER_STEPS_FOR_Ts": 4,
            "USE_INTERPOLATION_TABLES": False,
        },
    ],
    [
        12.1,
        # BUG FIX: this key was misspelled "USE_INTEPOLATION_TABLES" (missing
        # the R), so the option was silently dropped instead of being applied.
        {"N_THREADS": 4, "FAST_FCOLL_TABLES": True, "USE_INTERPOLATION_TABLES": True},
    ],
)
# Cases exercising only the perturb-field step.
OPTIONS_PT = (
    [10, {}],
    [10, {"SECOND_ORDER_LPT_CORRECTIONS": 0}],
    [10, {"EVOLVE_DENSITY_LINEARLY": 1}],
    [10, {"PERTURB_ON_HIGH_RES": True}],
)
# Cases exercising only the halo-field step.
OPTIONS_HALO = ([9, {"USE_HALO_FIELD": True}],)
def get_defaults(kwargs, cls):
    """Resolve the parameter dict for *cls*: its ``_defaults_`` overridden by *kwargs*."""
    resolved = {}
    for name, default in cls._defaults_.items():
        resolved[name] = kwargs.get(name, default)
    return resolved
def get_all_defaults(kwargs):
    """Resolve defaults for every py21cmfast parameter group at once.

    Returns (user_params, cosmo_params, astro_params, flag_options) dicts.
    """
    return (
        get_defaults(kwargs, UserParams),
        get_defaults(kwargs, CosmoParams),
        get_defaults(kwargs, AstroParams),
        get_defaults(kwargs, FlagOptions),
    )
def get_all_options(redshift, **kwargs):
    """Build the keyword set for run_coeval/run_lightcone at *redshift*.

    Unknown kwargs whose (upper-cased) name matches a global_params entry
    are passed through as global-parameter overrides.
    """
    user_params, cosmo_params, astro_params, flag_options = get_all_defaults(kwargs)
    user_params.update(DEFAULT_USER_PARAMS)
    options = {
        "redshift": redshift,
        "user_params": user_params,
        "cosmo_params": cosmo_params,
        "astro_params": astro_params,
        "flag_options": flag_options,
        "use_interp_perturb_field": kwargs.get("use_interp_perturb_field", False),
        "random_seed": SEED,
    }
    known_globals = {k.upper() for k in global_params.keys()}
    for key, value in kwargs.items():
        if key.upper() in known_globals:
            options[key] = value
    return options
def get_all_options_ics(**kwargs):
    """Build the keyword set for initial_conditions(), incl. global_params overrides."""
    user_params, cosmo_params, _, _ = get_all_defaults(kwargs)
    user_params.update(DEFAULT_USER_PARAMS)
    options = {
        "user_params": user_params,
        "cosmo_params": cosmo_params,
        "random_seed": SEED,
    }
    known_globals = {k.upper() for k in global_params.keys()}
    for key, value in kwargs.items():
        if key.upper() in known_globals:
            options[key] = value
    return options
def get_all_options_halo(redshift, **kwargs):
    """Build the keyword set for the halo-field pipeline at *redshift*."""
    user_params, cosmo_params, astro_params, flag_options = get_all_defaults(kwargs)
    user_params.update(DEFAULT_USER_PARAMS)
    options = {
        "redshift": redshift,
        "user_params": user_params,
        "cosmo_params": cosmo_params,
        "astro_params": astro_params,
        "flag_options": flag_options,
        "random_seed": SEED,
    }
    known_globals = {k.upper() for k in global_params.keys()}
    for key, value in kwargs.items():
        if key.upper() in known_globals:
            options[key] = value
    return options
def produce_coeval_power_spectra(redshift, **kwargs):
    """Run a coeval box at *redshift* and return (k, power, coeval)."""
    options = get_all_options(redshift, **kwargs)
    coeval = run_coeval(**options)
    power, kbins = get_power(
        coeval.brightness_temp,
        boxlength=coeval.user_params.BOX_LEN,
    )
    return kbins, power, coeval
def produce_lc_power_spectra(redshift, **kwargs):
    """Run a lightcone down to *redshift* and return (k, power, lightcone)."""
    options = get_all_options(redshift, **kwargs)
    lightcone = run_lightcone(max_redshift=options["redshift"] + 2, **options)
    power, kbins = get_power(
        lightcone.brightness_temp,
        boxlength=lightcone.lightcone_dimensions,
    )
    return kbins, power, lightcone
def produce_perturb_field_data(redshift, **kwargs):
    """Compute perturb-field power spectra plus density/velocity histograms.

    Returns
    -------
    (k_dens, p_dens, k_vel, p_vel, X_dens, Y_dens, X_vel, Y_vel, init_box)
    where the X/Y pairs are step-plot arrays of the normalised histograms.
    """
    options = get_all_options(redshift, **kwargs)
    options_ics = get_all_options_ics(**kwargs)
    # Keep only the kwargs that are global_params overrides.
    out = {
        key: kwargs[key]
        for key in kwargs
        if key.upper() in (k.upper() for k in global_params.keys())
    }
    # Velocities are tiny in absolute value; rescale so the histogram and
    # power-spectrum code works with O(1) numbers.
    velocity_normalisation = 1e16
    with config.use(regenerate=True, write=False):
        init_box = initial_conditions(**options_ics)
        pt_box = perturb_field(redshift=redshift, init_boxes=init_box, **out)
    p_dens, k_dens = get_power(
        pt_box.density,
        boxlength=options["user_params"]["BOX_LEN"],
    )
    p_vel, k_vel = get_power(
        pt_box.velocity * velocity_normalisation,
        boxlength=options["user_params"]["BOX_LEN"],
    )

    def hist(kind, xmin, xmax, nbins):
        """Normalised histogram of a pt_box field, as step-plot X/Y arrays."""
        data = getattr(pt_box, kind)
        if kind == "velocity":
            data = velocity_normalisation * data
        # BUG FIX: np.histogram's `normed` argument was deprecated and removed
        # (NumPy >= 1.24); `density=True` is the equivalent probability-density
        # normalisation. The `range` argument is dropped: it is redundant when
        # explicit bin edges are given.
        counts, edges = np.histogram(
            data,
            bins=np.linspace(xmin, xmax, nbins),
            density=True,
        )
        left, right = edges[:-1], edges[1:]
        X = np.array([left, right]).T.flatten()
        Y = np.array([counts, counts]).T.flatten()
        return X, Y

    X_dens, Y_dens = hist("density", -0.8, 2.0, 50)
    X_vel, Y_vel = hist("velocity", -2, 2, 50)
    return k_dens, p_dens, k_vel, p_vel, X_dens, Y_dens, X_vel, Y_vel, init_box
def produce_halo_field_data(redshift, **kwargs):
    """Return the perturbed halo list at *redshift* (cache disabled)."""
    options_halo = get_all_options_halo(redshift, **kwargs)
    with config.use(regenerate=True, write=False):
        return perturb_halo_list(**options_halo)
def get_filename(redshift, **kwargs):
    """Canonical power-spectrum reference filename for (redshift, options)."""
    # Sort keys so the same option set always maps to the same file name.
    tag = "_".join(f"{key}={kwargs[key]}" for key in sorted(kwargs))
    return os.path.join(DATA_PATH, f"power_spectra_z{redshift:.2f}_{tag}.h5")
def get_filename_pt(redshift, **kwargs):
    """Canonical perturb-field reference filename for (redshift, options)."""
    # Sort keys so the same option set always maps to the same file name.
    tag = "_".join(f"{key}={kwargs[key]}" for key in sorted(kwargs))
    return os.path.join(DATA_PATH, f"perturb_field_data_z{redshift:.2f}_{tag}.h5")
def get_filename_halo(redshift, **kwargs):
    """Canonical halo-field reference filename for (redshift, options)."""
    # Sort keys so the same option set always maps to the same file name.
    tag = "_".join(f"{key}={kwargs[key]}" for key in sorted(kwargs))
    return os.path.join(DATA_PATH, f"halo_field_data_z{redshift:.2f}_{tag}.h5")
def produce_power_spectra_for_tests(redshift, force, direc, **kwargs):
    """Produce (or reuse) the coeval+lightcone power-spectrum reference file.

    Returns the file path. An existing file is reused unless *force* is set.
    """
    fname = get_filename(redshift, **kwargs)
    # Need to manually remove it, otherwise h5py tries to add to it
    if os.path.exists(fname):
        if force:
            os.remove(fname)
        else:
            return fname
    # For tests, we *don't* want to use cached boxes, but we also want to use the
    # cache between the power spectra and lightcone. So we use the caller-provided
    # temporary cache directory.
    with config.use(direc=direc):
        k, p, coeval = produce_coeval_power_spectra(redshift, **kwargs)
        k_l, p_l, lc = produce_lc_power_spectra(redshift, **kwargs)
    with h5py.File(fname, "w") as fl:
        # BUG FIX: this loop previously reused the name `k`, clobbering the
        # coeval k array before it was written to "k_coeval" below.
        for key, val in kwargs.items():
            fl.attrs[key] = val
        fl.attrs["HII_DIM"] = coeval.user_params.HII_DIM
        fl.attrs["DIM"] = coeval.user_params.DIM
        fl.attrs["BOX_LEN"] = coeval.user_params.BOX_LEN
        fl["power_coeval"] = p
        fl["k_coeval"] = k
        fl["power_lc"] = p_l
        fl["k_lc"] = k_l
        fl["xHI"] = lc.global_xH
        fl["Tb"] = lc.global_brightness_temp
    print(f"Produced {fname} with {kwargs}")
    return fname
def produce_data_for_perturb_field_tests(redshift, force, **kwargs):
    """Produce (or reuse) the perturb-field regression file for one option set."""
    (
        k_dens,
        p_dens,
        k_vel,
        p_vel,
        X_dens,
        Y_dens,
        X_vel,
        Y_vel,
        init_box,
    ) = produce_perturb_field_data(redshift, **kwargs)
    fname = get_filename_pt(redshift, **kwargs)
    # h5py appends to an existing file, so stale output must be removed first.
    if os.path.exists(fname):
        if not force:
            return fname
        os.remove(fname)
    with h5py.File(fname, "w") as fl:
        for key, val in kwargs.items():
            fl.attrs[key] = val
        fl.attrs["HII_DIM"] = init_box.user_params.HII_DIM
        fl.attrs["DIM"] = init_box.user_params.DIM
        fl.attrs["BOX_LEN"] = init_box.user_params.BOX_LEN
        fl["power_dens"] = p_dens
        fl["k_dens"] = k_dens
        fl["power_vel"] = p_vel
        fl["k_vel"] = k_vel
        fl["pdf_dens"] = Y_dens
        fl["x_dens"] = X_dens
        fl["pdf_vel"] = Y_vel
        fl["x_vel"] = X_vel
    print(f"Produced {fname} with {kwargs}")
    return fname
def produce_data_for_halo_field_tests(redshift, force, **kwargs):
    """Produce (or reuse) the perturbed-halo-list regression file."""
    pt_halos = produce_halo_field_data(redshift, **kwargs)
    fname = get_filename_halo(redshift, **kwargs)
    # h5py appends to an existing file, so stale output must be removed first.
    if os.path.exists(fname):
        if not force:
            return fname
        os.remove(fname)
    with h5py.File(fname, "w") as fl:
        for key, val in kwargs.items():
            fl.attrs[key] = val
        fl["n_pt_halos"] = pt_halos.n_halos
        fl["pt_halo_masses"] = pt_halos.halo_masses
    print(f"Produced {fname} with {kwargs}")
    return fname
if __name__ == "__main__":
    import logging

    logger = logging.getLogger("21cmFAST")
    # Log level comes from a "--log<LEVEL>" argument, e.g. --logDEBUG.
    lvl = "WARNING"
    for arg in sys.argv:
        if arg.startswith("--log"):
            lvl = arg.split("--log")[-1]
    lvl = getattr(logging, lvl)
    logger.setLevel(lvl)
    global_params.ZPRIME_STEP_FACTOR = DEFAULT_ZPRIME_STEP_FACTOR
    # CLI flags controlling which reference data to (re)generate.
    force = "--force" in sys.argv
    remove = "--no-clean" not in sys.argv
    pt_only = "--pt-only" in sys.argv
    no_pt = "--no-pt" in sys.argv
    no_halo = "--no-halo" in sys.argv
    # By default run every OPTIONS entry; "--nums=0,3,7" selects a subset
    # (and disables cleaning, since the run is then partial).
    nums = range(len(OPTIONS))
    for arg in sys.argv:
        if arg.startswith("--nums="):
            nums = [int(x) for x in arg.split("=")[-1].split(",")]
            remove = False
            force = True
    if pt_only or no_pt or no_halo:
        remove = False
    # For tests, we *don't* want to use cached boxes, but we also want to use the
    # cache between the power spectra and lightcone. So we create a temporary
    # directory in which to cache results.
    direc = tempfile.mkdtemp()
    fnames = []
    if not pt_only:
        for redshift, kwargs in [OPTIONS[n] for n in nums]:
            fnames.append(
                produce_power_spectra_for_tests(redshift, force, direc, **kwargs)
            )
    if not no_pt:
        for redshift, kwargs in OPTIONS_PT:
            fnames.append(
                produce_data_for_perturb_field_tests(redshift, force, **kwargs)
            )
    if not no_halo:
        for redshift, kwargs in OPTIONS_HALO:
            fnames.append(produce_data_for_halo_field_tests(redshift, force, **kwargs))
    # Remove extra reference files that no configuration produces anymore.
    # NOTE(review): `nums` is a non-empty range/list here, so this condition is
    # always False and the cleanup branch below never runs — confirm intent.
    if not (nums or pt_only or no_pt or no_halo):
        all_files = glob.glob(os.path.join(DATA_PATH, "*"))
        for fl in all_files:
            if fl not in fnames:
                if remove:
                    print(f"Removing old file: {fl}")
                    os.remove(fl)
                else:
                    print(f"File is now redundant and can be removed: {fl}")
| [
"py21cmfast.run_coeval",
"py21cmfast.initial_conditions",
"py21cmfast.perturb_field",
"h5py.File",
"os.remove",
"os.path.dirname",
"os.path.exists",
"tempfile.mkdtemp",
"py21cmfast.run_lightcone",
"numpy.array",
"numpy.linspace",
"powerbox.get_power",
"py21cmfast.perturb_halo_list",
"py21c... | [((769, 794), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (784, 794), False, 'import os\n'), ((6689, 6710), 'py21cmfast.run_coeval', 'run_coeval', ([], {}), '(**options)\n', (6699, 6710), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((6722, 6793), 'powerbox.get_power', 'get_power', (['coeval.brightness_temp'], {'boxlength': 'coeval.user_params.BOX_LEN'}), '(coeval.brightness_temp, boxlength=coeval.user_params.BOX_LEN)\n', (6731, 6793), False, 'from powerbox import get_power\n'), ((6960, 7022), 'py21cmfast.run_lightcone', 'run_lightcone', ([], {'max_redshift': "(options['redshift'] + 2)"}), "(max_redshift=options['redshift'] + 2, **options)\n", (6973, 7022), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((7035, 7113), 'powerbox.get_power', 'get_power', (['lightcone.brightness_temp'], {'boxlength': 'lightcone.lightcone_dimensions'}), '(lightcone.brightness_temp, boxlength=lightcone.lightcone_dimensions)\n', (7044, 7113), False, 'from powerbox import get_power\n'), ((7686, 7756), 'powerbox.get_power', 'get_power', (['pt_box.density'], {'boxlength': "options['user_params']['BOX_LEN']"}), "(pt_box.density, boxlength=options['user_params']['BOX_LEN'])\n", (7695, 7756), False, 'from powerbox import get_power\n'), ((7799, 7900), 'powerbox.get_power', 'get_power', (['(pt_box.velocity * velocity_normalisation)'], {'boxlength': "options['user_params']['BOX_LEN']"}), "(pt_box.velocity * velocity_normalisation, boxlength=options[\n 'user_params']['BOX_LEN'])\n", (7808, 7900), False, 'from powerbox import get_power\n'), ((9070, 9100), 'os.path.join', 'os.path.join', (['DATA_PATH', 'fname'], {}), '(DATA_PATH, 
fname)\n', (9082, 9100), False, 'import os\n'), ((9353, 9383), 'os.path.join', 'os.path.join', (['DATA_PATH', 'fname'], {}), '(DATA_PATH, fname)\n', (9365, 9383), False, 'import os\n'), ((9635, 9665), 'os.path.join', 'os.path.join', (['DATA_PATH', 'fname'], {}), '(DATA_PATH, fname)\n', (9647, 9665), False, 'import os\n'), ((9860, 9881), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (9874, 9881), False, 'import os\n'), ((11269, 11290), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (11283, 11290), False, 'import os\n'), ((12217, 12238), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (12231, 12238), False, 'import os\n'), ((12651, 12680), 'logging.getLogger', 'logging.getLogger', (['"""21cmFAST"""'], {}), "('21cmFAST')\n", (12668, 12680), False, 'import logging\n'), ((13605, 13623), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (13621, 13623), False, 'import tempfile\n'), ((7491, 7531), 'py21cmfast.config.use', 'config.use', ([], {'regenerate': '(True)', 'write': '(False)'}), '(regenerate=True, write=False)\n', (7501, 7531), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((7552, 7585), 'py21cmfast.initial_conditions', 'initial_conditions', ([], {}), '(**options_ics)\n', (7570, 7585), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((7603, 7663), 'py21cmfast.perturb_field', 'perturb_field', ([], {'redshift': 'redshift', 'init_boxes': 'init_box'}), '(redshift=redshift, init_boxes=init_box, **out)\n', (7616, 7663), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, 
perturb_halo_list, run_coeval, run_lightcone\n'), ((8710, 8750), 'py21cmfast.config.use', 'config.use', ([], {'regenerate': '(True)', 'write': '(False)'}), '(regenerate=True, write=False)\n', (8720, 8750), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((8771, 8804), 'py21cmfast.perturb_halo_list', 'perturb_halo_list', ([], {}), '(**options_halo)\n', (8788, 8804), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((10182, 10205), 'py21cmfast.config.use', 'config.use', ([], {'direc': 'direc'}), '(direc=direc)\n', (10192, 10205), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((10357, 10378), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (10366, 10378), False, 'import h5py\n'), ((11388, 11409), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (11397, 11409), False, 'import h5py\n'), ((12336, 12357), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (12345, 12357), False, 'import h5py\n'), ((9913, 9929), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (9922, 9929), False, 'import os\n'), ((11322, 11338), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (11331, 11338), False, 'import os\n'), ((12270, 12286), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (12279, 12286), False, 'import os\n'), ((14294, 14322), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""*"""'], {}), "(DATA_PATH, '*')\n", (14306, 14322), False, 'import os\n'), ((8148, 8178), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 
'nbins'], {}), '(xmin, xmax, nbins)\n', (8159, 8178), True, 'import numpy as np\n'), ((5519, 5539), 'py21cmfast.global_params.keys', 'global_params.keys', ([], {}), '()\n', (5537, 5539), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((5949, 5969), 'py21cmfast.global_params.keys', 'global_params.keys', ([], {}), '()\n', (5967, 5969), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((6496, 6516), 'py21cmfast.global_params.keys', 'global_params.keys', ([], {}), '()\n', (6514, 6516), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n'), ((8305, 8328), 'numpy.array', 'np.array', (['[left, right]'], {}), '([left, right])\n', (8313, 8328), True, 'import numpy as np\n'), ((8353, 8375), 'numpy.array', 'np.array', (['[bins, bins]'], {}), '([bins, bins])\n', (8361, 8375), True, 'import numpy as np\n'), ((14487, 14500), 'os.remove', 'os.remove', (['fl'], {}), '(fl)\n', (14496, 14500), False, 'import os\n'), ((7418, 7438), 'py21cmfast.global_params.keys', 'global_params.keys', ([], {}), '()\n', (7436, 7438), False, 'from py21cmfast import AstroParams, CosmoParams, FlagOptions, UserParams, config, determine_halo_list, global_params, initial_conditions, perturb_field, perturb_halo_list, run_coeval, run_lightcone\n')] |
import torch
import torch.functional as F
from utils import FunctionRegistry
from torch_geometric.utils import intersection_and_union as i_and_u
import numpy as np
# Default configuration merged into each registered test's config.
test_default_cfg = {
    'type': 'test'
}
# Registry of evaluation routines, keyed by config 'type'.
TESTS = FunctionRegistry(default_cfg=test_default_cfg)
@TESTS.register
def test(model, loader):
    """Top-1 classification accuracy of *model* over the whole *loader* dataset."""
    model.eval()
    n_correct = 0
    for batch in loader:
        with torch.no_grad():
            scores = model(batch)['out']
            pred = scores.max(dim=1)[1]
            # Re-assemble the targets from the list of data objects.
            target = torch.cat([d.y for d in batch]).to(pred.device)
            n_correct += pred.eq(target).sum().item()
    return n_correct / len(loader.dataset)
@TESTS.register
def test_part_seg(model, loader):
    """Part-segmentation IoU, averaged per shape category then over categories."""
    model.eval()
    per_category = [[] for _ in range(len(loader.dataset.categories))]
    for batch in loader:
        logits = model(batch)['out']
        pred = logits.argmax(dim=1)
        # Re-assemble targets / batch indices / categories from the data list.
        target = torch.cat([d.y for d in batch]).to(logits.device)
        batch_idx = torch.cat(
            [torch.ones(d.y.shape) * i for i, d in enumerate(batch)]
        ).type(torch.LongTensor).to(logits.device)
        categories = torch.cat([d.category for d in batch])
        # Part-class ids must start at 0 for intersection_and_union; shift
        # them down (dataset labels are globally numbered across categories).
        target = target - target.min()
        inter, union = i_and_u(pred, target, target.max() + 1, batch_idx)
        iou = inter.cpu().to(torch.float) / union.cpu().to(torch.float)
        # Parts absent from both prediction and target count as perfect.
        iou[torch.isnan(iou)] = 1
        # Collect the per-sample IoU under its shape category.
        for sample_iou, category in zip(iou.unbind(), categories):
            per_category[category.item()].append(sample_iou)
    # Mean IoU: average within each category, then across categories.
    means = [torch.stack(v).mean(0).mean(0) for v in per_category]
    return torch.tensor(means).mean().item()
@TESTS.register
def test_sem_seg(model, loader):
    """Point accuracy and mean IoU for S3DIS semantic segmentation (13 classes)."""
    model.eval()
    n_correct = 0
    n_points = 0
    inter_sum = np.zeros((13,))
    union_sum = np.zeros((13,))
    for batch in loader:
        logits = model(batch)['out']
        pred = logits.argmax(dim=1)
        # Re-assemble targets / batch indices from the data list.
        target = torch.cat([d.y for d in batch]).to(logits.device)
        batch_idx = torch.cat(
            [torch.ones(d.y.shape) * i for i, d in enumerate(batch)]
        ).type(torch.LongTensor).to(logits.device)
        n_correct += (pred == target).sum()
        n_points += pred.size(0)
        # S3DIS: 13 classes
        inter, union = i_and_u(pred, target, 13, batch_idx)
        inter_sum += inter.cpu().to(torch.float).numpy().sum(axis=0)
        union_sum += union.cpu().to(torch.float).numpy().sum(axis=0)
    # Overall point accuracy.
    accuracy = n_correct.type(torch.FloatTensor) / n_points
    # Mean IoU over the 13 classes.
    ious = inter_sum / union_sum
    mean_iou = ious.sum() / 13.0
    return {
        'primary': mean_iou.item(),
        'i_total': inter_sum.tolist(),
        'u_total': union_sum.tolist(),
        'mean_IoU': mean_iou.item(),
        'correct': n_correct.item(),
        'total': n_points,
        'accuracy': accuracy.item()
    }
| [
"torch.ones",
"torch.stack",
"numpy.zeros",
"torch_geometric.utils.intersection_and_union",
"torch.cat",
"utils.FunctionRegistry",
"torch.no_grad",
"torch.isnan",
"torch.tensor"
] | [((216, 262), 'utils.FunctionRegistry', 'FunctionRegistry', ([], {'default_cfg': 'test_default_cfg'}), '(default_cfg=test_default_cfg)\n', (232, 262), False, 'from utils import FunctionRegistry\n'), ((2511, 2526), 'numpy.zeros', 'np.zeros', (['(13,)'], {}), '((13,))\n', (2519, 2526), True, 'import numpy as np\n'), ((2542, 2557), 'numpy.zeros', 'np.zeros', (['(13,)'], {}), '((13,))\n', (2550, 2557), True, 'import numpy as np\n'), ((1109, 1146), 'torch.cat', 'torch.cat', (['[d.category for d in data]'], {}), '([d.category for d in data])\n', (1118, 1146), False, 'import torch\n'), ((3028, 3065), 'torch_geometric.utils.intersection_and_union', 'i_and_u', (['pred', 'data_y', '(13)', 'data_batch'], {}), '(pred, data_y, 13, data_batch)\n', (3035, 3065), True, 'from torch_geometric.utils import intersection_and_union as i_and_u\n'), ((377, 392), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (390, 392), False, 'import torch\n'), ((2068, 2084), 'torch.isnan', 'torch.isnan', (['iou'], {}), '(iou)\n', (2079, 2084), False, 'import torch\n'), ((463, 493), 'torch.cat', 'torch.cat', (['[d.y for d in data]'], {}), '([d.y for d in data])\n', (472, 493), False, 'import torch\n'), ((913, 943), 'torch.cat', 'torch.cat', (['[d.y for d in data]'], {}), '([d.y for d in data])\n', (922, 943), False, 'import torch\n'), ((2739, 2769), 'torch.cat', 'torch.cat', (['[d.y for d in data]'], {}), '([d.y for d in data])\n', (2748, 2769), False, 'import torch\n'), ((2364, 2382), 'torch.tensor', 'torch.tensor', (['ious'], {}), '(ious)\n', (2376, 2382), False, 'import torch\n'), ((2303, 2319), 'torch.stack', 'torch.stack', (['iou'], {}), '(iou)\n', (2314, 2319), False, 'import torch\n'), ((991, 1012), 'torch.ones', 'torch.ones', (['d.y.shape'], {}), '(d.y.shape)\n', (1001, 1012), False, 'import torch\n'), ((2817, 2838), 'torch.ones', 'torch.ones', (['d.y.shape'], {}), '(d.y.shape)\n', (2827, 2838), False, 'import torch\n')] |
#!/usr/bin/python3
r'''Test of the mrcal-convert-lensmodel tool
'''
# add test for mrcal-convert-lensmodel. Should test
# 1. reoptimization
# 2. sampled at various distances with/without uncertainties
#
# splined -> opencv8 is probably the interesting direction. I can imagine
# that the no-geometry sampled solves would fail here because opencv8 just
# wouldn't fit in that case
# splined model: fix the core
import sys
import numpy as np
import numpysane as nps
import os
import subprocess
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
import tempfile
import atexit
import shutil
# Scratch directory for the converted model; best-effort removal on exit.
workdir = tempfile.mkdtemp()

def cleanup():
    """Remove the temporary work directory (idempotent, best-effort)."""
    global workdir
    if workdir is None:
        # Already cleaned up.
        return
    try:
        shutil.rmtree(workdir)
    # BUG FIX: was a bare `except:` that swallowed every exception
    # (including KeyboardInterrupt); only filesystem errors are expected here.
    except OSError:
        pass
    else:
        workdir = None

atexit.register(cleanup)
# Reference splined camera model used as the conversion input.
filename_splined = f"{testdir}/data/cam0.splined.cameramodel"
with open(filename_splined, "r") as f: text_splined = f.read()
model_splined = mrcal.cameramodel(filename_splined)
# These settings are semi-arbitrary. I could test that higher radii fit more
# stuff until we go too high, and it doesn't fit at all anymore. Need --sampled
# because my models don't have optimization_inputs. For a basic test this is
# fine
text_out_cahvor = \
    subprocess.check_output( (f"{testdir}/../mrcal-convert-lensmodel",
                              "--radius", "800",
                              "--intrinsics-only",
                              "--sampled",
                              "--distance", "3",
                              "LENSMODEL_CAHVOR",
                              "-",),
                             encoding = 'ascii',
                             input = text_splined,
                             stderr = subprocess.DEVNULL)
# Write the converted model to the scratch dir and load it back.
filename_out_cahvor = f"{workdir}/cam0.out.cahvor.cameramodel"
with open(filename_out_cahvor, "w") as f:
    print(text_out_cahvor, file=f)
model_out_cahvor = mrcal.cameramodel(filename_out_cahvor)
# Projection difference between the converted and original models, evaluated
# at the same distance the conversion was sampled at.
difflen, diff, q0, implied_Rt10 = \
    mrcal.projection_diff( (model_out_cahvor, model_splined),
                           use_uncertainties = False,
                           distance = 3)
# The two models should agree (to ~0.1 pixel) at the image center.
icenter = np.array(difflen.shape) // 2
testutils.confirm_equal( 0, difflen[icenter[0],icenter[1]],
                         eps = 0.1,
                         msg = "Low-enough diff at the center")
testutils.finish()
| [
"atexit.register",
"mrcal.projection_diff",
"subprocess.check_output",
"os.path.realpath",
"tempfile.mkdtemp",
"mrcal.cameramodel",
"testutils.finish",
"numpy.array",
"shutil.rmtree",
"testutils.confirm_equal"
] | [((741, 759), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (757, 759), False, 'import tempfile\n'), ((882, 906), 'atexit.register', 'atexit.register', (['cleanup'], {}), '(cleanup)\n', (897, 906), False, 'import atexit\n'), ((1050, 1085), 'mrcal.cameramodel', 'mrcal.cameramodel', (['filename_splined'], {}), '(filename_splined)\n', (1067, 1085), False, 'import mrcal\n'), ((1352, 1593), 'subprocess.check_output', 'subprocess.check_output', (["(f'{testdir}/../mrcal-convert-lensmodel', '--radius', '800',\n '--intrinsics-only', '--sampled', '--distance', '3', 'LENSMODEL_CAHVOR',\n '-')"], {'encoding': '"""ascii"""', 'input': 'text_splined', 'stderr': 'subprocess.DEVNULL'}), "((f'{testdir}/../mrcal-convert-lensmodel',\n '--radius', '800', '--intrinsics-only', '--sampled', '--distance', '3',\n 'LENSMODEL_CAHVOR', '-'), encoding='ascii', input=text_splined, stderr=\n subprocess.DEVNULL)\n", (1375, 1593), False, 'import subprocess\n'), ((2022, 2060), 'mrcal.cameramodel', 'mrcal.cameramodel', (['filename_out_cahvor'], {}), '(filename_out_cahvor)\n', (2039, 2060), False, 'import mrcal\n'), ((2102, 2200), 'mrcal.projection_diff', 'mrcal.projection_diff', (['(model_out_cahvor, model_splined)'], {'use_uncertainties': '(False)', 'distance': '(3)'}), '((model_out_cahvor, model_splined), use_uncertainties=\n False, distance=3)\n', (2123, 2200), False, 'import mrcal\n'), ((2304, 2414), 'testutils.confirm_equal', 'testutils.confirm_equal', (['(0)', 'difflen[icenter[0], icenter[1]]'], {'eps': '(0.1)', 'msg': '"""Low-enough diff at the center"""'}), "(0, difflen[icenter[0], icenter[1]], eps=0.1, msg=\n 'Low-enough diff at the center')\n", (2327, 2414), False, 'import testutils\n'), ((2465, 2483), 'testutils.finish', 'testutils.finish', ([], {}), '()\n', (2481, 2483), False, 'import testutils\n'), ((536, 562), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (552, 562), False, 'import os\n'), ((2274, 2297), 'numpy.array', 'np.array', 
(['difflen.shape'], {}), '(difflen.shape)\n', (2282, 2297), True, 'import numpy as np\n'), ((811, 833), 'shutil.rmtree', 'shutil.rmtree', (['workdir'], {}), '(workdir)\n', (824, 833), False, 'import shutil\n')] |
#
# Licensed under the The Unlicense
#
"""Enrolls face images into a recognition database. Database includes a label
for each face and a 128D encoding created by dlib
python3 training.py \
-v \
--model /usr/share/edgetpu/examples/models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite \
--label ./labels.pickle \
--descriptor ./face_descriptors.npy \
--input /home/pi/dalek-doorman/training
"""
import argparse
import os
import sys
import time
import pickle
import cv2
import dlib
import numpy as np
from PIL import Image
from edgetpu.detection.engine import DetectionEngine
from faceextractor import FaceDataExtractor
# ---- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-v',
    help = "Preview encoded images",
    action = 'store_true')
parser.add_argument('--model',
    help='Full path to mobilenet tflite model',
    default = "/usr/share/edgetpu/examples/models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite")
parser.add_argument('--label',
    help='Label file path.',
    default = "./labels.pickle")
parser.add_argument('--descriptor',
    help='Descriptor file path.',
    default = "./face_descriptors.npy")
parser.add_argument('--input',
    help='Training image path.',
    default = "/home/pi/dalek-doorman/training")
args = parser.parse_args()
# Edge TPU face detector plus dlib-based descriptor extractor
model = DetectionEngine(args.model)
face_ext = FaceDataExtractor()
DESCRIPTORS = args.descriptor
LABELS = args.label
initialize = False
if args.v:
    win = dlib.image_window()
# Walk the training directory; each sub-directory name is used as the label
for root, dirs, files in os.walk(args.input):
    for file_name in files:
        # create a fully described path for each training image
        train_filename = os.path.join(root, file_name)
        directory = root.split(os.path.sep)[-1]
        print(train_filename)
        # load as RGB: cv2 reads BGR, but dlib/PIL expect RGB
        np_img = cv2.imread(train_filename, cv2.IMREAD_COLOR)
        np_img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(np_img)
        face_list = model.detect_with_image(img,
            threshold=0.7,
            keep_aspect_ratio=True,
            relative_coord=False,
            top_k=1)
        if len(face_list) < 1:
            sys.exit("Face not found in training image")
        if len(face_list) > 1:
            raise ValueError("More than one face found in training image")
        face = face_list[0]
        face_data = face_ext.extract_data(face = face, np_frame = np_img)
        if face_data:
            if args.v:
                win.set_title(directory)
                win.set_image(face_data['face_chip_img'])
                time.sleep(5.0)
            try:
                # deserialize descriptors and labels from disk
                descriptors = np.load(DESCRIPTORS)
                # Bug fix: close the label file deterministically (was left open)
                with open(LABELS, 'rb') as f:
                    labels = pickle.load(f)
            except IOError as error:
                # Bug fix: was a plain string, so "{error}" printed literally
                print(f"{error} - Recognition DB not found")
                initialize = True # files do not exist
            if initialize:
                print("Creating new recognition DB")
                descriptors = face_data['face_descriptor']
                labels = [directory]
                initialize = False
            else:
                # add calling parameters to end of existing lists
                descriptors = np.concatenate([descriptors, face_data['face_descriptor']], axis=0)
                labels.append(directory)
            # Serialize descriptors and labels
            np.save(DESCRIPTORS, descriptors)
            with open(LABELS, "wb") as f:
                pickle.dump(labels, f)
            print(f"Loaded record #{len(labels)} {train_filename} as {directory}")
sys.exit("Training completed successfully")
| [
"dlib.image_window",
"numpy.load",
"numpy.save",
"pickle.dump",
"argparse.ArgumentParser",
"numpy.concatenate",
"cv2.cvtColor",
"edgetpu.detection.engine.DetectionEngine",
"os.walk",
"time.sleep",
"faceextractor.FaceDataExtractor",
"cv2.imread",
"pickle.load",
"PIL.Image.fromarray",
"os.... | [((640, 665), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (663, 665), False, 'import argparse\n'), ((1295, 1322), 'edgetpu.detection.engine.DetectionEngine', 'DetectionEngine', (['args.model'], {}), '(args.model)\n', (1310, 1322), False, 'from edgetpu.detection.engine import DetectionEngine\n'), ((1334, 1353), 'faceextractor.FaceDataExtractor', 'FaceDataExtractor', ([], {}), '()\n', (1351, 1353), False, 'from faceextractor import FaceDataExtractor\n'), ((1491, 1510), 'os.walk', 'os.walk', (['args.input'], {}), '(args.input)\n', (1498, 1510), False, 'import os\n'), ((3623, 3666), 'sys.exit', 'sys.exit', (['"""Training completed successfully"""'], {}), "('Training completed successfully')\n", (3631, 3666), False, 'import sys\n'), ((1445, 1464), 'dlib.image_window', 'dlib.image_window', ([], {}), '()\n', (1462, 1464), False, 'import dlib\n'), ((1667, 1696), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (1679, 1696), False, 'import os\n'), ((1791, 1835), 'cv2.imread', 'cv2.imread', (['train_filename', 'cv2.IMREAD_COLOR'], {}), '(train_filename, cv2.IMREAD_COLOR)\n', (1801, 1835), False, 'import cv2\n'), ((1853, 1892), 'cv2.cvtColor', 'cv2.cvtColor', (['np_img', 'cv2.COLOR_BGR2RGB'], {}), '(np_img, cv2.COLOR_BGR2RGB)\n', (1865, 1892), False, 'import cv2\n'), ((1907, 1930), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img'], {}), '(np_img)\n', (1922, 1930), False, 'from PIL import Image\n'), ((2141, 2185), 'sys.exit', 'sys.exit', (['"""Face not found in training image"""'], {}), "('Face not found in training image')\n", (2149, 2185), False, 'import sys\n'), ((3425, 3458), 'numpy.save', 'np.save', (['DESCRIPTORS', 'descriptors'], {}), '(DESCRIPTORS, descriptors)\n', (3432, 3458), True, 'import numpy as np\n'), ((2554, 2569), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (2564, 2569), False, 'import time\n'), ((2680, 2700), 'numpy.load', 'np.load', (['DESCRIPTORS'], {}), 
'(DESCRIPTORS)\n', (2687, 2700), True, 'import numpy as np\n'), ((2765, 2779), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2776, 2779), False, 'import pickle\n'), ((3257, 3324), 'numpy.concatenate', 'np.concatenate', (["[descriptors, face_data['face_descriptor']]"], {'axis': '(0)'}), "([descriptors, face_data['face_descriptor']], axis=0)\n", (3271, 3324), True, 'import numpy as np\n'), ((3517, 3539), 'pickle.dump', 'pickle.dump', (['labels', 'f'], {}), '(labels, f)\n', (3528, 3539), False, 'import pickle\n')] |
import face_recognition
import picamera
import numpy as np
import os
# Capture enrollment photos with the Pi camera: for each of five people,
# prompt for an id and a name, save five snapshots into dataset/<name>/,
# and check that a face is actually detectable in each captured frame.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)

def createFolder(path):
    """Create dataset/<path> unless it already exists."""
    try:
        if not os.path.exists("dataset/" + path):
            os.makedirs("dataset/" + path)
    except OSError:
        print("Error: creating directory" + path)

# NOTE: 'id'/'name' shadow/overload common names but are kept for
# compatibility with the trailing print statements.
id = []
name = []
print("Capturing images")
# Bug fix: the original 'while True' over fixed-size 5-element lists crashed
# with an IndexError on the sixth person and the final prints were
# unreachable; loop over exactly five people instead.
for i in range(5):
    id.append(input("Enter the id: "))
    name.append(input("Enter the name: "))
    createFolder(name[i])
    os.chdir("dataset/" + name[i])
    for j in range(5):
        camera.capture(id[i] + "-" + str(j) + ".jpg")
        output = face_recognition.load_image_file(id[i] + "-" + str(j) + ".jpg")
        # new image encoding
        face_encodings = face_recognition.face_encodings(output)
        # checking that a face is there or not
        if len(face_encodings) == 0:
            print("Please show a face")
            continue
    # return to the project root before processing the next person
    os.chdir("../../")
print(id)
print(name) | [
"os.makedirs",
"numpy.empty",
"face_recognition.face_encodings",
"os.path.exists",
"os.chdir",
"picamera.PiCamera"
] | [((78, 97), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (95, 97), False, 'import picamera\n'), ((138, 177), 'numpy.empty', 'np.empty', (['(240, 320, 3)'], {'dtype': 'np.uint8'}), '((240, 320, 3), dtype=np.uint8)\n', (146, 177), True, 'import numpy as np\n'), ((553, 583), 'os.chdir', 'os.chdir', (["('dataset/' + name[i])"], {}), "('dataset/' + name[i])\n", (561, 583), False, 'import os\n'), ((985, 1003), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (993, 1003), False, 'import os\n'), ((797, 836), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['output'], {}), '(output)\n', (828, 836), False, 'import face_recognition\n'), ((224, 257), 'os.path.exists', 'os.path.exists', (["('dataset/' + path)"], {}), "('dataset/' + path)\n", (238, 257), False, 'import os\n'), ((270, 300), 'os.makedirs', 'os.makedirs', (["('dataset/' + path)"], {}), "('dataset/' + path)\n", (281, 300), False, 'import os\n')] |
import argparse
import psutil
import numpy as np
from pyquaternion import Quaternion
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import view_points
import mayavi.mlab as mlab
from utils import draw_lidar, draw_gt_boxes3d
def get_lidar_points(lyftdata, lidar_token):
    '''Get lidar point cloud in the frame of the ego vehicle.

    Loads the lidar sweep referenced by *lidar_token* and transforms the
    points from the sensor frame into a "flat vehicle" frame: the ego
    frame with only the yaw rotation applied (pitch/roll removed).

    lyftdata    : devkit database handle used for record lookups
    lidar_token : token of a "sample_data" record for the lidar
    returns     : transformed point coordinates as returned by view_points
    '''
    sd_record = lyftdata.get("sample_data", lidar_token)
    sensor_modality = sd_record["sensor_modality"]
    # Get aggregated point cloud in lidar frame.
    sample_rec = lyftdata.get("sample", sd_record["sample_token"])
    chan = sd_record["channel"]
    ref_chan = "LIDAR_TOP"
    pc, times = LidarPointCloud.from_file_multisweep(
        lyftdata, sample_rec, chan, ref_chan, nsweeps=1
    )
    # Compute transformation matrices for lidar point cloud
    # sensor -> vehicle: homogeneous transform from the calibrated-sensor
    # rotation and translation
    cs_record = lyftdata.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    pose_record = lyftdata.get("ego_pose", sd_record["ego_pose_token"])
    vehicle_from_sensor = np.eye(4)
    vehicle_from_sensor[:3, :3] = Quaternion(cs_record["rotation"]).rotation_matrix
    vehicle_from_sensor[:3, 3] = cs_record["translation"]
    # vehicle -> flat vehicle: keep only the yaw component of the ego pose
    # (yaw-only quaternion composed with the inverse of the full rotation)
    ego_yaw = Quaternion(pose_record["rotation"]).yaw_pitch_roll[0]
    rot_vehicle_flat_from_vehicle = np.dot(
        Quaternion(scalar=np.cos(ego_yaw / 2), vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix,
        Quaternion(pose_record["rotation"]).inverse.rotation_matrix,
    )
    vehicle_flat_from_vehicle = np.eye(4)
    vehicle_flat_from_vehicle[:3, :3] = rot_vehicle_flat_from_vehicle
    # Apply the combined transform; normalize=False keeps Euclidean coords
    points = view_points(
        pc.points[:3, :], np.dot(vehicle_flat_from_vehicle, vehicle_from_sensor), normalize=False
    )
    return points
def plot_lidar_with_depth(lyftdata, sample):
    '''Plot one sample's lidar point cloud with its ground-truth 3D boxes.

    Draws the LIDAR_TOP point cloud of *sample* in a mayavi figure together
    with the annotation boxes, both expressed in flat-vehicle coordinates.
    '''
    # Bug fix: was a plain string, so the token was never interpolated
    print(f'Plotting sample, token: {sample["token"]}')
    lidar_token = sample["data"]["LIDAR_TOP"]
    pc = get_lidar_points(lyftdata, lidar_token)
    # boxes in the same flat-vehicle frame as the lidar points
    _, boxes, _ = lyftdata.get_sample_data(
        lidar_token, use_flat_vehicle_coordinates=True
    )
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0),
                      fgcolor=None, engine=None, size=(1000, 500))
    # plot lidar points
    draw_lidar(pc.T, fig=fig)
    # plot boxes one by one
    for box in boxes:
        corners = view_points(box.corners(), view=np.eye(3), normalize=False)
        draw_gt_boxes3d([corners.T], fig=fig, color=(0, 1, 0))
    mlab.show(1)
def plot_one_sample(lyftdata, sample_token):
    ''' plots only one sample's top lidar point cloud, then waits for the
    user before closing the figure '''
    sample = lyftdata.get('sample', sample_token)
    plot_lidar_with_depth(lyftdata, sample)
    # block until the user acknowledges, then tear the figure down
    input_str=input('Press any key to terminate \n')
    mlab.close()
    # NOTE(review): kills every process named "display" -- presumably a
    # cleanup workaround for lingering viewer processes; confirm this is
    # safe on the target system.
    for proc in psutil.process_iter():
        if proc.name() == "display":
            proc.kill()
def plot_one_scene(lyftdata, scene_token):
    '''Plot every sample of a scene in order, one figure at a time.

    Walks the scene's chain of samples (via each sample's 'next' token) and
    calls plot_lidar_with_depth for each one; typing "kill" at the prompt
    stops early.
    '''
    scene = lyftdata.get('scene', scene_token)
    token = scene['first_sample_token']
    # an empty token marks the end of the sample chain
    while token != '':
        sample = lyftdata.get('sample', token)
        plot_lidar_with_depth(lyftdata, sample)
        token = sample['next']
        input_str=input('Press any key to continue to next sample, enter "kill" to terminate \n')
        mlab.close()
        # NOTE(review): kills every process named "display" -- presumably a
        # viewer-cleanup workaround; confirm on the target system.
        for proc in psutil.process_iter():
            if proc.name() == "display":
                proc.kill()
        if input_str == "kill":
            break
if __name__=='__main__':
    from pathlib import Path
    parser = argparse.ArgumentParser(description='Mayavi visualization of nuscenes dataset')
    parser.add_argument('-d', '--dataroot', type=str, default="./data/lyft/", metavar='N',
                        help='data directory path (default: ./data/lyft/)')
    parser.add_argument('--scene', type=str, default=None, metavar='N', help='scene token')
    parser.add_argument('--sample', type=str, default=None, metavar='N', help='sample token')
    args = parser.parse_args()
    dataroot = Path(args.dataroot)
    print('Loading dataset with Lyft SDK ...')
    # Bug fix: the dataroot was hard-coded to 'D:/mini', silently ignoring
    # the --dataroot argument; pass the parsed path instead.
    lyftdata = NuScenes(version='v1.0-mini', dataroot=str(dataroot), verbose=True)
    print('Done!, starting 3d visualization ...')
    # --scene takes precedence over --sample when both are supplied
    if args.scene:
        plot_one_scene(lyftdata, args.scene)
    elif args.sample:
        plot_one_sample(lyftdata, args.sample)
| [
"nuscenes.utils.data_classes.LidarPointCloud.from_file_multisweep",
"utils.draw_lidar",
"psutil.process_iter",
"mayavi.mlab.figure",
"argparse.ArgumentParser",
"mayavi.mlab.show",
"nuscenes.nuscenes.NuScenes",
"mayavi.mlab.close",
"pathlib.Path",
"pyquaternion.Quaternion",
"utils.draw_gt_boxes3d... | [((718, 807), 'nuscenes.utils.data_classes.LidarPointCloud.from_file_multisweep', 'LidarPointCloud.from_file_multisweep', (['lyftdata', 'sample_rec', 'chan', 'ref_chan'], {'nsweeps': '(1)'}), '(lyftdata, sample_rec, chan, ref_chan,\n nsweeps=1)\n', (754, 807), False, 'from nuscenes.utils.data_classes import LidarPointCloud\n'), ((1064, 1073), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1070, 1073), True, 'import numpy as np\n'), ((1536, 1545), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1542, 1545), True, 'import numpy as np\n'), ((2106, 2199), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': 'None', 'bgcolor': '(0, 0, 0)', 'fgcolor': 'None', 'engine': 'None', 'size': '(1000, 500)'}), '(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size\n =(1000, 500))\n', (2117, 2199), True, 'import mayavi.mlab as mlab\n'), ((2244, 2269), 'utils.draw_lidar', 'draw_lidar', (['pc.T'], {'fig': 'fig'}), '(pc.T, fig=fig)\n', (2254, 2269), False, 'from utils import draw_lidar, draw_gt_boxes3d\n'), ((2466, 2478), 'mayavi.mlab.show', 'mlab.show', (['(1)'], {}), '(1)\n', (2475, 2478), True, 'import mayavi.mlab as mlab\n'), ((2735, 2747), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (2745, 2747), True, 'import mayavi.mlab as mlab\n'), ((2764, 2785), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (2783, 2785), False, 'import psutil\n'), ((3483, 3562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mayavi visualization of nuscenes dataset"""'}), "(description='Mayavi visualization of nuscenes dataset')\n", (3506, 3562), False, 'import argparse\n'), ((3964, 3983), 'pathlib.Path', 'Path', (['args.dataroot'], {}), '(args.dataroot)\n', (3968, 3983), False, 'from pathlib import Path\n'), ((4081, 4144), 'nuscenes.nuscenes.NuScenes', 'NuScenes', ([], {'version': '"""v1.0-mini"""', 'dataroot': '"""D:/mini"""', 'verbose': '(True)'}), "(version='v1.0-mini', dataroot='D:/mini', verbose=True)\n", 
(4089, 4144), False, 'from nuscenes.nuscenes import NuScenes\n'), ((1108, 1141), 'pyquaternion.Quaternion', 'Quaternion', (["cs_record['rotation']"], {}), "(cs_record['rotation'])\n", (1118, 1141), False, 'from pyquaternion import Quaternion\n'), ((1668, 1722), 'numpy.dot', 'np.dot', (['vehicle_flat_from_vehicle', 'vehicle_from_sensor'], {}), '(vehicle_flat_from_vehicle, vehicle_from_sensor)\n', (1674, 1722), True, 'import numpy as np\n'), ((2407, 2461), 'utils.draw_gt_boxes3d', 'draw_gt_boxes3d', (['[corners.T]'], {'fig': 'fig', 'color': '(0, 1, 0)'}), '([corners.T], fig=fig, color=(0, 1, 0))\n', (2422, 2461), False, 'from utils import draw_lidar, draw_gt_boxes3d\n'), ((3235, 3247), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (3245, 3247), True, 'import mayavi.mlab as mlab\n'), ((3268, 3289), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (3287, 3289), False, 'import psutil\n'), ((1231, 1266), 'pyquaternion.Quaternion', 'Quaternion', (["pose_record['rotation']"], {}), "(pose_record['rotation'])\n", (1241, 1266), False, 'from pyquaternion import Quaternion\n'), ((1437, 1472), 'pyquaternion.Quaternion', 'Quaternion', (["pose_record['rotation']"], {}), "(pose_record['rotation'])\n", (1447, 1472), False, 'from pyquaternion import Quaternion\n'), ((2371, 2380), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2377, 2380), True, 'import numpy as np\n'), ((1355, 1374), 'numpy.cos', 'np.cos', (['(ego_yaw / 2)'], {}), '(ego_yaw / 2)\n', (1361, 1374), True, 'import numpy as np\n'), ((1390, 1409), 'numpy.sin', 'np.sin', (['(ego_yaw / 2)'], {}), '(ego_yaw / 2)\n', (1396, 1409), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import os
import torch
import glob
from torchvision import transforms
from torch.autograd import Variable
import torch.nn.functional as F
from model import ScNet
# GPU ID
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# The path of data and log
data_root = './data/test_img/'
project_root = './log/'
# Data size and Patch size
kPrcgNum = 1600
patch_size = 96
# Ten-crop evaluation: each image yields 10 patches whose predictions are
# averaged below
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
transform = transforms.Compose([
    transforms.TenCrop(patch_size),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
    transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops]))
    ])
# instantiate model and initialize weights
model = ScNet()
model.cuda()
checkpoint = torch.load(project_root + '/checkpoint_1200.pth')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
imageTmp = []  # ground-truth labels (1 = image from NI dir, 0 = from CG dir)
testTmp = []  # predicted labels
# Gather test images: NI (natural) images first, then CG (computer graphics)
testImageDir = data_root + 'NI'
testImageFile = list(glob.glob(testImageDir + '/*.jpg')) + list(glob.glob(testImageDir + '/*.png'))
testImageDir = data_root + 'CG'
testImageFile += list(glob.glob(testImageDir + '/*.jpg')) + list(glob.glob(testImageDir + '/*.png'))
for line in testImageFile:
    image_path = line
    # the parent directory name encodes the ground-truth class
    lists = image_path.split('/')
    if lists[-2] == 'NI':
        testClass = 1
    else:
        testClass = 0
    with open(image_path, 'rb') as f:
        img = Image.open(f)
        img = img.convert('RGB')
    test_input = transform(img)
    test_input = test_input.cuda()
    # NOTE(review): Variable(volatile=True) is the legacy (pre-0.4) way of
    # disabling autograd; modern PyTorch would use torch.no_grad() instead.
    input_var = Variable(test_input, volatile=True)
    ncrops, c, h, w = input_var.size()
    # compute output
    output = model(input_var.view(-1, c, h, w))
    # _, pred = torch.max(output, 1)
    pred = F.softmax(output, dim=1)
    # average the softmax over the 10 crops of this image
    mean = torch.mean(pred, dim=0)
    label = 0
    if mean[1] > 0.5:
        label = 1
    testTmp.append(int(label)) # the predicted label
    imageTmp.append(testClass)
imageLabelNp = np.array(imageTmp)
testLabelNp = np.array(testTmp)
# Computing average accuracy on patches
result = imageLabelNp == testLabelNp
# assumes exactly kPrcgNum NI images come first in the list -- TODO confirm
cg_result = result[kPrcgNum:]
ni_result = result[:kPrcgNum]
print('NI accuracy is:', ni_result.sum()*100.0/len(ni_result))
print('CG accuracy is:', cg_result.sum()*100.0/len(cg_result))
print('The average accuracy is:', (ni_result.sum()*100.0/len(ni_result) + cg_result.sum()*100.0/len(cg_result))/ 2)
| [
"torch.mean",
"torch.autograd.Variable",
"torch.load",
"model.ScNet",
"PIL.Image.open",
"torch.nn.functional.softmax",
"torchvision.transforms.ToTensor",
"numpy.array",
"glob.glob",
"torchvision.transforms.Normalize",
"torchvision.transforms.TenCrop"
] | [((411, 465), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (431, 465), False, 'from torchvision import transforms\n'), ((816, 823), 'model.ScNet', 'ScNet', ([], {}), '()\n', (821, 823), False, 'from model import ScNet\n'), ((850, 899), 'torch.load', 'torch.load', (["(project_root + '/checkpoint_1200.pth')"], {}), "(project_root + '/checkpoint_1200.pth')\n", (860, 899), False, 'import torch\n'), ((2065, 2083), 'numpy.array', 'np.array', (['imageTmp'], {}), '(imageTmp)\n', (2073, 2083), True, 'import numpy as np\n'), ((2098, 2115), 'numpy.array', 'np.array', (['testTmp'], {}), '(testTmp)\n', (2106, 2115), True, 'import numpy as np\n'), ((508, 538), 'torchvision.transforms.TenCrop', 'transforms.TenCrop', (['patch_size'], {}), '(patch_size)\n', (526, 538), False, 'from torchvision import transforms\n'), ((1043, 1077), 'glob.glob', 'glob.glob', (["(testImageDir + '/*.jpg')"], {}), "(testImageDir + '/*.jpg')\n", (1052, 1077), False, 'import glob\n'), ((1086, 1120), 'glob.glob', 'glob.glob', (["(testImageDir + '/*.png')"], {}), "(testImageDir + '/*.png')\n", (1095, 1120), False, 'import glob\n'), ((1176, 1210), 'glob.glob', 'glob.glob', (["(testImageDir + '/*.jpg')"], {}), "(testImageDir + '/*.jpg')\n", (1185, 1210), False, 'import glob\n'), ((1219, 1253), 'glob.glob', 'glob.glob', (["(testImageDir + '/*.png')"], {}), "(testImageDir + '/*.png')\n", (1228, 1253), False, 'import glob\n'), ((1471, 1484), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1481, 1484), False, 'from PIL import Image\n'), ((1613, 1648), 'torch.autograd.Variable', 'Variable', (['test_input'], {'volatile': '(True)'}), '(test_input, volatile=True)\n', (1621, 1648), False, 'from torch.autograd import Variable\n'), ((1826, 1850), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (1835, 1850), True, 'import torch.nn.functional as F\n'), ((1866, 
1889), 'torch.mean', 'torch.mean', (['pred'], {'dim': '(0)'}), '(pred, dim=0)\n', (1876, 1889), False, 'import torch\n'), ((593, 614), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (612, 614), False, 'from torchvision import transforms\n')] |
##########################################################################################
# Python stuff written by <NAME>
# His email: <EMAIL>
# Most functions written starting February 2019, some added as time went on
# Most of these functions are translations from matlab scripts written by <NAME>
# His e-mail: <EMAIL>
# But he wrote those scripts in November 2016 so good luck reaching him
##########################################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colorsMPL
import math
import os.path
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import scipy
from scipy import interpolate
from matplotlib import ticker
from matplotlib.patches import Ellipse
# Physical constants (SI units)
mp=1.6726219236900001e-27  # proton mass [kg]
mn=mp  # neutron mass, approximated by the proton mass
qe=1.6021766339999999E-019  # elementary charge [C]
ev=qe  # one electronvolt expressed in joules
eps0=8.8541878128000006E-012  # vacuum permittivity [F/m]
me=9.10938356e-31  # electron mass [kg]
def stupidFunction():
    """Sanity check that this module was imported correctly."""
    print('hey stupid')
def find_nearest(array, value):
    """Return the index of the element of *array* closest to *value*."""
    arr = np.asarray(array)
    distance = np.abs(arr - value)
    return distance.argmin()
def is_number(s):
    """Return True when *s* parses as a float.

    Handy for picking numeric tokens out of output-file lines.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def read_field(f,fieldname,dims,intField=False):
    """Read one named variable from an open B2.5 output file.

    Scans *f* forward until a header line containing *fieldname* appears,
    checks that the element count on that header equals prod(dims), then
    collects that many whitespace-separated values.  Used by the various
    read_b2f* functions.

    f         : open file object, positioned at or before the field header
    fieldname : identifier to search for
    dims      : expected array shape (int or sequence of ints)
    intField  : parse values as int instead of float

    Returns a numpy array of shape *dims* (reshaped in Fortran order for
    multi-dimensional fields), or 0 if EOF is hit before the header.
    """
    line = f.readline().rstrip()
    # find the right field
    while fieldname not in line:
        line = f.readline().rstrip()
        # readline() returns '' at EOF, so an empty (stripped) line is
        # treated as "field not present in this file"
        if len(line) == 0:
            print('read_field: EOF reached without finding '+str(fieldname))
            print('The first variable not found is probably not in your file')
            print('Take out the search for that variable in the function doc or update your SOLPS so that the output is produced')
            return 0
    # Consistency check: number of elements specified
    # in the file should equal prod(dims)
    for i in range(len(line.split())):
        if is_number(line.split()[i]): numin = int(line.split()[i])
    if (numin != np.prod(dims)):
        print(line)
        print('read_field: inconsistent number of input elements.');
    fieldVal=[]
    # collect field values, reading lines until the expected count is reached
    while (len(fieldVal) != numin):
        line = f.readline().rstrip()
        for i in range(len(line.split())):
            if (intField): fieldVal.append(int(line.split()[i]))
            else:
                fieldVal.append(float(line.split()[i]))
    fieldVal=np.array(fieldVal)
    # multi-dimensional fields are stored column-major (Fortran order)
    if (np.size(dims) > 1): fieldVal = fieldVal.reshape(dims,order='F').copy()
    return fieldVal
def read_ft44_field(fid,ver,fieldname,dims,intField=False):
    """Auxiliary routine to read one named field from a fort.44 file.

    fid       : open file object for fort.44
    ver       : integer file version; for versions >= 20160829 the field
                label and element count are written into the file, enabling
                a consistency check against prod(dims)
    fieldname : identifier of the field to read
    dims      : expected array shape (may be adjusted for 'wld*' fields)
    intField  : parse values as int instead of float

    Returns a numpy array; 'wld*' fields keep C ordering, everything else
    is reshaped in Fortran order.
    """
    # Version 20160829: field label and size are specified in fort.44
    # Do consistency check on the data
    if (ver >= 20160829):
        # Search the file until identifier 'fieldname' is found
        line = fid.readline().rstrip()
        while fieldname not in line:
            line = fid.readline().rstrip()
            if len(line) == 0: print('read_ft44_field: EOF reached without finding '+str(fieldname))
        # Consistency check: number of elements specified in the file should equal
        # prod(dims)
        for i in range(len(line.split())):
            if is_number(line.split()[i]): numin = int(line.split()[i])
        if (numin != np.prod(dims) and 'wld' not in fieldname):
            print('issue with field '+fieldname)
            print("numin="+str(numin))
            print("np.prod(dims)="+str(np.prod(dims)))
            print('read_ft44_rfield: inconsistent number of input elements.')
        elif (numin!= np.prod(dims) and 'wldnek' in fieldname):
            # 'wldnek' fields: trust the count written in the file
            dims = [numin]
        elif (numin!= np.prod(dims) and 'wldna' in fieldname):
            # 'wldna' fields: infer the second dimension from the count
            dims[1] = int(numin/dims[0])
    # Read the data
    fieldVal=[]
    # collect field values
    while (len(fieldVal) != numin):
        line = fid.readline().rstrip()
        # 'wld' fields: stop once numin-1 values have been collected
        if ('wld' in fieldname) and len(fieldVal)>=numin-1: break
        for i in range(len(line.split())):
            if ('wlpump' in fieldname):
                # 'wlpump' lines can contain non-numeric tokens; skip them
                if not is_number(line.split()[i]): continue
            if (intField): fieldVal.append(int(line.split()[i]))
            else: fieldVal.append(float(line.split()[i]))
    fieldVal=np.array(fieldVal)
    if (np.size(dims) > 1 and 'wld' not in fieldname): fieldVal = fieldVal.reshape(dims,order='F').copy()
    if (np.size(dims) > 1 and 'wld' in fieldname): fieldVal = fieldVal.reshape(dims).copy()
    return fieldVal
def read_b2fgmtry(b2fgmtryLoc):
    """Read a formatted b2fgmtry geometry file created by B2.5.

    b2fgmtryLoc : path to the b2fgmtry file.

    Returns an object whose attributes (crx, cry, bb, vol, hx, hy, cut
    indices, ...) hold the geometry arrays, mostly shaped (nx+2, ny+2, ...)
    i.e. including guard cells.  Note: the fields are read in file order
    inside gmtryResults.__init__, so the attribute order below must match
    the order the fields appear in the file.
    """
    fieldname = 'nx,ny'
    fid = open(b2fgmtryLoc)
    line = fid.readline().rstrip()#read the first line
    version = line[7:17]
    print('read_b2fgmtry -- file version '+version)#check the version
    dim = read_field(fid,'nx,ny',2,True)#check the grid size
    nx = dim[0]
    ny = dim[1]
    # qc gained an extra dimension starting with file version 03.001.000
    qcdim = [nx+2,ny+2]
    if (version >= '03.001.000'): qcdim = [nx+2,ny+2,2]
    #initialize class that will hold all the data of b2fgmtry
    class gmtryResults:
        def __init__(self):
            # Read symmetry information
            self.isymm = read_field(fid,'isymm',1,True)
            # Read gmtry variables
            self.crx = read_field(fid,'crx' ,[nx+2,ny+2,4])
            self.cry = read_field(fid,'cry' ,[nx+2,ny+2,4])
            self.fpsi = read_field(fid,'fpsi',[nx+2,ny+2,4])
            self.ffbz = read_field(fid,'ffbz',[nx+2,ny+2,4])
            self.bb = read_field(fid,'bb' ,[nx+2,ny+2,4])
            self.vol = read_field(fid,'vol' ,[nx+2,ny+2])
            self.hx = read_field(fid,'hx' ,[nx+2,ny+2])
            self.hy = read_field(fid,'hy' ,[nx+2,ny+2])
            self.qz = read_field(fid,'qz' ,[nx+2,ny+2,2])
            self.qc = read_field(fid,'qc' ,qcdim)
            self.gs = read_field(fid,'gs' ,[nx+2,ny+2,3])
            # Some other geometrical parameters
            self.nlreg = read_field(fid,'nlreg',1,True)
            self.nlxlo = read_field(fid,'nlxlo',self.nlreg,True)
            self.nlxhi = read_field(fid,'nlxhi',self.nlreg,True)
            self.nlylo = read_field(fid,'nlylo',self.nlreg,True)
            self.nlyhi = read_field(fid,'nlyhi',self.nlreg,True)
            self.nlloc = read_field(fid,'nlloc',self.nlreg,True)
            self.nncut = read_field(fid,'nncut' ,1,True)
            self.leftcut = read_field(fid,'leftcut' ,self.nncut,True)
            self.rightcut = read_field(fid,'rightcut' ,self.nncut,True)
            self.topcut = read_field(fid,'topcut' ,self.nncut,True)
            self.bottomcut = read_field(fid,'bottomcut',self.nncut,True)
            self.leftix = read_field(fid,'leftix' ,[nx+2,ny+2],True)
            self.rightix = read_field(fid,'rightix' ,[nx+2,ny+2],True)
            self.topix = read_field(fid,'topix' ,[nx+2,ny+2],True)
            self.bottomix = read_field(fid,'bottomix' ,[nx+2,ny+2],True)
            self.leftiy = read_field(fid,'leftiy' ,[nx+2,ny+2],True)
            self.rightiy = read_field(fid,'rightiy' ,[nx+2,ny+2],True)
            self.topiy = read_field(fid,'topiy' ,[nx+2,ny+2],True)
            self.bottomiy = read_field(fid,'bottomiy' ,[nx+2,ny+2],True)
            self.region = read_field(fid,'region' ,[nx+2,ny+2,3],True)
            self.nnreg = read_field(fid,'nnreg' ,3,True)
            self.resignore = read_field(fid,'resignore' ,[nx+2,ny+2,2],True)
            self.periodic_bc = read_field(fid,'periodic_bc',1,True)
            self.pbs = read_field(fid,'pbs' ,[nx+2,ny+2,2])
            self.parg = read_field(fid,'parg',100)
    gmtry = gmtryResults()#instantiate class
    # Close file
    fid.close()
    print('done reading geometry file')
    return gmtry
def read_b2fstate(b2fstateLoc):
    """Read a formatted b2fstate plasma-state file created by B2.5.

    b2fstateLoc : path to the b2fstate file.

    Returns an object whose attributes hold the state arrays: species data
    (zamin, zamax, zn, am), plasma quantities (na, ne, ua, te, ti, po, ...)
    and fluxes (fna, fhe, fhi, fch, ...), mostly shaped (nx+2, ny+2, ...)
    i.e. including guard cells.  Fields are read in file order inside
    stateResults.__init__, so the attribute order must match the file.
    """
    fieldname = 'nx,ny'
    fid = open(b2fstateLoc)
    line = fid.readline().rstrip()#read the first line
    version = line[7:17]
    print('read_b2fstate -- file version '+version)#check the version
    dim = read_field(fid,'nx,ny,ns',3,True)#check the grid size
    nx = dim[0]
    ny = dim[1]
    ns = dim[2]
    # flux arrays gained an extra dimension starting with version 03.001.000
    fluxdim = [nx+2,ny+2,2]
    fluxdimp = [nx+2,ny+2]
    fluxdims = [nx+2,ny+2,2,ns]
    if (version >= '03.001.000'):
        fluxdim = [nx+2,ny+2,2,2]
        fluxdimp = fluxdim
        fluxdims = [nx+2,ny+2,2,2,ns]
    #initialize class that will hold all the data of b2fstate
    class stateResults:
        def __init__(self):
            # Read charges etc.
            self.zamin = read_field(fid,'zamin',ns)
            self.zamax = read_field(fid,'zamax',ns)
            self.zn = read_field(fid,'zn',ns)
            self.am = read_field(fid,'am',ns)
            # Read state variables
            self.na = read_field(fid,'na',[nx+2,ny+2,ns])
            self.ne = read_field(fid,'ne',[nx+2,ny+2])
            self.ua = read_field(fid,'ua',[nx+2,ny+2,ns])
            self.uadia = read_field(fid,'uadia',[nx+2,ny+2,2,ns])
            self.te = read_field(fid,'te',[nx+2,ny+2])
            self.ti = read_field(fid,'ti',[nx+2,ny+2])
            self.po = read_field(fid,'po',[nx+2,ny+2])
            # Read fluxes
            self.fna = read_field(fid,'fna',fluxdims)
            self.fhe = read_field(fid,'fhe',fluxdim)
            self.fhi = read_field(fid,'fhi',fluxdim)
            self.fch = read_field(fid,'fch',fluxdim)
            self.fch_32 = read_field(fid,'fch_32',fluxdim)
            self.fch_52 = read_field(fid,'fch_52',fluxdim)
            self.kinrgy = read_field(fid,'kinrgy',[nx+2,ny+2,ns])
            self.time = read_field(fid,'time',1)
            self.fch_p = read_field(fid,'fch_p',fluxdimp)
    state = stateResults()#instantiate class
    # Close file
    fid.close()
    print('done reading state file')
    return state
def read_b2fplasmf(fileName,nx,ny,ns):
    """Read a formatted b2fplasmf file created by B2.5.

    Returns a class instance holding SOME of the file's arrays (add more
    reads here if a variable you need is missing).  The fields are read in
    exactly the order they appear in the file: read_field consumes the
    stream sequentially, so do not reorder these assignments.

    Parameters
    ----------
    fileName : str, path to the b2fplasmf file
    nx, ny   : int, B2 grid dimensions (guard cells are added here as +2)
    ns       : int, number of species

    Returns
    -------
    plasmfResults instance, or 0 when the file does not exist.
    """
    if not (os.path.isfile(fileName)):
        print("b2fplasmf: Cannot find the filename")
        return 0
    fid = open(fileName)
    # NOTE(review): open() never returns -1 (MATLAB idiom); a failed open
    # raises OSError instead, so this check is effectively dead code.
    if (fid == -1): print("read_b2fplasmf: can't open file")
    # Get version of the b2fstate file
    line = fid.readline().rstrip()
    version = line[7:17]
    print('read_b2fplasmf -- file version '+version)
    # Expected array sizes, gmtry
    qcdim = [nx+2,ny+2]
    if version >= '03.001.000': qcdim = [nx+2,ny+2,2]
    # Expected array sizes, state
    fluxdim = [nx+2,ny+2,2]
    fluxdims = [nx+2,ny+2,2,ns]
    # Read basic data, there is more in b2fplasmf, I might grab it if I find out I need it
    # New variables also get added, this might be out of date, check the file if a var is missing
    class plasmfResults:
        def __init__(self):
            # Read gmtry variables (cell corner coordinates)
            self.crx = read_field(fid,'crx' ,[nx+2,ny+2,4])
            self.cry = read_field(fid,'cry' ,[nx+2,ny+2,4])
            # Read state variables
            self.fch = read_field(fid,'fch' ,fluxdim)
            self.fch0 = read_field(fid,'fch0' ,fluxdim)
            self.fchp = read_field(fid,'fchp' ,fluxdim)
            self.fhe = read_field(fid,'fhe' ,fluxdim)
            self.fhe0 = read_field(fid,'fhe0' ,fluxdim)
            self.fhep = read_field(fid,'fhep' ,fluxdim)
            self.fhet = read_field(fid,'fhet' ,fluxdim)
            self.fhi = read_field(fid,'fhi' ,fluxdim)
            self.fhi0 = read_field(fid,'fhi0' ,fluxdim)
            self.fhip = read_field(fid,'fhip' ,fluxdim)
            self.fhit = read_field(fid,'fhit' ,fluxdim)
            self.fna = read_field(fid,'fna' ,fluxdims)
            self.fna0 = read_field(fid,'fna0' ,fluxdims)
            self.fnap = read_field(fid,'fnap' ,fluxdims)
            self.fne = read_field(fid,'fne' ,fluxdim)
            self.fni = read_field(fid,'fni' ,fluxdim)
            self.na = read_field(fid,'na' ,[nx+2,ny+2,ns])
            self.na0 = read_field(fid,'na0' ,[nx+2,ny+2,ns])
            self.nap = read_field(fid,'nap' ,[nx+2,ny+2,ns])
            self.ne = read_field(fid,'ne' ,[nx+2,ny+2])
            self.ne0 = read_field(fid,'ne0' ,[nx+2,ny+2])
            self.ne2 = read_field(fid,'ne2' ,[nx+2,ny+2])
            self.nep = read_field(fid,'nep' ,[nx+2,ny+2])
            self.ni = read_field(fid,'ni' ,[nx+2,ny+2,2])
            self.ni0 = read_field(fid,'ni0' ,[nx+2,ny+2,2])
            self.pb = read_field(fid,'pb' ,[nx+2,ny+2])
            self.po = read_field(fid,'po' ,[nx+2,ny+2])
            self.po0 = read_field(fid,'po0' ,[nx+2,ny+2])
            self.pop = read_field(fid,'pop' ,[nx+2,ny+2])
            self.te = read_field(fid,'te' ,[nx+2,ny+2])
            self.te0 = read_field(fid,'te0' ,[nx+2,ny+2])
            self.tep = read_field(fid,'tep' ,[nx+2,ny+2])
            self.ti = read_field(fid,'ti' ,[nx+2,ny+2])
            self.ti0 = read_field(fid,'ti0' ,[nx+2,ny+2])
            self.tip = read_field(fid,'tip' ,[nx+2,ny+2])
            self.ua = read_field(fid,'ua' ,[nx+2,ny+2,ns])
            self.ua0 = read_field(fid,'ua0' ,[nx+2,ny+2,ns])
            self.uap = read_field(fid,'uap' ,[nx+2,ny+2,ns])
            self.uadia = read_field(fid,'uadia' ,[nx+2,ny+2,2,ns])
            self.fchdia = read_field(fid,'fchdia',fluxdim)
            self.fmo = read_field(fid,'fmo' ,fluxdims)
            self.fna_32 = read_field(fid,'fna_32',fluxdims)
            self.fna_52 = read_field(fid,'fna_52',fluxdims)
            self.fni_32 = read_field(fid,'fni_32',fluxdim)
            self.fni_52 = read_field(fid,'fni_52',fluxdim)
            self.fne_32 = read_field(fid,'fne_32',fluxdim)
            self.fne_52 = read_field(fid,'fne_52',fluxdim)
            self.wadia = read_field(fid,'wadia' ,[nx+2,ny+2,2,ns])
            self.vaecrb = read_field(fid,'vaecrb',[nx+2,ny+2,2,ns])
            self.facdrift = read_field(fid,'facdrift' ,[nx+2,ny+2])
            self.fac_ExB = read_field(fid,'fac_ExB' ,[nx+2,ny+2])
            self.fchvispar = read_field(fid,'fchvispar' ,fluxdim)
            self.fchvisper = read_field(fid,'fchvisper' ,fluxdim)
            self.fchin = read_field(fid,'fchin' ,fluxdim)
            self.fna_nodrift = read_field(fid,'fna_nodrift' ,fluxdims)
            self.fac_vis = read_field(fid,'fac_vis' ,[nx+2,ny+2])
            self.fna_mdf = read_field(fid,'fna_mdf' ,fluxdims)
            self.fhe_mdf = read_field(fid,'fhe_mdf' ,fluxdim)
            self.fhi_mdf = read_field(fid,'fhi_mdf' ,fluxdim)
            self.fnaPSch = read_field(fid,'fnaPSch' ,fluxdims)
            self.fhePSch = read_field(fid,'fhePSch' ,fluxdim)
            self.fhiPSch = read_field(fid,'fhiPSch' ,fluxdim)
            self.fna_fcor = read_field(fid,'fna_fcor' ,fluxdims)
            self.fna_he = read_field(fid,'fna_he' ,fluxdims)
            self.fchvisq = read_field(fid,'fchvisq' ,fluxdim)
            self.fchinert = read_field(fid,'fchinert' ,fluxdim)
            if version>'3.000.006':
                # these are only present in 3.000.007 and beyond
                self.fht = read_field(fid,'fht' ,fluxdim)
                self.fhj = read_field(fid,'fhj' ,fluxdim)
                self.fhm = read_field(fid,'fhm' ,fluxdims)
                self.fhp = read_field(fid,'fhp' ,fluxdims)
                self.resco = read_field(fid,'resco' ,[nx+2,ny+2,ns])
                self.reshe = read_field(fid,'reshe' ,[nx+2,ny+2])
                self.reshi = read_field(fid,'reshi' ,[nx+2,ny+2])
                self.resmo = read_field(fid,'resmo' ,[nx+2,ny+2,ns])
                self.resmt = read_field(fid,'resmt' ,[nx+2,ny+2])
                self.respo = read_field(fid,'respo' ,[nx+2,ny+2])
                self.sch = read_field(fid,'sch' ,[nx+2,ny+2,4])
                self.she = read_field(fid,'she' ,[nx+2,ny+2,4])
                self.shi = read_field(fid,'shi' ,[nx+2,ny+2,4])
                self.smo = read_field(fid,'smo' ,[nx+2,ny+2,4,ns])
                self.smq = read_field(fid,'smq' ,[nx+2,ny+2,4,ns])
                self.sna = read_field(fid,'sna' ,[nx+2,ny+2,2,ns])
                self.sne = read_field(fid,'sne' ,[nx+2,ny+2,2])
                self.rsana = read_field(fid,'rsana' ,[nx+2,ny+2,ns])
                self.rsahi = read_field(fid,'rsahi' ,[nx+2,ny+2,ns])
                self.rsamo = read_field(fid,'rsamo' ,[nx+2,ny+2,ns])
                self.rrana = read_field(fid,'rrana' ,[nx+2,ny+2,ns])
                self.rrahi = read_field(fid,'rrahi' ,[nx+2,ny+2,ns])
                self.rramo = read_field(fid,'rramo' ,[nx+2,ny+2,ns])
                self.rqahe = read_field(fid,'rqahe' ,[nx+2,ny+2,ns])
                self.rqrad = read_field(fid,'rqrad' ,[nx+2,ny+2,ns])
                self.rqbrm = read_field(fid,'rqbrm' ,[nx+2,ny+2,ns])
                self.rcxna = read_field(fid,'rcxna' ,[nx+2,ny+2,ns])
                self.rcxhi = read_field(fid,'rcxhi' ,[nx+2,ny+2,ns])
                self.rcxmo = read_field(fid,'rcxmo' ,[nx+2,ny+2,ns])
                self.b2stbr_sna = read_field(fid,'b2stbr_sna' ,[nx+2,ny+2,ns])
                self.b2stbr_smo = read_field(fid,'b2stbr_smo' ,[nx+2,ny+2,ns])
                self.b2stbr_she = read_field(fid,'b2stbr_she' ,[nx+2,ny+2])
                self.b2stbr_shi = read_field(fid,'b2stbr_shi' ,[nx+2,ny+2])
                self.b2stbr_sch = read_field(fid,'b2stbr_sch' ,[nx+2,ny+2])
                self.b2stbr_sne = read_field(fid,'b2stbr_sne' ,[nx+2,ny+2])
                self.b2stbc_sna = read_field(fid,'b2stbc_sna' ,[nx+2,ny+2,ns])
                self.b2stbc_smo = read_field(fid,'b2stbc_smo' ,[nx+2,ny+2,ns])
                self.b2stbc_she = read_field(fid,'b2stbc_she' ,[nx+2,ny+2])
                self.b2stbc_shi = read_field(fid,'b2stbc_shi' ,[nx+2,ny+2])
                self.b2stbc_sch = read_field(fid,'b2stbc_sch' ,[nx+2,ny+2])
                self.b2stbc_sne = read_field(fid,'b2stbc_sne' ,[nx+2,ny+2])
                self.b2stbm_sna = read_field(fid,'b2stbm_sna' ,[nx+2,ny+2,ns])
                self.b2stbm_smo = read_field(fid,'b2stbm_smo' ,[nx+2,ny+2,ns])
                self.b2stbm_she = read_field(fid,'b2stbm_she' ,[nx+2,ny+2])
                self.b2stbm_shi = read_field(fid,'b2stbm_shi' ,[nx+2,ny+2])
                self.b2stbm_sch = read_field(fid,'b2stbm_sch' ,[nx+2,ny+2])
                self.b2stbm_sne = read_field(fid,'b2stbm_sne' ,[nx+2,ny+2])
                self.b2sihs_divue = read_field(fid,'b2sihs_divue',[nx+2,ny+2])
                self.b2sihs_divua = read_field(fid,'b2sihs_divua',[nx+2,ny+2])
                self.b2sihs_exbe = read_field(fid,'b2sihs_exbe' ,[nx+2,ny+2])
                self.b2sihs_exba = read_field(fid,'b2sihs_exba' ,[nx+2,ny+2])
                self.b2sihs_visa = read_field(fid,'b2sihs_visa' ,[nx+2,ny+2])
                self.b2sihs_joule = read_field(fid,'b2sihs_joule',[nx+2,ny+2])
                self.b2sihs_fraa = read_field(fid,'b2sihs_fraa' ,[nx+2,ny+2])
                self.b2sihs_str = read_field(fid,'b2sihs_str' ,[nx+2,ny+2])
                self.b2npmo_smaf = read_field(fid,'b2npmo_smaf' ,[nx+2,ny+2,4,ns])
                self.b2npmo_smag = read_field(fid,'b2npmo_smag' ,[nx+2,ny+2,4,ns])
                self.b2npmo_smav = read_field(fid,'b2npmo_smav' ,[nx+2,ny+2,4,ns])
                self.smpr = read_field(fid,'smpr' ,[nx+2,ny+2,ns])
                self.smpt = read_field(fid,'smpt' ,[nx+2,ny+2,ns])
                self.smfr = read_field(fid,'smfr' ,[nx+2,ny+2,ns])
                self.smcf = read_field(fid,'smcf' ,[nx+2,ny+2,ns])
                self.ext_sna = read_field(fid,'ext_sna' ,[nx+2,ny+2,ns])
                self.ext_smo = read_field(fid,'ext_smo' ,[nx+2,ny+2,ns])
                self.ext_she = read_field(fid,'ext_she' ,[nx+2,ny+2])
                self.ext_shi = read_field(fid,'ext_shi' ,[nx+2,ny+2])
                self.ext_sch = read_field(fid,'ext_sch' ,[nx+2,ny+2])
                self.ext_sne = read_field(fid,'ext_sne' ,[nx+2,ny+2])
                self.calf = read_field(fid,'calf' ,fluxdim)
                self.cdna = read_field(fid,'cdna' ,fluxdims)
                self.cdpa = read_field(fid,'cdpa' ,fluxdims)
                self.ceqp = read_field(fid,'ceqp' ,[nx+2,ny+2])
                self.chce = read_field(fid,'chce' ,fluxdim)
                self.chci = read_field(fid,'chci' ,fluxdim)
                self.chve = read_field(fid,'chve' ,fluxdim)
                self.chvemx = read_field(fid,'chvemx' ,[nx+2,ny+2])
                self.chvi = read_field(fid,'chvi' ,fluxdim)
                self.chvimx = read_field(fid,'chvimx' ,[nx+2,ny+2])
                self.csig = read_field(fid,'csig' ,fluxdim)
                self.cvla = read_field(fid,'cvla' ,fluxdims)
                self.cvsa = read_field(fid,'cvsa' ,fluxdims)
                self.cthe = read_field(fid,'cthe' ,[nx+2,ny+2,ns])
                self.cthi = read_field(fid,'cthi' ,[nx+2,ny+2,ns])
                self.csigin = read_field(fid,'csigin' ,[fluxdims[0],fluxdims[1],fluxdims[2],fluxdims[3],ns])
                self.cvsa_cl = read_field(fid,'cvsa_cl' ,fluxdims)
                self.fllime = read_field(fid,'fllime' ,[nx+2,ny+2])
                self.fllimi = read_field(fid,'fllimi' ,[nx+2,ny+2])
                self.fllim0fna = read_field(fid,'fllim0fna' ,fluxdims)
                self.fllim0fhi = read_field(fid,'fllim0fhi' ,fluxdims)
                self.fllimvisc = read_field(fid,'fllimvisc' ,[nx+2,ny+2,ns])
                self.sig0 = read_field(fid,'sig0' ,[nx+2,ny+2])
                self.hce0 = read_field(fid,'hce0' ,[nx+2,ny+2])
                self.alf0 = read_field(fid,'alf0' ,[nx+2,ny+2])
                self.hci0 = read_field(fid,'hci0' ,[nx+2,ny+2])
                self.hcib = read_field(fid,'hcib' ,[nx+2,ny+2,ns])
                self.dpa0 = read_field(fid,'dpa0' ,[nx+2,ny+2,ns])
                self.dna0 = read_field(fid,'dna0' ,[nx+2,ny+2,ns])
                self.vsa0 = read_field(fid,'vsa0' ,[nx+2,ny+2,ns])
                self.vla0 = read_field(fid,'vla0' ,[nx+2,ny+2,2,ns])
                self.csig_an = read_field(fid,'csig_an' ,fluxdim)
                self.calf_an = read_field(fid,'calf_an' ,fluxdim)
                # strata-resolved scalings are only present when nstra != 0
                nstra = read_field(fid,'nstra' ,[1],True)
                if nstra!=0:
                    self.sclstra = read_field(fid,'sclstra' ,[ns+1,nstra[0]])
                    self.sclrtio = read_field(fid,'sclrtio' ,[ns+1,nstra[0]])
    plasmf = plasmfResults()
    fid.close()
    print('done reading b2fplasmf')
    return plasmf
def read_ft44(fileName):
    """Read an EIRENE fort.44 output file.

    For now:
    - only fort.44 version 20081111 is recognized
    - assuming nfla = 1 until a better fix
    - assuming nlwrmsh = 1 until a better fix

    Fields are read sequentially via read_ft44_field, so the order of the
    assignments below must match the file layout.

    Returns a ft44Results instance with one attribute per field.
    """
    print('read_ft44: assuming nlwrmsh = 1, nfla = 1.')
    nlwrmsh = 1
    nfla = 1
    fid = open(fileName)
    # NOTE(review): open() never returns -1 (MATLAB idiom); dead check.
    if (fid == -1): print("read_ft44: can't open file")
    # Read dimensions
    # nx, ny, version
    dims = fid.readline().rstrip().split()
    nx = int(dims[0])
    ny = int(dims[1])
    ver = int(dims[2])
    if (ver != 20081111 and ver != 20160829 and ver != 20170328):
        print('read_ft44: unknown format of fort.44 file (this is usually fine)')
    # go to new line (skip reading a possible git-hash)
    # fid.readline().rstrip()
    # natm, nmol, nion
    dims = fid.readline().rstrip().split()
    natm = int(dims[0])
    nmol = int(dims[1])
    nion = int(dims[2])
    # for now, ignore reading species labels (one header line per species)
    for i in range(natm): line = fid.readline().rstrip()
    for i in range(nmol): line = fid.readline().rstrip()
    for i in range(nion): line = fid.readline().rstrip()
    # Read basic data, there is more, I might grab it if I find out I need it
    class ft44Results:
        def __init__(self):
            self.dab2 = read_ft44_field(fid,ver,'dab2',[nx,ny,natm]);
            self.tab2 = read_ft44_field(fid,ver,'tab2',[nx,ny,natm]);
            self.dmb2 = read_ft44_field(fid,ver,'dmb2',[nx,ny,nmol]);
            self.tmb2 = read_ft44_field(fid,ver,'tmb2',[nx,ny,nmol]);
            self.dib2 = read_ft44_field(fid,ver,'dib2',[nx,ny,nion]);
            self.tib2 = read_ft44_field(fid,ver,'tib2',[nx,ny,nion]);
            self.rfluxa = read_ft44_field(fid,ver,'rfluxa',[nx,ny,natm]);
            self.rfluxm = read_ft44_field(fid,ver,'rfluxm',[nx,ny,nmol]);
            self.pfluxa = read_ft44_field(fid,ver,'pfluxa',[nx,ny,natm]);
            self.pfluxm = read_ft44_field(fid,ver,'pfluxm',[nx,ny,nmol]);
            self.refluxa = read_ft44_field(fid,ver,'refluxa',[nx,ny,natm]);
            self.refluxm = read_ft44_field(fid,ver,'refluxm',[nx,ny,nmol]);
            self.pefluxa = read_ft44_field(fid,ver,'pefluxa',[nx,ny,natm]);
            self.pefluxm = read_ft44_field(fid,ver,'pefluxm',[nx,ny,nmol]);
            self.emiss = read_ft44_field(fid,ver,'emiss',[nx,ny,1]);
            self.emissmol = read_ft44_field(fid,ver,'emissmol',[nx,ny,1]);
            self.srcml = read_ft44_field(fid,ver,'srcml',[nx,ny,nmol]);
            self.edissml = read_ft44_field(fid,ver,'edissml',[nx,ny,nmol]);
            self.wldnek = read_ft44_field(fid,ver,'wldnek(0)',[1,105])
            self.wldna = read_ft44_field(fid,ver,'wldna(0)',[natm,-1])
            # self.wldspt0 = read_ft44_field(fid,ver,'wldspt(0)',[natm,-1])
            # self.wldspt1 = read_ft44_field(fid,ver,'wldspt( 1)',[natm,-1])
            # self.wldspt2 = read_ft44_field(fid,ver,'wldspt( 2)',[natm,-1])
            # self.wldspt3 = read_ft44_field(fid,ver,'wldspt( 3)',[natm,-1])
            # self.wldspt4 = read_ft44_field(fid,ver,'wldspt( 4)',[natm,-1])
            # self.wldspt5 = read_ft44_field(fid,ver,'wldspt( 5)',[natm,-1])
            # self.wldspt6 = read_ft44_field(fid,ver,'wldspt( 6)',[natm,-1])
            # self.wlpumpA = read_ft44_field(fid,ver,'wlpump(A)',[natm,88])
            # self.wlpumpM = read_ft44_field(fid,ver,'wlpump(M)',[nmol,88])
            self.eneutrad = read_ft44_field(fid,ver,'eneutrad',[nx,ny,natm])
            self.emolrad = read_ft44_field(fid,ver,'emolrad',[nx,ny,nmol])
            self.eionrad = read_ft44_field(fid,ver,'eionrad',[nx,ny,nion])
    ft44 = ft44Results()
    fid.close()
    print('done reading ft44 file')
    return ft44
def read_ft46(fileName):
    """Read an EIRENE fort.46 file (triangle-mesh data). Converts to SI units.

    For now, only fort.46 versions 20160513 / 20160829 / 20170930 are
    recognized.  Fields are read sequentially via read_ft44_field, so the
    order of the assignments below must match the file layout.

    Returns a ft46Results instance with one attribute per field.
    """
    fid = open(fileName)
    # NOTE(review): open() never returns -1 (MATLAB idiom); dead check.
    if (fid == -1): print("read_ft44: can't open file")
    # Read dimensions
    # ntri, version, avoid reading git-hash
    line = fid.readline().rstrip().split()
    ntri = int(line[0])
    ver = int(line[1])
    if ver != 20160513 and ver != 20160829 and ver != 20170930:
        print('read_ft46: unknown format of fort.46 file')
    # natm, nmol, nion
    dims = fid.readline().rstrip().split()
    natm = int(dims[0])
    nmol = int(dims[1])
    nion = int(dims[2])
    # for now, ignore reading species labels
    for i in range(natm): fid.readline().rstrip()
    for i in range(nmol): fid.readline().rstrip()
    for i in range(nion): fid.readline().rstrip()
    # elementary charge [C], used to convert energy densities to Joules
    eV = 1.6021765650000000E-019
    # Read data; *1e6 converts cm^{-3} -> m^{-3}, *1e1 converts momentum flux units
    class ft46Results:
        def __init__(self):
            self.pdena = read_ft44_field(fid,ver,'pdena',[ntri,natm])*1e6# m^{-3}
            self.pdenm = read_ft44_field(fid,ver,'pdenm',[ntri,nmol])*1e6
            self.pdeni = read_ft44_field(fid,ver,'pdeni',[ntri,nion])*1e6
            self.edena = read_ft44_field(fid,ver,'edena',[ntri,natm])*1e6*eV# J m^{-3}
            self.edenm = read_ft44_field(fid,ver,'edenm',[ntri,nmol])*1e6*eV
            self.edeni = read_ft44_field(fid,ver,'edeni',[ntri,nion])*1e6*eV
            self.vxdena = read_ft44_field(fid,ver,'vxdena',[ntri,natm])*1e1# kg s^{-1} m^{-2}
            self.vxdenm = read_ft44_field(fid,ver,'vxdenm',[ntri,nmol])*1e1
            self.vxdeni = read_ft44_field(fid,ver,'vxdeni',[ntri,nion])*1e1
            self.vydena = read_ft44_field(fid,ver,'vydena',[ntri,natm])*1e1# kg s^{-1} m^{-2}
            self.vydenm = read_ft44_field(fid,ver,'vydenm',[ntri,nmol])*1e1
            self.vydeni = read_ft44_field(fid,ver,'vydeni',[ntri,nion])*1e1
            self.vzdena = read_ft44_field(fid,ver,'vzdena',[ntri,natm])*1e1# kg s^{-1} m^{-2}
            self.vzdenm = read_ft44_field(fid,ver,'vzdenm',[ntri,nmol])*1e1
            self.vzdeni = read_ft44_field(fid,ver,'vzdeni',[ntri,nion])*1e1
            self.vol = read_ft44_field(fid,ver,'volumes',[ntri])
            self.pux = read_ft44_field(fid,ver,'pux',[ntri])
            self.puy = read_ft44_field(fid,ver,'puy',[ntri])
    ft46 = ft46Results()
    # Close file
    fid.close()
    print('done reading ft46 file')
    return ft46
def read_ft33(fileName):
    """Read a fort.33 file (triangle node coordinates); convert cm -> m.

    The first record holds the node count; the remaining records hold all
    x-coordinates followed by all y-coordinates, several values per line.

    Returns a (2, nnodes) numpy array: row 0 is x, row 1 is y, in metres.
    """
    fid = open(fileName)
    if (fid == -1): print("can't open fort.33 file")
    print('read_ft33: assuming ntrfrm = 0.')
    ntrfrm = 0
    # first record: number of nodes per coordinate list
    nnodes = int(fid.readline().rstrip())
    nodes = [[], []]
    if ntrfrm == 0:
        # fill the x list (axis 0), then the y list (axis 1)
        for axis in (0, 1):
            for line in fid:
                nodes[axis].extend(float(tok) for tok in line.split())
                if len(nodes[axis]) >= nnodes:
                    break
    else: print('read_ft33: wrong ntrfrm.')
    # Convert from cm to m
    nodes = np.array(nodes) * 1e-2
    # close file
    fid.close()
    return nodes
def read_ft34(fileName):
    """Read a fort.34 file (node indices composing each triangle).

    The first record holds the triangle count; each following record is
    "<triangle-id> <node1> <node2> <node3>".

    Returns a list of three lists: cells[k][t] is the node index of corner
    k of triangle t (indices as stored in the file, i.e. 1-based).
    """
    fid = open(fileName)
    if (fid == -1): print("can't open fort.34 file")
    # first record: number of triangles
    ntria = int(fid.readline().rstrip())
    cells = [[], [], []]
    for _ in range(ntria):
        tokens = fid.readline().rstrip().split()
        # column 0 is the triangle id; columns 1..3 are the corner nodes
        for corner in range(3):
            cells[corner].append(int(tokens[corner + 1]))
    fid.close()
    return cells
def read_ft35(fileName):
    """Read a fort.35 file (per-triangle connectivity data).

    Returns an object with float arrays:
      nghbr (ntria, 3) - neighbouring triangle index per side
      side  (ntria, 3) - matching side number on the neighbour
      cont  (ntria, 3) - contiguity flag per side
      ixiy  (ntria, 2) - last two columns of each record
    """
    fid = open(fileName)
    if (fid == -1): print("can't open fort.34 file")
    # first record: number of triangles
    ntria = int(fid.readline().rstrip())
    class ft35Results():
        def __init__(self):
            # pre-allocate float arrays, one row per triangle
            self.nghbr = np.zeros([ntria, 3])
            self.side = np.zeros([ntria, 3])
            self.cont = np.zeros([ntria, 3])
            self.ixiy = np.zeros([ntria, 2])
            for row in range(ntria):
                vals = [int(tok) for tok in fid.readline().rstrip().split()]
                # columns 1..9 interleave (nghbr, side, cont) triples
                self.nghbr[row, :] = vals[1::3][0:3]
                self.side[row, :] = vals[2::3][0:3]
                self.cont[row, :] = vals[3::3]
                self.ixiy[row, :] = vals[10:12]
    links = ft35Results()
    fid.close()
    return links
def read_triangle_mesh(fort33fn,fort34fn,fort35fn):
    """Wrapper routine to read all triangle data at once.

    Reads fort.33 (nodes), fort.34 (cells) and fort.35 (connectivity) and
    returns a triangleResults instance with fields: nodes, cells, nodeXs,
    nodeYs, triaX/triaY (triangle centroids), nghbr, side, cont, ixiy.
    """
    class triangleResults:
        def __init__(self):
            self.nodes = np.array(read_ft33(fort33fn))# (2, nnodes): row 0 = x, row 1 = y [m]
            self.cells = np.array(read_ft34(fort34fn))# (3, ntria): 1-based node index per corner
            centroidsX = []
            centroidsY = []
            nodeXs = []
            nodeYs = []
            for i in range(np.shape(self.cells)[1]):#loop through every triangle
                cntrX=0
                cntrY=0
                nodeX=[]
                nodeY=[]
                for j in range(3):#loop through each node on each triangle
                    # cells holds 1-based node indices, hence the -1 here
                    cntrX=cntrX+self.nodes[:,self.cells[:,i][j]-1][0]
                    nodeX.append(self.nodes[:,self.cells[:,i][j]-1][0])
                    cntrY=cntrY+self.nodes[:,self.cells[:,i][j]-1][1]
                    nodeY.append(self.nodes[:,self.cells[:,i][j]-1][1])
                cntrX=cntrX/3#calculate centroid of triangle
                cntrY=cntrY/3
                nodeXs.append(nodeX)
                nodeYs.append(nodeY)
                centroidsX.append(cntrX)#make list of triangle centroid x-coordinate
                centroidsY.append(cntrY)#make list of triangle centroid y-coordinate
            self.nodeXs = np.array(nodeXs)
            self.nodeYs = np.array(nodeYs)
            self.triaX = np.array(centroidsX)
            self.triaY = np.array(centroidsY)
            links = read_ft35(fort35fn)
            self.nghbr = links.nghbr
            self.side = links.side
            self.cont = links.cont
            self.ixiy = links.ixiy
    triangles=triangleResults()
    return triangles
def readB2Plot(fileLoc):
    """Read the file produced by writes within b2plot commands.

    Returns a numpy array with:
      idx 0 - computational index
      idx 1 - R-Rsep
      idx 2 - value from b2plot

    NOTE(review): the first while loop has no empty-line guard, so a file
    that ends inside the first table raises IndexError on line[0]; also, if
    the index list length differs from the value lists, np.array builds a
    ragged (object) array -- confirm inputs are always well formed.
    """
    fid = open(fileLoc)
    title = fid.readline().rstrip()
    line = fid.readline().rstrip().split()
    dataList =[[],[],[]]
    # first table: (R-Rsep, value) pairs until a non-numeric line
    while (is_number(line[0])):
        dataList[1].append(float(line[0]))
        dataList[2].append(float(line[1]))
        line = fid.readline().rstrip().split()
    line = fid.readline().rstrip().split()
    # second table: computational indices until a non-numeric or empty line
    while (is_number(line[0])):
        dataList[0].append(float(line[0]))
        line = fid.readline().rstrip().split()
        if not line: break
    fid.close()
    return np.array(dataList)
def readB2transportFile(caseDir):
    """Read b2.transport.inputfile and collect the coefficient profiles.

    Makes a few assumptions about how things are organized: assumes only Dn
    (ndata index 1), chiI (index 3), and chiE (index 4) are specified.
    Use with caution.

    Returns [R, Dn, chiE, chiI] as plain lists.

    NOTE(review): each loop iteration consumes TWO lines (one at the top,
    one after the ndata check) and only the second is parsed for values --
    confirm this matches the line layout of the file being read.
    NOTE(review): DnVals/chiIvals/chiEvals are referenced before assignment
    (UnboundLocalError) if the first parsed line is not an 'ndata(' marker.
    """
    fid = open(caseDir+'b2.transport.inputfile')
    line = fid.readline().rstrip().split()
    Dn=[]
    chiE=[]
    chiI=[]
    R=[]
    while (len(line)>0):
        line = fid.readline().rstrip().split()
        if ('addspec(' in line): continue
        if (line==['/']): break
        # an 'ndata(' marker switches which coefficient the following data belongs to
        if ('ndata(' in line):
            if (int(line[3])==1):
                DnVals=True
                chiIvals=False
                chiEvals=False
            if (int(line[3])==3):
                chiIvals=True
                DnVals=False
                chiEvals=False
            if (int(line[3])==4):
                chiEvals=True
                DnVals=False
                chiIvals=False
        line = fid.readline().rstrip().split()
        if (DnVals):
            Dn.append(float(line[-2]))
            R.append(float(line[9]))
        if (chiEvals):
            chiE.append(float(line[-2]))
        if (chiIvals):
            chiI.append(float(line[-2]))
    return [R,Dn,chiE,chiI]
def read_tally_field(fid, fieldname):
    """Parse one field block from a display.tallies file.

    display.tallies is the result of `display_tallies L > display.tallies`.
    Scans forward in *fid* until a line containing *fieldname*, skips that
    header line, then reads numeric rows until the first blank line.  Rows
    whose first token is not numeric have that (label) token dropped.

    Returns a numpy float array; a single-row block is returned 1-D.

    Raises EOFError if *fieldname* is never found (the original code spun
    forever at EOF in that case).
    """
    line = fid.readline().rstrip()
    while fieldname not in line:
        raw = fid.readline()
        if not raw:  # EOF guard: readline() returns '' forever at EOF
            raise EOFError('read_tally_field: field %r not found' % fieldname)
        line = raw.rstrip()
    line = fid.readline().rstrip()
    data = []
    while line:
        tokens = line.split()  # split once per row instead of 2-3 times
        # np.float was removed in NumPy 1.24; builtin float gives the same
        # float64 dtype the old alias produced.
        if is_number(tokens[0]):
            data.append(np.array(tokens).astype(float))
        else:
            data.append(np.array(tokens[1:]).astype(float))
        line = fid.readline().rstrip()
    # collapse a single-row block to 1-D, matching the historical shape
    if np.shape(data)[0] == 1: data = data[0]
    return np.array(data)
def readTallyDisplay(fileLoc):
    """Read a display.tallies file (result of `display_tallies L > display.tallies`).

    Skips the header up to the first line containing 'ITER', then reads every
    tally block in file order (read_tally_field consumes the stream
    sequentially, so do not reorder the assignments below).

    Returns a tallyResults instance with one attribute per tally.

    NOTE(review): if 'ITER' never appears, the header loop spins forever at
    EOF (readline() keeps returning '') -- confirm inputs are well formed.
    """
    fid = open(fileLoc)
    line = fid.readline().rstrip()
    while 'ITER' not in line: line = fid.readline().rstrip()
    class tallyResults:
        def __init__(self):
            self.rsanareg = read_tally_field(fid,'rsanareg')
            self.rsahireg = read_tally_field(fid,'rsahireg')
            self.rsamoreg = read_tally_field(fid,'rsamoreg')
            self.rranareg = read_tally_field(fid,'rranareg')
            self.rrahireg = read_tally_field(fid,'rrahireg')
            self.rramoreg = read_tally_field(fid,'rramoreg')
            self.rqahereg = read_tally_field(fid,'rqahereg')
            # self.rqradreg = read_tally_field(fid,'rqradreg')
            # self.rqbrmreg = read_tally_field(fid,'rqbrmreg')
            self.rcxnareg = read_tally_field(fid,'rcxnareg')
            self.rcxhireg = read_tally_field(fid,'rcxhireg')
            self.rcxmoreg = read_tally_field(fid,'rcxmoreg')
            self.fnaxreg = read_tally_field(fid,'fnaxreg')
            self.fnayreg = read_tally_field(fid,'fnayreg')
            self.fhixreg = read_tally_field(fid,'fhixreg')
            self.fhiyreg = read_tally_field(fid,'fhiyreg')
            self.fhexreg = read_tally_field(fid,'fhexreg')
            self.fheyreg = read_tally_field(fid,'fheyreg')
            self.fhpxreg = read_tally_field(fid,'fhpxreg')
            self.fhpyreg = read_tally_field(fid,'fhpyreg')
            self.fhmxreg = read_tally_field(fid,'fhmxreg')
            self.fhmyreg = read_tally_field(fid,'fhmyreg')
            self.fchxreg = read_tally_field(fid,'fchxreg')
            self.fchyreg = read_tally_field(fid,'fchyreg')
            self.fhtxreg = read_tally_field(fid,'fhtxreg')
            self.fhtyreg = read_tally_field(fid,'fhtyreg')
            self.fhjxreg = read_tally_field(fid,'fhjxreg')
            self.fhjyreg = read_tally_field(fid,'fhjyreg')
            # self.qconvixreg = read_tally_field(fid,'qconvixreg')
            # self.qconviyreg = read_tally_field(fid,'qconviyreg')
            # self.qconvexreg = read_tally_field(fid,'qconvexreg')
            # self.qconveyreg = read_tally_field(fid,'qconveyreg')
            self.b2stbr_sna_reg = read_tally_field(fid,'b2stbr_sna_reg')
            # self.b2stbr_sne_reg = read_tally_field(fid,'b2stbr_sne_reg')
            self.b2stbr_she_reg = read_tally_field(fid,'b2stbr_she_reg')
            self.b2stbr_shi_reg = read_tally_field(fid,'b2stbr_shi_reg')
            self.b2stbr_sch_reg = read_tally_field(fid,'b2stbr_sch_reg')
            self.b2stbc_sna_reg = read_tally_field(fid,'b2stbc_sna_reg')
            self.b2stbc_she_reg = read_tally_field(fid,'b2stbc_she_reg')
            self.b2stbc_shi_reg = read_tally_field(fid,'b2stbc_shi_reg')
            self.b2stbm_she_reg = read_tally_field(fid,'b2stbm_she_reg')
            self.b2stbm_shi_reg = read_tally_field(fid,'b2stbm_shi_reg')
            self.nareg = read_tally_field(fid,'nareg')
            self.tereg = read_tally_field(fid,'tereg')
            self.nereg = read_tally_field(fid,'nereg')
            self.ne2reg = read_tally_field(fid,'ne2reg')
            self.tireg = read_tally_field(fid,'tireg')
            self.nireg = read_tally_field(fid,'nireg')
            self.poreg = read_tally_field(fid,'poreg')
            self.volreg = read_tally_field(fid,'volreg')
            self.b2brem = read_tally_field(fid,'b2brem')
            self.b2rad = read_tally_field(fid,'b2rad')
            self.b2qie = read_tally_field(fid,'b2qie')
            self.b2vdp = read_tally_field(fid,'b2vdp')
            self.b2divue = read_tally_field(fid,'b2divue')
            self.b2divua = read_tally_field(fid,'b2divua')
            self.b2exbe = read_tally_field(fid,'b2exbe')
            self.b2exba = read_tally_field(fid,'b2exba')
            self.b2visa = read_tally_field(fid,'b2visa')
            self.b2joule = read_tally_field(fid,'b2joule')
            self.b2fraa = read_tally_field(fid,'b2fraa')
            self.b2she = read_tally_field(fid,'b2she')
            self.b2shi = read_tally_field(fid,'b2shi')
            self.b2she0 = read_tally_field(fid,'b2she0')
            self.b2shi0 = read_tally_field(fid,'b2shi0')
            self.rdneureg = read_tally_field(fid,'rdneureg')
    tally = tallyResults()
    print('finished reading tallies')
    return tally
def fmt(x, pos):
    """Tick-formatter: render *x* as LaTeX scientific notation.

    Signature matches the (value, position) convention of matplotlib tick
    formatters; *pos* is unused.  E.g. 1234.0 -> '$1.23 \\times 10^{3}$'.
    """
    mantissa, exponent = '{:.2e}'.format(x).split('e')
    return r'$%s \times 10^{%d}$' % (mantissa, int(exponent))
def read_input(inputFile):
    """Read a SOLPS input.dat and pair surface numbers with wall end points.

    Collects the eirene wall number and the divgeo wall number together with
    the (R,Z) of each wall segment's end points.  For the non-additional
    surfaces it tries to find the wall points associated with them and gives
    those surfaces the matching wall end points.

    Returns an (nsurf, 6) numpy array:
    [surface id, wall id, R1, Z1, R2, Z2] (coordinates as sliced from fixed
    columns of the geometry line).

    NOTE(review): innerTargWalls/outerTargWalls are initialized empty and
    never appended to in this function, so the target-matching branches
    below can never fire as written -- confirm whether population was lost.
    """
    fid = open(inputFile)
    line=fid.readline().rstrip().split()
    additional=False
    surfaces=[]
    innerTargWalls = []
    outerTargWalls = []
    while 'atomic' not in line:
        line=fid.readline().rstrip().split()
        #mark non-additional surfaces (PFR, Targets, SOL, etc.)
        if ':' in line and not additional:
            surfaces.append([int(line[1])*-1,int(line[1])*-1,1.0000,1.0000,1.000,1.000])#line[3]])
            if int(line[1])==3: innerTargIdx = len(surfaces)-1
            if int(line[1])==6: outerTargIdx = len(surfaces)-1
        #now it goes through the additional surfaces (actual walls)
        elif ':' in line and additional:
            # skip two records preceding the fixed-width geometry line
            fid.readline().rstrip().split()
            fid.readline().rstrip().split()
            geoLine = fid.readline().rstrip()
            #The following if statements try to find the walls associated with the non-additional surfaces
            if len(innerTargWalls)>0:
                if (int(line[3])==innerTargWalls[0]):
                    surfaces[innerTargIdx][2] = float(geoLine[0:12])
                    surfaces[innerTargIdx][3] = float(geoLine[12:24])
                elif (int(line[3])==innerTargWalls[-1]):
                    surfaces[innerTargIdx][4] = float(geoLine[36:48])
                    surfaces[innerTargIdx][5] = float(geoLine[48:60])
            if len(outerTargWalls)>0:
                if (int(line[3])==outerTargWalls[0]):
                    surfaces[outerTargIdx][2] = float(geoLine[0:12])
                    surfaces[outerTargIdx][3] = float(geoLine[12:24])
                elif (int(line[3])==outerTargWalls[-1]):
                    surfaces[outerTargIdx][4] = float(geoLine[36:48])
                    surfaces[outerTargIdx][5] = float(geoLine[48:60])
            #This actually writes down the geo information
            surfaces.append([int(line[1]),int(line[3]),float(geoLine[0:12]),float(geoLine[12:24]),float(geoLine[36:48]),float(geoLine[48:60])])
        #This tells the code that we are switching to the additional surfaces
        if '3b.' in line:
            additional=True
    return np.array(surfaces)
def plotvar(xPts, yPts, var,minColor='none',maxColor='none', cbScale='linear',cbTitle=r'Density $m^{-3}$',colormap='viridis',title='SOLPS data',
            xlims=[0.25,1.6],ylims=[-1.7,1.7],colorBarOn=True,filename='NONE',inputLoc='NONE'):
    """Plot a 2D SOLPS field as colored grid-cell polygons.

    Parameters
    ----------
    xPts, yPts : cell corner coordinates (b2fgmtry.crx / b2fgmtry.cry)
    var : the 2D variable to plot (transposed and flattened internally)
    minColor, maxColor : colorbar bounds; 'none' lets matplotlib autoscale
    cbScale : 'linear', 'log' or 'symlog' ('symlog' switches the colormap to 'bwr')
    cbTitle : title of the colorbar
    colormap : colormap used by the colorbar
    title : title of the whole plot
    xlims, ylims : x and y bounds of the 2D plot
    colorBarOn : turns the colorbar on and off
    filename : file the plot is saved to; 'NONE' means do not save
    inputLoc : location of input.dat, used to overlay walls; 'NONE' skips it

    NOTE(review): Polygon, PatchCollection, plt and colorsMPL must be
    imported elsewhere in this module (presumably matplotlib) -- confirm.
    """
    if inputLoc!='NONE':
        wallGeo = read_input(inputLoc)
    patches = []
    nx = np.shape(xPts)[0]
    ny = np.shape(xPts)[1]
    # build one quadrilateral patch per grid cell; corner order [0,1,3,2]
    # walks the corners around the cell perimeter
    for iy in np.arange(0,ny):
        for ix in np.arange(0,nx):
            rcol = xPts[ix,iy,[0,1,3,2]]
            zcol = yPts[ix,iy,[0,1,3,2]]
            rcol.shape=(4,1)
            zcol.shape=(4,1)
            polygon = Polygon(np.column_stack((rcol,zcol)), True,linewidth=3)
            patches.append(polygon)
    vals=var.T.flatten()
    if (cbScale=='symlog'):
        p = PatchCollection(patches,False,cmap='bwr',edgecolor='k',linewidth=0.15)
    else:
        p = PatchCollection(patches,False,cmap=colormap,edgecolor='k',linewidth=0.1)
    p.set_array(np.array(vals))
    # apply explicit color limits / norms only when bounds were given
    if (minColor!='none'):
        if (cbScale=='linear'):
            p.set_clim([minColor,maxColor])
        if (cbScale=='log'):
            p.norm=colorsMPL.LogNorm(vmin=minColor,vmax=maxColor)
        if (cbScale=='symlog'):
            p.norm=colorsMPL.SymLogNorm(linthresh=maxColor/10,linscale=0.5,vmin=minColor,vmax=maxColor)
    fig,axs = plt.subplots(1,figsize=(9, 11))
    axs.add_collection(p)
    if (colorBarOn):
        if cbScale == 'symlog':
            tickLocs = [maxColor,maxColor/10,minColor/10,minColor]
            cb = plt.colorbar(p,ax=axs,pad=0.01,ticks=tickLocs)
            # cb.ax.set_yticklabels(tickLabels)
        else:
            cb = plt.colorbar(p,ax=axs,pad=0.01)
        cb.ax.tick_params(labelsize=20)
        cb.set_label(cbTitle,fontsize=25)
    # overlay the machine wall segments; coordinates are stored in cm (/100 -> m)
    if inputLoc!='NONE':
        wallColor ='k'
        wallWidth=3
        for j in range(np.shape(wallGeo)[0]):
            axs.plot(np.array([wallGeo[j][2],wallGeo[j][4]])/100,np.array([wallGeo[j][3],wallGeo[j][5]])/100,color=wallColor,linewidth=wallWidth)
    axs.set_title(title,fontsize=25)
    axs.set_ylim(ylims)
    axs.set_xlim(xlims)
    axs.tick_params(axis='both',labelsize=20)
    plt.xlabel('R [m]',fontsize=25)
    plt.ylabel('Z [m]',fontsize=25)
    plt.grid(True)
    if filename != 'NONE':
        plt.savefig(filename)
    plt.show()
def is_neutral(a):
    """Return True when species index *a* refers to a neutral species.

    Neutrals are assumed to sit at indices 0 and 2, which only holds for at
    most two ion types (hence the warning for indices >= 6).
    """
    if a >= 6:
        print('WARNING: bigger species index than is_neutral was made for, proceed with caution')
    return a in (0, 2)
def read_b2wdat_field(filename):
    """Read one b2wdat .out file (produced by setting b2wdat_iout='4' in b2mn.dat).

    The first line is consumed as a header.  Each following line is
    "<row-index> <val> <val> ...": the leading index column is dropped and
    the values collected until the first blank line (or EOF).  The row order
    is then reversed and the array transposed before returning.

    Returns
    -------
    numpy.ndarray of float, shape (ncols, nrows).
    """
    fieldVal = []
    # context manager guarantees the handle is closed
    # (the original implementation leaked it)
    with open(filename) as f:
        line = f.readline().rstrip().split()
        while (line != []):
            line = f.readline().rstrip().split()
            if line == []: break
            # drop the leading row-index column
            fieldVal.append([float(v) for v in line][1:])
    return np.array(fieldVal[::-1]).T
def read_b2wdat(b2wdatLoc,nSpec):
# reads .out files produced by setting b2wdat_iout='4' in b2mn.dat and returns a class with the data
# currently only grabs what I have needed there are literally hundreds more
# this isn't a very robust function, might not work if it isn't a D-only or D+Li case. Be careful
# adjusting is_neutral to be more robust might be all it needs but not sure
nas = []
uas = []
b2srdt_smodts = []
b2npmo_fmoxs = []
b2npmo_fmoys = []
b2sigp_smogpis = []
b2sigp_smogpos = []
b2npmo_smbs = []
b2stcx_smqs = []
b2npmo_smocfs = []
b2stel_smq_ions = []
b2stel_smq_recs = []
b2npmo_smotfias = []
b2npmo_smotfeas = []
b2npmo_smofreas = []
b2npmo_smofrias = []
b2npmo_smoans = []
b2stbc_phys_smos = []
b2npmo_smovvs = []
b2npmo_smovhs = []
b2stbr_smos = []
b2trcl_luciani_fllim_cvsahzxs = []
b2trcl_luciani_cvsahzxs = []
b2npmo_resmos = []
b2stbr_sna_eirs = []
b2stel_sna_ions = []
b2stel_sna_recs = []
b2npc_snas = []
crxs = []
crys = []
for i in range(4): crxs.append(read_b2wdat_field(b2wdatLoc+'output/'+'crx'+str(i)+'.dat'))
for i in range(4): crys.append(read_b2wdat_field(b2wdatLoc+'output/'+'cry'+str(i)+'.dat'))
for spIdx in range(nSpec):
if is_neutral(spIdx): continue
if spIdx==1: nas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npc11_na00'+str(spIdx)+'.dat'))
if spIdx>1: nas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npc9_na00'+str(spIdx)+'.dat'))
uas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_ua00'+str(spIdx)+'.dat'))
b2srdt_smodts.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2srdt_smodt00'+str(spIdx)+'.dat'))
b2npmo_fmoxs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_fmox00'+str(spIdx)+'.dat'))
b2npmo_fmoys.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_fmoy00'+str(spIdx)+'.dat'))
b2sigp_smogpis.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2sigp_smogpi00'+str(spIdx)+'.dat'))
b2sigp_smogpos.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2sigp_smogpo00'+str(spIdx)+'.dat'))
b2npmo_smbs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smb00'+str(spIdx)+'.dat'))
b2stcx_smqs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2stcx_smq00'+str(spIdx)+'.dat'))
b2npmo_smocfs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smocf00'+str(spIdx)+'.dat'))
b2stel_smq_ions.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2stel_smq_ion00'+str(spIdx)+'.dat'))
b2stel_smq_recs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2stel_smq_rec00'+str(spIdx)+'.dat'))
b2npmo_smotfias.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smotfia00'+str(spIdx)+'.dat'))
b2npmo_smotfeas.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smotfea00'+str(spIdx)+'.dat'))
b2npmo_smofreas.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smofrea00'+str(spIdx)+'.dat'))
b2npmo_smofrias.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smofria00'+str(spIdx)+'.dat'))
b2npmo_smoans.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smoan00'+str(spIdx)+'.dat'))
b2stbc_phys_smos.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2stbc_phys_smo00'+str(spIdx)+'.dat'))
b2npmo_smovvs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smovv00'+str(spIdx)+'.dat'))
b2npmo_smovhs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_smovh00'+str(spIdx)+'.dat'))
b2stbr_smos.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2stbr_smo_eir00'+str(spIdx)+'.dat'))
b2trcl_luciani_fllim_cvsahzxs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2trcl_luciani_fllim_cvsahzx00'+str(spIdx)+'.dat'))
b2trcl_luciani_cvsahzxs.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2trcl_luciani_cvsahzx00'+str(spIdx)+'.dat'))
b2npmo_resmos.append( read_b2wdat_field(b2wdatLoc+'output/'+'b2npmo_resmo00'+str(spIdx)+'.dat'))
b2stbr_sna_eirs.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2stbr_sna_eir00'+str(spIdx)+'.dat'))
b2stel_sna_ions.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2stel_sna_ion00'+str(spIdx)+'.dat'))
b2stel_sna_recs.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2stel_sna_rec00'+str(spIdx)+'.dat'))
if spIdx==0 or spIdx==2: b2npc_snas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npc_sna00'+str(spIdx)+'.dat'))
elif spIdx==1: b2npc_snas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npc11_sna00'+str(spIdx)+'.dat'))
else: b2npc_snas.append(read_b2wdat_field(b2wdatLoc+'output/'+'b2npc9_sna00'+str(spIdx)+'.dat'))
#initialize class that will hold all the data of bwdat output
class b2wdatResults:
    """Container bundling every field read from the b2wdat output files.

    All attributes are captured from the enclosing reader function's local
    variables via closure (the per-species lists built in the read loop
    above), so this class is only meaningful when instantiated inside that
    function.  The geometry fields are read directly from disk here.
    """
    def __init__(self):
        #LHS of the momentum eqn
        self.b2srdt_smodt = b2srdt_smodts
        self.b2npmo_fmox = b2npmo_fmoxs
        self.b2npmo_fmoy = b2npmo_fmoys
        self.b2sigp_smogpi = b2sigp_smogpis
        self.b2sigp_smogpo = b2sigp_smogpos
        #RHS of the momentum equation
        self.b2npmo_smb = b2npmo_smbs
        self.b2stcx_smq = b2stcx_smqs
        self.b2npmo_smocf = b2npmo_smocfs
        self.b2stel_smq_ion = b2stel_smq_ions
        self.b2stel_smq_rec = b2stel_smq_recs
        self.b2npmo_smotfia = b2npmo_smotfias
        self.b2npmo_smotfea = b2npmo_smotfeas
        self.b2npmo_smofrea = b2npmo_smofreas
        self.b2npmo_smofria = b2npmo_smofrias
        self.b2npmo_smoan = b2npmo_smoans
        self.b2stbc_phys_smo = b2stbc_phys_smos
        self.b2npmo_smovv = b2npmo_smovvs
        self.b2npmo_smovh = b2npmo_smovhs
        self.b2stbr_smo = b2stbr_smos
        self.b2trcl_luciani_fllim_cvsahzx = b2trcl_luciani_fllim_cvsahzxs
        self.b2trcl_luciani_cvsahzx = b2trcl_luciani_cvsahzxs
        self.b2npmo_resmo = b2npmo_resmos
        #particle sources
        self.b2stbr_sna_eir = b2stbr_sna_eirs
        self.b2stel_sna_ion = b2stel_sna_ions
        self.b2stel_sna_rec = b2stel_sna_recs
        self.b2npc_sna = b2npc_snas
        #geo info
        # hx/hy/hz/vol — presumably metric coefficients and cell volumes of
        # the B2 grid; bbx/bb are field components/magnitude — TODO confirm
        # against the b2wdat documentation.
        self.hx = read_b2wdat_field(b2wdatLoc+'output/'+'hx.dat')
        self.hy = read_b2wdat_field(b2wdatLoc+'output/'+'hy.dat')
        self.hz = read_b2wdat_field(b2wdatLoc+'output/'+'hz.dat')
        self.vol = read_b2wdat_field(b2wdatLoc+'output/'+'vol.dat')
        self.bbx = read_b2wdat_field(b2wdatLoc+'output/'+'bbx.dat')
        self.bb = read_b2wdat_field(b2wdatLoc+'output/'+'bb.dat')
        # Ratio bbx/bb; note it re-reads both files rather than reusing
        # self.bbx / self.bb computed just above.
        self.bx = read_b2wdat_field(b2wdatLoc+'output/'+'bbx.dat')/read_b2wdat_field(b2wdatLoc+'output/'+'bb.dat')
        self.crx = crxs
        self.cry = crys
        #plasma parameters
        self.na = nas
        self.ua = uas
b2wdat = b2wdatResults()#instantiate class
print('done reading b2wdat files')
return b2wdat
#################################################################################
# The following functions were used to reproduce and check solps momentum results
# Probably not useful but kept just in case
#################################################################################
def b2tlnl(nx, ny, te, ti, ne, icase=0):
    """Compute the Coulomb logarithm on an (nx, ny) grid.

    Port of the B2 routine B2TLNL.  Only the Braginskii formulation
    (icase == 0) is implemented; for any other icase the returned array
    stays zero.  The internal ``lamda`` constant is a negative sentinel
    meaning "compute cell by cell" and also supplies the floor value
    ``-lamda`` applied to every cell.  Note ``ti`` is accepted for
    interface compatibility but never used.

    Parameters
    ----------
    nx, ny : int
        Grid dimensions (te/ne are indexed [ix][iy]).
    te, ti, ne : array_like, shape (nx, ny)
        Electron temperature [J], ion temperature (unused), electron
        density [m^-3].

    Returns
    -------
    ndarray, shape (nx, ny)
        The Coulomb logarithm per cell.
    """
    ev = 1.6021766339999999E-019
    coulomb_log = np.zeros(np.shape(te))
    lamda = -5.0  # negative: evaluate the Braginskii fit; -lamda is the floor
    if lamda < 0:
        if icase == 0:  # Braginskii formulas (low-/high-Te branches at 50 eV)
            floor_val = -lamda
            for iy in range(ny):
                for ix in range(nx):
                    te_ev = te[ix][iy] / ev
                    ne_cgs = ne[ix][iy] / 1.0e6  # m^-3 -> cm^-3
                    if te_ev <= 50.0:
                        value = 23.4 - 1.15 * math.log10(ne_cgs) + 3.45 * math.log10(te_ev)
                    else:
                        value = 25.3 - 1.15 * math.log10(ne_cgs) + 2.30 * math.log10(te_ev)
                    coulomb_log[ix][iy] = max(floor_val, value)
    else:
        # Dead branch with the hard-coded lamda above: a non-negative lamda
        # would be returned directly as a constant Coulomb logarithm.
        coulomb_log = lamda
    return coulomb_log
def fce1(z):
    """Rational-polynomial fit ce1(z) used by the friction coefficients."""
    num = (1.0 + 0.24 * z) * (1.0 + 0.93 * z)
    den = (1.0 + 2.56 * z) * (1.0 + 0.29 * z)
    return num / den


def fce2(z):
    """Rational-polynomial fit ce2(z), scaled by the constant 1.56."""
    num = (1.0 + 1.40 * z) * (1.0 + 0.52 * z)
    den = (1.0 + 2.56 * z) * (1.0 + 0.29 * z)
    return num / den * 1.56


def fce2n(z):
    """ce2(z) normalised by (z + sqrt(2)/2)."""
    return fce2(z) / (z + math.sqrt(2.0) / 2.0)


def fal_cen(z):
    """Coefficient ratio -ce2n(z) / ce1(z)."""
    return -fce2n(z) / fce1(z)
def zmffCalc(zamax, na, ns, ismain):
    """Effective impurity-charge weighting relative to the main species.

    Computes sum over all species r != ismain of Z_r^2 * n_r, normalised
    by Z_main^2 * n_main, cell by cell on the (nx, ny) grid.

    Parameters
    ----------
    zamax : array_like, length ns
        Maximum charge state per species.
    na : ndarray, shape (nx, ny, ns)
        Species densities.
    ns : int
        Number of species to sum over.
    ismain : int
        Index of the main ion species.
    """
    total = np.zeros(np.shape(na[:, :, 0]))
    for species in range(ns):
        if species != ismain:
            total = total + zamax[species] ** 2 * na[:, :, species]
    return total / (zamax[ismain] ** 2 * na[:, :, ismain])
def fkabvp(a, b, zamax, na):
    """Velocity-part friction weight between species a and b.

    Returns (per grid cell): the impurity coefficient ce1(zmff) when exactly
    one of a, b is the main ion and both are charged; ones when a and b are
    two distinct charged non-main species; zeros in every other case
    (identical species, or any neutral involved).
    """
    ismain = 1
    ns = len(zamax)
    zmff = zmffCalc(zamax, na, ns, ismain)
    cimp1 = fce1(zmff)
    both_charged = (not is_neutral(a)) and (not is_neutral(b))
    grid_shape = np.shape(na[:, :, 0])
    if both_charged and a != b:
        if a == ismain or b == ismain:
            return cimp1
        # two distinct charged impurities
        return np.ones(grid_shape)
    return np.zeros(grid_shape)
def fkabtf(a, b, zamax, na):
    """Thermal-force friction weight between species a and b.

    Non-zero (the impurity coefficient ce2(zmff)) only when b is the main
    ion, a is a different charged species, and neither is neutral; every
    other combination yields 0.0.
    """
    ismain = 1
    zmff = zmffCalc(zamax, na, len(zamax), ismain)
    cimp2 = fce2(zmff)
    if b == ismain and a != b and not is_neutral(a) and not is_neutral(b):
        return cimp2
    return 0.0
def fka(a, zamax, na, am):
    """Friction normalisation for species a.

    Accumulates, over all species r:
        Z_r^2 * n_r * sqrt(mp) * sqrt(m_a m_r / (m_a + m_r))
    and scales the result by Z_a^2.  ``mp`` is a module-level constant
    (presumably the proton mass — defined elsewhere in this file).
    """
    charge_sq = zamax ** 2
    total = np.zeros(np.shape(na[:, :, 0]))
    for r in range(len(zamax)):
        reduced_mass = math.sqrt(am[a] * am[r] / (am[a] + am[r]))
        total = total + charge_sq[r] * na[:, :, r] * math.sqrt(mp) * reduced_mass
    return total * charge_sq[a]
def b2xpne(ns, rza, na):
    """Electron density from quasi-neutrality (port of b2aux/b2xpne.F).

    ne(,) = sum over species s of rza[s] * na[:, :, s]

    Parameters
    ----------
    ns : int
        Number of species to sum.
    rza : sequence of float
        Charge per species.
    na : ndarray, shape (nx, ny, ns)
        Species densities.

    Returns
    -------
    ndarray, shape (nx, ny)
        Electron density per cell.
    """
    electron_density = np.zeros(np.shape(na[:, :, 0]))
    for s in range(ns):
        electron_density = electron_density + rza[s] * na[:, :, s]
    return electron_density
| [
"numpy.abs",
"numpy.shape",
"matplotlib.colors.LogNorm",
"numpy.arange",
"matplotlib.colors.SymLogNorm",
"numpy.prod",
"matplotlib.pyplot.colorbar",
"math.log10",
"matplotlib.pyplot.subplots",
"numpy.size",
"matplotlib.pyplot.show",
"math.sqrt",
"numpy.asarray",
"matplotlib.collections.Pat... | [((1144, 1161), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (1154, 1161), True, 'import numpy as np\n'), ((2646, 2664), 'numpy.array', 'np.array', (['fieldVal'], {}), '(fieldVal)\n', (2654, 2664), True, 'import numpy as np\n'), ((4487, 4505), 'numpy.array', 'np.array', (['fieldVal'], {}), '(fieldVal)\n', (4495, 4505), True, 'import numpy as np\n'), ((35041, 35059), 'numpy.array', 'np.array', (['dataList'], {}), '(dataList)\n', (35049, 35059), True, 'import numpy as np\n'), ((36821, 36835), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (36829, 36835), True, 'import numpy as np\n'), ((44268, 44286), 'numpy.array', 'np.array', (['surfaces'], {}), '(surfaces)\n', (44276, 44286), True, 'import numpy as np\n'), ((45371, 45387), 'numpy.arange', 'np.arange', (['(0)', 'ny'], {}), '(0, ny)\n', (45380, 45387), True, 'import numpy as np\n'), ((46292, 46324), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(9, 11)'}), '(1, figsize=(9, 11))\n', (46304, 46324), True, 'import matplotlib.pyplot as plt\n'), ((47128, 47160), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""R [m]"""'], {'fontsize': '(25)'}), "('R [m]', fontsize=25)\n", (47138, 47160), True, 'import matplotlib.pyplot as plt\n'), ((47164, 47196), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z [m]"""'], {'fontsize': '(25)'}), "('Z [m]', fontsize=25)\n", (47174, 47196), True, 'import matplotlib.pyplot as plt\n'), ((47200, 47214), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (47208, 47214), True, 'import matplotlib.pyplot as plt\n'), ((47285, 47295), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47293, 47295), True, 'import matplotlib.pyplot as plt\n'), ((2228, 2241), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (2235, 2241), True, 'import numpy as np\n'), ((2674, 2687), 'numpy.size', 'np.size', (['dims'], {}), '(dims)\n', (2681, 2687), True, 'import numpy as np\n'), ((30925, 30940), 'numpy.array', 
'np.array', (['nodes'], {}), '(nodes)\n', (30933, 30940), True, 'import numpy as np\n'), ((45312, 45326), 'numpy.shape', 'np.shape', (['xPts'], {}), '(xPts)\n', (45320, 45326), True, 'import numpy as np\n'), ((45339, 45353), 'numpy.shape', 'np.shape', (['xPts'], {}), '(xPts)\n', (45347, 45353), True, 'import numpy as np\n'), ((45406, 45422), 'numpy.arange', 'np.arange', (['(0)', 'nx'], {}), '(0, nx)\n', (45415, 45422), True, 'import numpy as np\n'), ((45743, 45817), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches', '(False)'], {'cmap': '"""bwr"""', 'edgecolor': '"""k"""', 'linewidth': '(0.15)'}), "(patches, False, cmap='bwr', edgecolor='k', linewidth=0.15)\n", (45758, 45817), False, 'from matplotlib.collections import PatchCollection\n'), ((45836, 45912), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches', '(False)'], {'cmap': 'colormap', 'edgecolor': '"""k"""', 'linewidth': '(0.1)'}), "(patches, False, cmap=colormap, edgecolor='k', linewidth=0.1)\n", (45851, 45912), False, 'from matplotlib.collections import PatchCollection\n'), ((45925, 45939), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (45933, 45939), True, 'import numpy as np\n'), ((47255, 47276), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (47266, 47276), True, 'import matplotlib.pyplot as plt\n'), ((47919, 47943), 'numpy.array', 'np.array', (['fieldVal[::-1]'], {}), '(fieldVal[::-1])\n', (47927, 47943), True, 'import numpy as np\n'), ((55661, 55673), 'numpy.shape', 'np.shape', (['te'], {}), '(te)\n', (55669, 55673), True, 'import numpy as np\n'), ((56587, 56608), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (56595, 56608), True, 'import numpy as np\n'), ((57904, 57925), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (57912, 57925), True, 'import numpy as np\n'), ((58449, 58470), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (58457, 58470), True, 
'import numpy as np\n'), ((1173, 1194), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (1179, 1194), True, 'import numpy as np\n'), ((4514, 4527), 'numpy.size', 'np.size', (['dims'], {}), '(dims)\n', (4521, 4527), True, 'import numpy as np\n'), ((4620, 4633), 'numpy.size', 'np.size', (['dims'], {}), '(dims)\n', (4627, 4633), True, 'import numpy as np\n'), ((31919, 31939), 'numpy.zeros', 'np.zeros', (['[ntria, 3]'], {}), '([ntria, 3])\n', (31927, 31939), True, 'import numpy as np\n'), ((31965, 31985), 'numpy.zeros', 'np.zeros', (['[ntria, 3]'], {}), '([ntria, 3])\n', (31973, 31985), True, 'import numpy as np\n'), ((32011, 32031), 'numpy.zeros', 'np.zeros', (['[ntria, 3]'], {}), '([ntria, 3])\n', (32019, 32031), True, 'import numpy as np\n'), ((32057, 32077), 'numpy.zeros', 'np.zeros', (['[ntria, 2]'], {}), '([ntria, 2])\n', (32065, 32077), True, 'import numpy as np\n'), ((33942, 33958), 'numpy.array', 'np.array', (['nodeXs'], {}), '(nodeXs)\n', (33950, 33958), True, 'import numpy as np\n'), ((33985, 34001), 'numpy.array', 'np.array', (['nodeYs'], {}), '(nodeYs)\n', (33993, 34001), True, 'import numpy as np\n'), ((34027, 34047), 'numpy.array', 'np.array', (['centroidsX'], {}), '(centroidsX)\n', (34035, 34047), True, 'import numpy as np\n'), ((34073, 34093), 'numpy.array', 'np.array', (['centroidsY'], {}), '(centroidsY)\n', (34081, 34093), True, 'import numpy as np\n'), ((36775, 36789), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (36783, 36789), True, 'import numpy as np\n'), ((46092, 46139), 'matplotlib.colors.LogNorm', 'colorsMPL.LogNorm', ([], {'vmin': 'minColor', 'vmax': 'maxColor'}), '(vmin=minColor, vmax=maxColor)\n', (46109, 46139), True, 'import matplotlib.colors as colorsMPL\n'), ((46190, 46283), 'matplotlib.colors.SymLogNorm', 'colorsMPL.SymLogNorm', ([], {'linthresh': '(maxColor / 10)', 'linscale': '(0.5)', 'vmin': 'minColor', 'vmax': 'maxColor'}), '(linthresh=maxColor / 10, linscale=0.5, vmin=minColor,\n 
vmax=maxColor)\n', (46210, 46283), True, 'import matplotlib.colors as colorsMPL\n'), ((46492, 46541), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'ax': 'axs', 'pad': '(0.01)', 'ticks': 'tickLocs'}), '(p, ax=axs, pad=0.01, ticks=tickLocs)\n', (46504, 46541), True, 'import matplotlib.pyplot as plt\n'), ((46618, 46651), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'ax': 'axs', 'pad': '(0.01)'}), '(p, ax=axs, pad=0.01)\n', (46630, 46651), True, 'import matplotlib.pyplot as plt\n'), ((3531, 3544), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (3538, 3544), True, 'import numpy as np\n'), ((45593, 45622), 'numpy.column_stack', 'np.column_stack', (['(rcol, zcol)'], {}), '((rcol, zcol))\n', (45608, 45622), True, 'import numpy as np\n'), ((46823, 46840), 'numpy.shape', 'np.shape', (['wallGeo'], {}), '(wallGeo)\n', (46831, 46840), True, 'import numpy as np\n'), ((56468, 56482), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (56477, 56482), False, 'import math\n'), ((58000, 58042), 'math.sqrt', 'math.sqrt', (['(am[a] * am[r] / (am[a] + am[r]))'], {}), '(am[a] * am[r] / (am[a] + am[r]))\n', (58009, 58042), False, 'import math\n'), ((3817, 3830), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (3824, 3830), True, 'import numpy as np\n'), ((33070, 33090), 'numpy.shape', 'np.shape', (['self.cells'], {}), '(self.cells)\n', (33078, 33090), True, 'import numpy as np\n'), ((46867, 46907), 'numpy.array', 'np.array', (['[wallGeo[j][2], wallGeo[j][4]]'], {}), '([wallGeo[j][2], wallGeo[j][4]])\n', (46875, 46907), True, 'import numpy as np\n'), ((46911, 46951), 'numpy.array', 'np.array', (['[wallGeo[j][3], wallGeo[j][5]]'], {}), '([wallGeo[j][3], wallGeo[j][5]])\n', (46919, 46951), True, 'import numpy as np\n'), ((57197, 57218), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (57205, 57218), True, 'import numpy as np\n'), ((57986, 57999), 'math.sqrt', 'math.sqrt', (['mp'], {}), '(mp)\n', (57995, 57999), False, 'import math\n'), 
((3701, 3714), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (3708, 3714), True, 'import numpy as np\n'), ((3908, 3921), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (3915, 3921), True, 'import numpy as np\n'), ((57308, 57329), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (57316, 57329), True, 'import numpy as np\n'), ((57363, 57384), 'numpy.shape', 'np.shape', (['na[:, :, 0]'], {}), '(na[:, :, 0])\n', (57371, 57384), True, 'import numpy as np\n'), ((56011, 56038), 'math.log10', 'math.log10', (['(te[ix][iy] / ev)'], {}), '(te[ix][iy] / ev)\n', (56021, 56038), False, 'import math\n'), ((56200, 56227), 'math.log10', 'math.log10', (['(te[ix][iy] / ev)'], {}), '(te[ix][iy] / ev)\n', (56210, 56227), False, 'import math\n'), ((55937, 55971), 'math.log10', 'math.log10', (['(ne[ix][iy] / 1000000.0)'], {}), '(ne[ix][iy] / 1000000.0)\n', (55947, 55971), False, 'import math\n'), ((56125, 56159), 'math.log10', 'math.log10', (['(ne[ix][iy] / 1000000.0)'], {}), '(ne[ix][iy] / 1000000.0)\n', (56135, 56159), False, 'import math\n')] |
import numpy as np

# Generate num_points samples scattered around the line y = 0.1*x + 0.3
# with Gaussian noise in both coordinates.
num_points = 1000
vectors_set = []
# Fix: Python 2's xrange() does not exist in Python 3; range() is the
# equivalent replacement.
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
# The 2 lines below are useful on OS X to prevent the following error:
# RuntimeError: Python is not installed as a framework. The Mac OS X backend will not be able to function correctly if Python is not installed as a framework. See the Python documentation for more information on installing Python as a framework on Mac OS X. Please either reinstall Python as a framework, or try one of the other backends. If you are Working with Matplotlib in a virtual enviroment see 'Working with Matplotlib in Virtual environments' in the Matplotlib FAQ
import matplotlib as mpl
mpl.use('TkAgg')
# Display Random points
import matplotlib.pyplot as plt
plt.plot(x_data, y_data, 'ro', label='Original data')
plt.legend()
plt.show()
import tensorflow as tf
# NOTE(review): the calls below are TensorFlow 1.x APIs (tf.random_uniform,
# tf.initialize_all_variables, tf.Session); they were removed in TF 2.x —
# run under TF1 or via tf.compat.v1.
# Model: y = W*x + b, with W initialised uniformly in [-1, 1) and b at 0.
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Cost function calculation (mean squared error)
loss = tf.reduce_mean(tf.square(y - y_data))
# We want to minimize the cost function
# We train the Optimizer which is the gradient descent algo to the cost function defined
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Fix: xrange -> range (Python 3).  Eight gradient-descent steps.
for step in range(8):
    sess.run(train)
    print(step, sess.run(W), sess.run(b))
    print(step, sess.run(loss))
# Display Graphic: data points plus the fitted line
plt.plot(x_data, y_data, 'ro')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
plt.xlabel('x')
plt.xlim(-2, 2)
plt.ylabel('y')
plt.ylim(0.1, 0.6)
plt.legend()
plt.show()
| [
"tensorflow.random_uniform",
"matplotlib.pyplot.xlim",
"tensorflow.square",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"tensorflow.Session",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"tensorflow.zeros",
"tensorflow.initialize_a... | [((874, 890), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (881, 890), True, 'import matplotlib as mpl\n'), ((949, 1002), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'y_data', '"""ro"""'], {'label': '"""Original data"""'}), "(x_data, y_data, 'ro', label='Original data')\n", (957, 1002), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1015), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1013, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1024, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1410), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (1405, 1410), True, 'import tensorflow as tf\n'), ((1452, 1481), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1479, 1481), True, 'import tensorflow as tf\n'), ((1490, 1502), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1500, 1502), True, 'import tensorflow as tf\n'), ((118, 145), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.55)'], {}), '(0.0, 0.55)\n', (134, 145), True, 'import numpy as np\n'), ((1069, 1102), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]', '(-1.0)', '(1.0)'], {}), '([1], -1.0, 1.0)\n', (1086, 1102), True, 'import tensorflow as tf\n'), ((1120, 1133), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (1128, 1133), True, 'import tensorflow as tf\n'), ((1207, 1228), 'tensorflow.square', 'tf.square', (['(y - y_data)'], {}), '(y - y_data)\n', (1216, 1228), True, 'import tensorflow as tf\n'), ((1647, 1677), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'y_data', '"""ro"""'], {}), "(x_data, y_data, 'ro')\n", (1655, 1677), True, 'import matplotlib.pyplot as plt\n'), ((1733, 1748), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1743, 1748), True, 'import matplotlib.pyplot as 
plt\n'), ((1750, 1765), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2)', '(2)'], {}), '(-2, 2)\n', (1758, 1765), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1777, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1802), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.1)', '(0.6)'], {}), '(0.1, 0.6)\n', (1792, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1816), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1814, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1826, 1828), True, 'import matplotlib.pyplot as plt\n'), ((169, 196), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (185, 196), True, 'import numpy as np\n')] |
##lots of imports. Some are unnecessary but I left a lot just to be safe...
import matplotlib.pyplot as plt
import matplotlib
from astropy.io import fits
import numpy as np
import astropy.table as t
import matplotlib.image as img
from scipy.optimize import newton
from pathlib import Path
import math
import matplotlib.cm as cm
import matplotlib.mlab as mlab
from matplotlib.patches import Ellipse
import numpy.random as rnd
from matplotlib import patches
import sys
from scipy.optimize import curve_fit
from mpl_toolkits.axes_grid1 import make_axes_locatable
import re
# Script: plot g-i colour vs. log stellar mass for the full MaNGA drpall
# catalogue (green), then overplot (black) the subset of galaxies listed in
# a "good galaxies" text file identified by plate-IFU.
# Open the MaNGA DRP summary catalogue; HDU 1 holds the per-galaxy table.
drpall=fits.open('/home/celeste/Documents/astro_research/drpall-v2_3_1.fits')
# NSA elliptical-Petrosian absolute magnitudes; the column indices used
# below (2..5) are taken to be the u, g, r, i bands — TODO confirm band
# ordering against the drpall datamodel.
mags=drpall[1].data['NSA_ELPETRO_ABSMAG']
u=mags[:,2]
g=mags[:,3]
r=mags[:,4]
i=mags[:,5]
# log10 stellar mass; the -log10(.49) term is presumably an h^-2 (h=0.7)
# correction to the NSA masses — confirm before reuse.
mass_total = np.log10(drpall[1].data['nsa_elpetro_mass'])-np.log10(.49)
# Background population: every catalogue galaxy in green.
plt.scatter(mass_total, g-i, c = 'g', alpha = 0.5)
#plt.scatter(mass_total, r-i, c = 'r', alpha = 0.5)
#plt.scatter(mass_total, u-r, c = 'b', alpha = 0.5)
plt.xlabel("$Log_{10}$ Stellar Mass")
xmin, xmax = 8.5, 12
ymin, ymax = -1, 2
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# First column of this file holds the galaxy identifiers (plate-ifu).
filename = '/home/celeste/Documents/astro_research/thesis_git/Good_Galaxies_SPX_3_N2S2.txt'
file_names = np.genfromtxt(filename, usecols=(0), skip_header=1, dtype=str, delimiter=',')
plate_num = []
fiber_num = []
# NOTE(review): 'split' is assigned but never used below.
split = []
#file_open = open("error_files.txt", "w")
mass_new = []
colorg_new = []
colorr_new = []
colori_new = []
coloru_new = []
tbdata = drpall[1].data
##Goes through all files in the folder
for ii in range(0, len(file_names)):
    ##Removes all non alphanumeric characters and only leaves numbers and periods
    # (keeps digits and the '-' separating plate from ifudesign)
    file_names[ii] = re.sub("[^0-9-]", "", file_names[ii])
    #print(file_names[ii])
    #print(file_names[ii][4:])
    #print(file_names[ii][:4])
    ##splits the two numbers into a plate number and fiber number
    one, two = (str(file_names[ii]).split('-'))
    ##splits the two numbers into a plate number and fiber number
    plate_num.insert(ii, one)
    fiber_num.insert(ii, two)
# Look each good galaxy up in the drpall table by its reassembled plateifu
# string and collect its mass and ugri magnitudes.
for x in range(0, len(plate_num)):
    plateifu = (str(plate_num[x]) + '-' + str(fiber_num[x]))
    ind = np.where(tbdata['plateifu'] == plateifu)
    # Same h^-2-style correction as mass_total above.
    mass_new.insert(x, np.log10(tbdata['nsa_elpetro_mass'][ind][0])-np.log10(.49))
    # Each insert stores a length-1 array (the match for this plateifu).
    coloru_new.insert(x, tbdata['NSA_ELPETRO_ABSMAG'][ind][:,2])
    colorg_new.insert(x, tbdata['NSA_ELPETRO_ABSMAG'][ind][:,3])
    colorr_new.insert(x, tbdata['NSA_ELPETRO_ABSMAG'][ind][:,4])
    colori_new.insert(x, tbdata['NSA_ELPETRO_ABSMAG'][ind][:,5])
print(mass_new)
print(np.asarray(coloru_new)-np.asarray(colorr_new))
# Overplot the good-galaxy subset in black on the same axes, then save.
plt.scatter(mass_new, np.asarray(colorg_new)-np.asarray(colori_new), c='black')
plt.ylabel("g-i color")
plt.savefig("g-i_color.png")
plt.close()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.genfromtxt",
"numpy.where",
"astropy.io.fits.open",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"re.sub",
"matplotlib.pyplot.savefig"... | [((579, 649), 'astropy.io.fits.open', 'fits.open', (['"""/home/celeste/Documents/astro_research/drpall-v2_3_1.fits"""'], {}), "('/home/celeste/Documents/astro_research/drpall-v2_3_1.fits')\n", (588, 649), False, 'from astropy.io import fits\n'), ((816, 864), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mass_total', '(g - i)'], {'c': '"""g"""', 'alpha': '(0.5)'}), "(mass_total, g - i, c='g', alpha=0.5)\n", (827, 864), True, 'import matplotlib.pyplot as plt\n'), ((971, 1008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Log_{10}$ Stellar Mass"""'], {}), "('$Log_{10}$ Stellar Mass')\n", (981, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1069), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1057, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1090), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (1078, 1090), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1273), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'usecols': '(0)', 'skip_header': '(1)', 'dtype': 'str', 'delimiter': '""","""'}), "(filename, usecols=0, skip_header=1, dtype=str, delimiter=',')\n", (1211, 1273), True, 'import numpy as np\n'), ((2671, 2694), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""g-i color"""'], {}), "('g-i color')\n", (2681, 2694), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2723), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""g-i_color.png"""'], {}), "('g-i_color.png')\n", (2706, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2735), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2733, 2735), True, 'import matplotlib.pyplot as plt\n'), ((756, 800), 'numpy.log10', 'np.log10', (["drpall[1].data['nsa_elpetro_mass']"], {}), "(drpall[1].data['nsa_elpetro_mass'])\n", (764, 800), True, 'import numpy as np\n'), ((801, 815), 'numpy.log10', 'np.log10', (['(0.49)'], {}), '(0.49)\n', (809, 815), True, 'import numpy as 
np\n'), ((1646, 1683), 're.sub', 're.sub', (['"""[^0-9-]"""', '""""""', 'file_names[ii]'], {}), "('[^0-9-]', '', file_names[ii])\n", (1652, 1683), False, 'import re\n'), ((2133, 2173), 'numpy.where', 'np.where', (["(tbdata['plateifu'] == plateifu)"], {}), "(tbdata['plateifu'] == plateifu)\n", (2141, 2173), True, 'import numpy as np\n'), ((2544, 2566), 'numpy.asarray', 'np.asarray', (['coloru_new'], {}), '(coloru_new)\n', (2554, 2566), True, 'import numpy as np\n'), ((2567, 2589), 'numpy.asarray', 'np.asarray', (['colorr_new'], {}), '(colorr_new)\n', (2577, 2589), True, 'import numpy as np\n'), ((2613, 2635), 'numpy.asarray', 'np.asarray', (['colorg_new'], {}), '(colorg_new)\n', (2623, 2635), True, 'import numpy as np\n'), ((2636, 2658), 'numpy.asarray', 'np.asarray', (['colori_new'], {}), '(colori_new)\n', (2646, 2658), True, 'import numpy as np\n'), ((2197, 2241), 'numpy.log10', 'np.log10', (["tbdata['nsa_elpetro_mass'][ind][0]"], {}), "(tbdata['nsa_elpetro_mass'][ind][0])\n", (2205, 2241), True, 'import numpy as np\n'), ((2242, 2256), 'numpy.log10', 'np.log10', (['(0.49)'], {}), '(0.49)\n', (2250, 2256), True, 'import numpy as np\n')] |
import numpy as np
import time
class BananaPixelEnv():
    """Wrapper around a Unity ML-Agents visual environment that stacks the
    most recent ``num_frames`` observations into one state tensor.

    The state has shape (1, channels, num_frames, height, width); newer
    frames occupy lower indices along the frame axis, and missing history
    (right after reset) is zero-filled.
    """

    def __init__(self, env, num_frames=4):
        """Wrap ``env``; ``num_frames`` controls the depth of the frame stack."""
        # Most-recent-first list of observed frames.
        self.frame_buffer = []
        self.brain_names = env.brain_names
        self.env = env
        self.num_frames = num_frames

    def _update_state(self):
        """Push the latest observation into the buffer and rebuild self.state."""
        # Unity returns (1, H, W, C); convert to channel-first (1, C, H, W).
        frame = np.transpose(self.env_info.visual_observations[0], (0, 3, 1, 2))
        frame_size = frame.shape
        self.state = np.zeros((1, frame_size[1], self.num_frames, frame_size[2], frame_size[3]))
        self.frame_buffer.insert(0, frame)
        # Bug fix: trim to num_frames instead of the hard-coded 4, so that
        # num_frames != 4 no longer overruns the frame axis in the loop below.
        if len(self.frame_buffer) > self.num_frames:
            self.frame_buffer.pop()
        for i, f in enumerate(self.frame_buffer):
            self.state[0, :, i, :, :] = f

    def reset(self):
        """Reset the environment in train mode and return the initial state."""
        self.env_info = self.env.reset(train_mode=True)[self.brain_names[0]]
        self._update_state()
        return self.state

    def step(self, action):
        """Advance one step; returns (state, reward, done, None) gym-style."""
        self.env_info = self.env.step(np.int32(action).astype(np.int32))[self.brain_names[0]]
        self._update_state()
        reward = self.env_info.rewards[0]
        done = self.env_info.local_done[0]
        return self.state, reward, done, None
| [
"numpy.transpose",
"numpy.zeros",
"numpy.int32"
] | [((410, 485), 'numpy.zeros', 'np.zeros', (['(1, frame_size[1], self.num_frames, frame_size[2], frame_size[3])'], {}), '((1, frame_size[1], self.num_frames, frame_size[2], frame_size[3]))\n', (418, 485), True, 'import numpy as np\n'), ((279, 343), 'numpy.transpose', 'np.transpose', (['self.env_info.visual_observations[0]', '(0, 3, 1, 2)'], {}), '(self.env_info.visual_observations[0], (0, 3, 1, 2))\n', (291, 343), True, 'import numpy as np\n'), ((944, 960), 'numpy.int32', 'np.int32', (['action'], {}), '(action)\n', (952, 960), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def norm(inp):
    r"""Compute the magnitude of a vector time series.

    The Euclidean norm is taken over axis 1 (the vector components), so a
    (time, 3) field yields a (time,) series.

    Parameters
    ----------
    inp : xarray.DataArray
        Time series of the input field.

    Returns
    -------
    out : xarray.DataArray
        Time series of the magnitude of the input field.

    Examples
    --------
    >>> from pyrfu import mms, pyrf
    >>> tint = ["2019-09-14T07:54:00.000", "2019-09-14T08:11:00.000"]
    >>> b_xyz = mms.get_data("B_gse_fgm_srvy_l2", tint, 1)
    >>> b_mag = pyrf.norm(b_xyz)
    """
    squared_sum = np.sum(np.square(inp), axis=1)
    return np.sqrt(squared_sum)
| [
"numpy.sum"
] | [((903, 927), 'numpy.sum', 'np.sum', (['(inp ** 2)'], {'axis': '(1)'}), '(inp ** 2, axis=1)\n', (909, 927), True, 'import numpy as np\n')] |
import threading
import numpy as np
import matplotlib
from thimbgui import QtGui, QtWidgets, QtCore, Qt
class FeatureFitWidget(QtWidgets.QWidget):
slidersChanged = Signal(int)
    def __init__(self, features, feature_idx, parent=None):
        """Build the feature-fitting widget.

        Parameters
        ----------
        features : sequence
            Spectral Feature objects to display and fit.
        feature_idx : int
            Index of the feature initially selected.
        parent : QWidget, optional
            Qt parent widget.
        """
        super(FeatureFitWidget, self).__init__(parent)
        self.display_width = options.display_width
        #self.spectra = spectra
        self.features = features
        self.feature = features[feature_idx]
        self.feature_idx = feature_idx
        # User-placed continuum hints (filled by add_norm_hint).
        self.norm_hint_wvs = []
        self.norm_hint_fluxes = []
        self.lay = QtGui.QGridLayout()
        # Two stacked fit plots sharing the x axis, spanning rows 1-3.
        self.mpl_fit = MatplotlibWidget(parent=parent, nrows=2, sharex="columns")
        self.lay.addWidget(self.mpl_fit, 1, 0, 3, 1)
        slider_orientation = Qt.Vertical
        slider_n_steps = 200
        # One vertical slider per profile parameter.
        self.off_slider = FloatSlider("offset", -0.15, 0.15, orientation=slider_orientation, n_steps=slider_n_steps)
        self.d_slider = FloatSlider("depth", 0.0, 1.0, orientation=slider_orientation, n_steps=slider_n_steps)
        self.g_slider = FloatSlider("sigma", 0.0, 1.0, orientation=slider_orientation, n_steps=slider_n_steps)
        self.l_slider = FloatSlider("gamma", 0.0, 1.0, orientation=slider_orientation, n_steps=slider_n_steps)
        self.cont_slider = FloatSlider("rel norm", 0.90, 1.10, orientation=slider_orientation, n_steps=slider_n_steps)
        # Grid placement (row, col, rowspan, colspan) for each slider, in order.
        slider_grid = [(2, 1, 1, 1), (2, 2, 1, 1), (2, 3, 1, 1), (2, 4, 1, 1), (2, 5, 1, 1)]
        slider_list = [self.off_slider, self.d_slider, self.g_slider, self.l_slider, self.cont_slider]
        for sl_idx in range(len(slider_list)):
            self.lay.addWidget(slider_list[sl_idx], *slider_grid[sl_idx])
        #previous/next setup
        self.prev_next = PrevNext(duration=1.0, parent=self)
        self.lay.addWidget(self.prev_next, 1, 1, 1, 4)
        #output_file button
        self.output_button = QtGui.QPushButton("save measurements")
        self.output_button.clicked.connect(self.save_measurements)
        self.lay.addWidget(self.output_button, 3, 1, 1, 2)
        #use check box
        self.use_cb = QtGui.QCheckBox("Use line")
        self.use_cb.setChecked(self.feature.flags["use"])
        self.lay.addWidget(self.use_cb, 3, 3, 1, 1)
        # Populate the table/plots/sliders, then wire up signals.
        self._init_feature_table()
        self._init_plots()
        self._init_slider_vals()
        self._internal_connect()
        self.setLayout(self.lay)
    def minimumSizeHint(self):
        # Qt layout hook: request at least 500x500 px for this widget.
        return QtGui.QSize(500, 500)
def save_feature_fits(self, fname):
import pickle
pickle.dump(self.features, open(fname, "wb"))
    def save_measurements(self):
        """Prompt for a path and write the features as a MOOG linelist plus a pickle.

        Both writes are best-effort: failures are printed rather than raised
        so the GUI stays responsive.
        """
        fname, file_filter = QtGui.QFileDialog.getSaveFileName(self, "save measurements")
        try:
            tmb.io.linelist_io.write_moog_from_features(fname, self.features)
        except Exception as e:
            print(e)
        try:
            # NOTE(review): fname.split(".")[:] keeps every piece, so this is
            # effectively fname + ".features.pkl"; [:-1] (dropping the
            # extension) was probably intended — confirm before changing.
            feat_fname = ".".join(fname.split(".")[:]) + ".features.pkl"
            self.save_feature_fits(feat_fname)
        except Exception as e:
            print(e)
    @property
    def hint_click_on(self):
        # Hard-wired off: middle-click normalization hints are currently
        # disabled (handle_plot_click checks this before adding a hint).
        return False
def handle_plot_click(self, eventl):
event ,= eventl
#print "clicked!", event.button
if event.button == 2:
if self.hint_click_on:
hwv = event.xdata
hflux = event.ydata
self.add_norm_hint(hwv, hflux)
    def add_norm_hint(self, wv, flux):
        """Record a user-placed continuum hint and refit the normalization.

        Parameters
        ----------
        wv, flux : float
            Wavelength and flux of the clicked continuum point.
        """
        self.norm_hint_wvs.append(wv)
        self.norm_hint_fluxes.append(flux)
        #todo add a realistic error estimate for the hints
        # All hints currently carry a fixed weight of 10.0.
        hint_tuple = self.norm_hint_wvs, self.norm_hint_fluxes, np.ones(len(self.norm_hint_wvs), dtype=float)*10.0
        # NOTE(review): self.spectrum is not assigned in __init__ (the spectra
        # line there is commented out) — confirm it is set elsewhere.
        tmb.utils.misc.approximate_normalization(self.spectrum,norm_hints=hint_tuple,overwrite=True)
        self.update_plots()
def fit_axis(self, row):
return self.mpl_fit.axis(row, 0)
    def _internal_connect(self):
        """Wire widget signals to their handler slots (called once from __init__)."""
        self.mpl_fit.buttonPressed.connect(self.handle_plot_click)
        self._connect_sliders()
        self.slidersChanged.connect(self.update_row)
        #print self.linelist_view.selectionModel()
        #print dir(self.linelist_view.selectionModel())
        #self.linelist_view.selectionModel().currentRowChanged.connect(self.on_selection_change)
        # Double-clicking a table row jumps to that feature.
        self.linelist_view.doubleClicked.connect(self.set_feature)
        self.prev_next.next.connect(self.next_feature)
        self.prev_next.prev.connect(self.prev_feature)
        self.use_cb.stateChanged.connect(self.set_use)
def set_use(self, state_val):
self.feature.flags["use"] = state_val > 0
    def on_selection_change(self, row):
        # Debug stub; the currentRowChanged hookup in _internal_connect is
        # commented out, so nothing in this class currently routes here.
        print("in on selection change", row)
        #print "in on_selection_change", selection
        #print dir(selection)
def set_feature(self, index):
row = index.row()
self.feature_idx = row
self.feature = self.features[self.feature_idx]
self.linelist_view.selectRow(self.feature_idx)
self.on_feature_changed()
def next_feature(self):
next_idx = self.feature_idx + 1
if next_idx > self.linelist_model.rowCount()-1:
next_idx = self.linelist_model.rowCount()-1
self.prev_next.pause()
self.feature_idx = next_idx
self.feature = self.features[self.feature_idx]
self.linelist_view.selectRow(self.feature_idx)
self.on_feature_changed()
def prev_feature(self):
prev_idx = self.feature_idx - 1
if prev_idx < 0:
prev_idx = 0
self.prev_next.pause()
self.feature_idx = prev_idx
self.feature_idx = max(self.feature_idx - 1, 0)
self.feature = self.features[self.feature_idx]
self.linelist_view.selectRow(self.feature_idx)
self.on_feature_changed()
    def _init_feature_table(self):
        """Build the feature table model/view and add it to the layout.

        Each models.Column pairs a header with role->formatter callables
        over a feature object; only the Notes column is editable.
        """
        drole = Qt.DisplayRole
        # crole is only referenced by the commented-out "Viewed" column below.
        crole = Qt.CheckStateRole
        wvcol = models.Column("Wavelength", getter_dict = {drole: lambda x: "%10.3f" % x.wv})
        spcol = models.Column("Species", getter_dict = {drole: lambda x: "%10.3f" % x.species})
        epcol = models.Column("Excitation\nPotential", {drole: lambda x:"%10.3f" % x.ep})
        loggfcol = models.Column("log(gf)", {drole: lambda x: "%10.3f" % x.loggf})
        offsetcol = models.Column("Offset", {drole: lambda x: "%10.3f" % x.get_offset()})
        depthcol = models.Column("Depth", {drole: lambda x: "%10.3f" % x.depth})
        # Profile parameters: index 1 is the Gaussian sigma, 2 the Lorentz gamma.
        sigcol = models.Column("sigma", {drole: lambda x: "% 10.3f" % x.profile.get_parameters()[1]})
        gamcol = models.Column("gamma", {drole: lambda x: "% 10.3f" % x.profile.get_parameters()[2]})
        # Equivalent width displayed in milli-units (factor 1000).
        ewcol = models.Column("Equivalent\nWidth", {drole: lambda x: "%10.2f" % (1000.0*x.eq_width)})
        def set_note(x, note):
            # Setter for the editable Notes column; True signals success to Qt.
            x.note = note
            return True
        notescol = models.Column("Notes", {drole:lambda x: x.note}, setter_dict={Qt.EditRole: set_note}, editable=True)
        #viewedcol = Column("Viewed", getter_dict={crole: dummy_func}, setter_dict={crole: flag_setter_factory("viewed")}, checkable=True)
        #ewcol = Column("depth"
        columns = [wvcol, spcol, epcol, loggfcol, offsetcol,
                   depthcol, sigcol, gamcol, ewcol, notescol]#, viewedcol]
        self.linelist_model = models.ConfigurableTableModel(self.features, columns)
        self.linelist_view = views.LineListView(parent=self)
        self.linelist_view.setModel(self.linelist_model)
        self.linelist_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.lay.addWidget(self.linelist_view, 0, 0, 1, 6)
def update_row(self, row_num):
left_idx = self.linelist_model.index(row_num, 0)
right_idx = self.linelist_model.index(row_num, self.linelist_model.columnCount())
self.linelist_model.dataChanged.emit(left_idx, right_idx)
def bounded_spec(self):
feat_wv = self.feature.wv
#min_wv = feat_wv-1.5*self.display_width
#max_wv = feat_wv+1.5*self.display_width
bspec = self.feature.data_sample
return bspec
    def sliders_changed(self, intval):
        """Push every slider's value into the current feature's fit and redraw.

        `intval` (the emitting slider's value) is ignored: all sliders are
        re-read so one handler serves them all.  Finishes by emitting
        slidersChanged so the table row refreshes.
        """
        #just ignore which slider caused the change get everything
        off = self.off_slider.value()
        gw = self.g_slider.value()
        lw = self.l_slider.value()
        depth = self.d_slider.value()
        relc = self.cont_slider.value()
        # Profile parameters are (offset, gaussian width, lorentz width).
        self.feature.profile.set_parameters(np.asarray([off, gw, lw]))
        self.feature.set_relative_continuum(relc)
        self.feature.set_depth(depth)
        self.update_plots()
        self.slidersChanged.emit(self.feature_idx)
    def on_feature_changed(self):
        """Refresh checkbox, sliders and plot limits for the new current feature."""
        if self.feature.flags["use"]:
            self.use_cb.setChecked(True)
        else:
            self.use_cb.setChecked(False)
        self._init_slider_vals()
        # Center the x window on the feature wavelength.
        feat_wv = self.feature.wv
        xlim_min = feat_wv-self.display_width
        xlim_max = feat_wv+self.display_width
        self.fit_axis(0).set_xlim(xlim_min, xlim_max)
        bspec = self.bounded_spec()
        ymin, ymax = np.min(bspec.flux), np.max(bspec.flux)
        ydelta = ymax-ymin
        extra_frac = 0.05  # 5% vertical padding around the data range
        self.fit_axis(0).set_ylim(ymin-extra_frac*ydelta, ymax+extra_frac*ydelta)
        self.update_plots()
    def _connect_sliders(self):
        """Route every fit-parameter slider through the single sliders_changed handler."""
        self.off_slider.slider.valueChanged.connect(self.sliders_changed)
        self.g_slider.slider.valueChanged.connect(self.sliders_changed)
        self.l_slider.slider.valueChanged.connect(self.sliders_changed)
        self.d_slider.slider.valueChanged.connect(self.sliders_changed)
        self.cont_slider.slider.valueChanged.connect(self.sliders_changed)
    def _init_slider_vals(self):
        """Load the current feature's fit parameters into the sliders.

        Reads profile parameters (offset, gaussian width, lorentz width),
        depth and relative continuum, then writes them to the widgets.
        """
        off, gw, lw = self.feature.profile.get_parameters()
        d = self.feature.depth #always access depth before setting anything
        relc = self.feature.relative_continuum
        self.off_slider.set_value(off)
        self.g_slider.set_value(gw)
        self.l_slider.set_value(lw)
        self.d_slider.set_value(d)
        self.cont_slider.set_value(relc)
    def _init_plots(self):
        """Create all fit/residual plot artists for the current feature (run once).

        Top axis: data, continuum, model flux and a vertical marker at the
        line center.  Bottom axis: residual significance with guide lines
        at 0 and +/-3.  update_plots() later mutates these cached artists.
        """
        feat_wv = self.feature.wv
        xlim_min = feat_wv-self.display_width
        xlim_max = feat_wv+self.display_width
        self.fit_axis(0).set_xlim(xlim_min, xlim_max)
        bspec = self.bounded_spec()
        self.data_line ,= self.fit_axis(0).plot(bspec.wv, bspec.flux, c="b")
        self.cont_line ,= self.fit_axis(0).plot(bspec.wv, bspec.norm, c="g")
        # Model flux is expressed relative to the continuum estimate.
        feature_model = self.feature.model_flux(bspec.wv)*bspec.norm
        self.model_line,= self.fit_axis(0).plot(bspec.wv, feature_model)
        # Continuum level at the window center sets the marker height.
        nac = bspec.norm[len(bspec.norm)//2]
        self.top_marker_line ,= self.fit_axis(0).plot([feat_wv, feat_wv], [0.7*nac, 1.1*nac], c="r", lw=1.5)
        self.bottom_marker_line ,= self.fit_axis(1).plot([feat_wv, feat_wv], [-10.0, 10.0], c="r", lw=1.5)
        #import pdb; pdb.set_trace()
        #and now for the residuals plot
        inv_var = bspec.ivar
        bkground_alpha = 0.5
        self.zero_line ,= self.fit_axis(1).plot([bspec.wv[0], bspec.wv[-1]], [0, 0], c="k", alpha=bkground_alpha, lw=2.0)
        sig_levels = [3]
        self.sig_lines = [self.fit_axis(1).plot([bspec.wv[0], bspec.wv[-1]], [sl, sl], c="k", alpha=bkground_alpha)[0] for sl in sig_levels]
        self.sig_lines.extend([self.fit_axis(1).plot([bspec.wv[0], bspec.wv[-1]], [-sl, -sl], c="k", alpha=bkground_alpha)[0] for sl in sig_levels])
        #plot the model residuals.
        # Residuals in units of sigma: sqrt(ivar) * (model - data).
        significance = np.sqrt(inv_var)*(feature_model-bspec.flux)
        self.resid_line ,= self.fit_axis(1).plot(bspec.wv, significance, c="b")
        self.fit_axis(1).set_ylim(-6, 6)
        self.fit_axis(1).set_xlabel("Wavelength")
        self.fit_axis(1).set_ylabel("Residual Significance")
        self.fit_axis(0).set_ylabel("Flux")
        self.mpl_fit.draw()
    def update_plots(self):
        """Push the current feature's data into the existing plot artists.

        Mirrors _init_plots but mutates the cached Line2D objects instead of
        creating new ones, then redraws the canvas.
        """
        feat_wv = self.feature.wv
        bspec = self.bounded_spec()
        self.data_line.set_data(bspec.wv, bspec.flux)
        bnorm = bspec.norm
        self.cont_line.set_data(bspec.wv, bnorm)
        feature_model = self.feature.model_flux(bspec.wv)*bnorm
        self.model_line.set_data(bspec.wv, feature_model)
        # Continuum level at the window center sets the marker height.
        nac = bspec.norm[len(bspec.norm)//2]
        self.top_marker_line.set_data([feat_wv, feat_wv], [0.7*nac, 1.1*nac])
        self.bottom_marker_line.set_xdata([feat_wv, feat_wv])
        inv_var = bspec.ivar
        # Residuals in units of sigma: (model - data) * sqrt(ivar).
        significance = (feature_model-bspec.flux)*np.sqrt(inv_var)
        self.resid_line.set_data(bspec.wv, significance)
        self.zero_line.set_data([bspec.wv[0], bspec.wv[-1]], [0, 0])
        for line in self.sig_lines:
            line.set_xdata([bspec.wv[0], bspec.wv[-1]])
        self.mpl_fit.draw()
| [
"thimbgui.QtGui.QFileDialog.getSaveFileName",
"numpy.asarray",
"thimbgui.QtGui.QPushButton",
"thimbgui.QtGui.QGridLayout",
"numpy.min",
"numpy.max",
"thimbgui.QtGui.QSize",
"numpy.sqrt",
"thimbgui.QtGui.QCheckBox"
] | [((597, 616), 'thimbgui.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (614, 616), False, 'from thimbgui import QtGui, QtWidgets, QtCore, Qt\n'), ((1937, 1975), 'thimbgui.QtGui.QPushButton', 'QtGui.QPushButton', (['"""save measurements"""'], {}), "('save measurements')\n", (1954, 1975), False, 'from thimbgui import QtGui, QtWidgets, QtCore, Qt\n'), ((2156, 2183), 'thimbgui.QtGui.QCheckBox', 'QtGui.QCheckBox', (['"""Use line"""'], {}), "('Use line')\n", (2171, 2183), False, 'from thimbgui import QtGui, QtWidgets, QtCore, Qt\n'), ((2511, 2532), 'thimbgui.QtGui.QSize', 'QtGui.QSize', (['(500)', '(500)'], {}), '(500, 500)\n', (2522, 2532), False, 'from thimbgui import QtGui, QtWidgets, QtCore, Qt\n'), ((2721, 2781), 'thimbgui.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', '"""save measurements"""'], {}), "(self, 'save measurements')\n", (2754, 2781), False, 'from thimbgui import QtGui, QtWidgets, QtCore, Qt\n'), ((8543, 8568), 'numpy.asarray', 'np.asarray', (['[off, gw, lw]'], {}), '([off, gw, lw])\n', (8553, 8568), True, 'import numpy as np\n'), ((9181, 9199), 'numpy.min', 'np.min', (['bspec.flux'], {}), '(bspec.flux)\n', (9187, 9199), True, 'import numpy as np\n'), ((9201, 9219), 'numpy.max', 'np.max', (['bspec.flux'], {}), '(bspec.flux)\n', (9207, 9219), True, 'import numpy as np\n'), ((11649, 11665), 'numpy.sqrt', 'np.sqrt', (['inv_var'], {}), '(inv_var)\n', (11656, 11665), True, 'import numpy as np\n'), ((12625, 12641), 'numpy.sqrt', 'np.sqrt', (['inv_var'], {}), '(inv_var)\n', (12632, 12641), True, 'import numpy as np\n')] |
import numpy as np
import random
from collections import namedtuple, deque
from models import Actor, Critic
import copy
import torch
import torch.nn.functional as F
import torch.optim as optim
UPDATE_EVERY = 1        # how often to update the network
LOW_ACTION = -1         # lower clip bound applied to actions in Agent.act
HIGH_ACTION = 1         # upper clip bound applied to actions in Agent.act
NUM_AGENTS = 1          # rows of the random-action batch during warm-up
WARM_UP = 0             # steps of random exploration before using the actor
CONSECUTIVE_LEARNS = 1  # gradient updates performed per learning trigger
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DDPG agent: interacts with and learns from the environment.

    Holds actor/critic networks plus target copies, an Ornstein-Uhlenbeck
    noise process for exploration, and a replay buffer.
    """
    def __init__(self, state_size, action_size, seed, a_check=None, c_check=None, gamma=0.99, tau=1e-3, add_noise=False, mu=0.,
                 theta=0.15, sigma=0.1, lr_actor=2e-4, lr_critic=2e-4,buffer_size=1e5, batch_size=128 ):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
            a_check / c_check: optional state_dicts to warm-start the
                actor / critic networks
            gamma (float): discount factor
            tau (float): soft-update interpolation rate
            add_noise (bool): add OU exploration noise to actions
            mu, theta, sigma (float): OU-process parameters
            lr_actor, lr_critic (float): Adam learning rates
            buffer_size, batch_size: replay capacity and sample size
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        self.random_process = OUNoise(action_size, seed, mu=mu, theta=theta, sigma=sigma)
        self.gamma = gamma
        self.tau = tau
        # Actor and Critic approximators
        self.targetActor = Actor(state_size, action_size, seed, (128,128)).to(device)
        self.targetCritic = Critic(state_size, action_size, seed, (128,128)).to(device)
        self.actor = Actor(state_size, action_size, seed, (128, 128)).to(device)
        self.critic = Critic(state_size, action_size, seed, (128, 128)).to(device)
        # Hard-copy local weights into the targets so both start identical.
        for target, local in zip(self.targetCritic.parameters(), self.critic.parameters()):
            target.data.copy_(local.data)
        for target, local in zip(self.targetActor.parameters(), self.actor.parameters()):
            target.data.copy_(local.data)
        # NOTE(review): checkpoints load after the target copy above, so the
        # target networks keep their fresh init -- confirm this is intended.
        if a_check is not None:
            self.actor.load_state_dict(a_check)
        if c_check is not None:
            self.critic.load_state_dict(c_check)
        # self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        self.actor_opt = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_opt = optim.Adam(self.critic.parameters(), lr=lr_critic)
        # Replay memory
        self.memory = ReplayBuffer(action_size, buffer_size, batch_size, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
        self.t = 0
        self.warm_up = WARM_UP
        self.add_noise = add_noise
    def reset(self):
        # Reset the OU exploration noise to its mean at episode start.
        self.random_process.reset()
    def act(self, state, random=False):
        """Return a clipped action for `state` under the current policy.

        Params
        ======
            state (array_like): current state
            random (bool): if True (or still inside warm-up) sample a
                standard-normal action instead of querying the actor
        Note: the `random` parameter shadows the random module inside this
        method.
        """
        if random is True or self.t < self.warm_up:
            action = np.random.randn(NUM_AGENTS, self.action_size)
        else:
            self.actor.eval()
            with torch.no_grad():
                action = self.actor(torch.from_numpy(state).float().to(device)).cpu().data.numpy()
                if self.add_noise:
                    noise = self.random_process.sample()
                    action += noise
            self.actor.train()
        return np.clip(action, LOW_ACTION, HIGH_ACTION)
    def step(self, state, action, reward, next_state, done):
        """Record one transition and, past warm-up, run learning updates."""
        # Save experience in replay memory NUMPY
        self.t += 1
        self.memory.add(state, action, reward, next_state, done)
        if self.t > self.warm_up:
            # Learn every UPDATE_EVERY time steps.
            # NOTE(review): t_step is updated but never checked, so learning
            # runs every step regardless of UPDATE_EVERY -- confirm intent.
            self.t_step = (self.t_step + 1) % UPDATE_EVERY
            if len(self.memory) > self.memory.batch_size:
                for i in range(0,CONSECUTIVE_LEARNS):
                    # If enough samples are available in memory, get random subset and learn
                    experiences = self.memory.sample()
                    self.learn(experiences, self.gamma)
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.targetActor(next_states)
        Q_targets_next = self.targetCritic(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss (using gradient clipping)
        self.critic_opt.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1)
        self.critic_opt.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor(states)
        actor_loss = -self.critic(states, actions_pred).mean()
        # Minimize the loss
        self.actor_opt.zero_grad()
        actor_loss.backward()
        self.actor_opt.step()
        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic, self.targetCritic, self.tau)
        self.soft_update(self.actor, self.targetActor, self.tau)
    '''
    print(next_action_values_local.shape)
    print(next_action_values_local[0][:])
    print(next_action_values_local.gather(1, actions).shape)
    print(actions[0][0])
    print(next_action_values_local.gather(1, actions)[0][0])
    '''
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
    def adjust_learning_rate(self, episode, val):
        """Set the learning rate of self.optimizer's param groups to `val`.

        NOTE(review): self.optimizer is never created (its assignment in
        __init__ is commented out), so calling this raises AttributeError.
        Probably actor_opt/critic_opt should be adjusted instead -- confirm.
        """
        print("adjusting learning rate!")
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = val
#----------------------------------------------------------------------------------------------------------------------
class MeanStdNormalizer:
    """Normalize batches to zero mean / unit variance with running statistics.

    The statistics are created lazily on the first call and updated on every
    call unless the normalizer is read-only.  Output is clipped to
    [-clip, clip].
    """
    def __init__(self, read_only=False, clip=10.0, epsilon=1e-8):
        self.read_only = read_only
        self.clip = clip
        self.epsilon = epsilon
        self.rms = None  # RunningMeanStd, built on first __call__
    def __call__(self, x):
        batch = np.asarray(x)
        if self.rms is None:
            # One statistics row, broadcast over the batch dimension.
            self.rms = RunningMeanStd(shape=(1,) + batch.shape[1:])
        if not self.read_only:
            self.rms.update(batch)
        scaled = (batch - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon)
        return np.clip(scaled, -self.clip, self.clip)
    def state_dict(self):
        """Snapshot of the running statistics for checkpointing."""
        return {'mean': self.rms.mean,
                'var': self.rms.var}
    def load_state_dict(self, saved):
        """Restore statistics produced by state_dict()."""
        self.rms.mean = saved['mean']
        self.rms.var = saved['var']
    def set_read_only(self):
        self.read_only = True
    def unset_read_only(self):
        self.read_only = False
class RunningMeanStd:
    """Streaming mean/variance using the parallel merge update.

    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    """
    def __init__(self, epsilon=1e-4, shape=()):
        self.mean = np.zeros(shape, 'float64')
        self.var = np.ones(shape, 'float64')
        # Tiny prior count keeps the very first merge numerically stable.
        self.count = epsilon
    def update(self, x):
        """Fold a batch (first axis = samples) into the running statistics."""
        self.update_from_moments(np.mean(x, axis=0), np.var(x, axis=0), x.shape[0])
    def update_from_moments(self, batch_mean, batch_var, batch_count):
        self.mean, self.var, self.count = self.update_mean_var_count_from_moments(
            self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
    def update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        """Merge (mean, var, count) with a batch's moments; return the new triple."""
        total = count + batch_count
        shift = batch_mean - mean
        merged_mean = mean + shift * batch_count / total
        # Combine the two sums of squared deviations, plus the cross term.
        ssq = var * count + batch_var * batch_count
        ssq = ssq + np.square(shift) * count * batch_count / total
        return merged_mean, ssq / total, total
#-----------------------------------------------------------------------------------------------------------------------
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=int(buffer_size))
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
    def add(self, state, action, reward, next_state, done):
        """Append one experience tuple to the buffer (oldest entries drop off)."""
        self.memory.append(self.experience(state, action, reward, next_state, done))
    def sample(self):
        """Draw a uniform random batch and return it as float tensors on `device`."""
        batch = random.sample(self.memory, k=self.batch_size)
        def stack(getter, cast=None):
            # Column-stack one field of the batch, optionally recast (bools
            # become uint8 before the float conversion).
            cols = np.vstack([getter(e) for e in batch if e is not None])
            if cast is not None:
                cols = cols.astype(cast)
            return torch.from_numpy(cols).float().to(device)
        states = stack(lambda e: e.state)
        actions = stack(lambda e: e.action)
        rewards = stack(lambda e: e.reward)
        next_states = stack(lambda e: e.next_state)
        dones = stack(lambda e: e.done, cast=np.uint8)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class OUNoise:
    """Ornstein-Uhlenbeck process."""
    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.1):
        """Configure the process; the state starts at the mean `mu`."""
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.reset()
    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)
    def sample(self):
        """Advance one OU step and return the new state as the noise sample."""
        prev = self.state
        # Uniform [0, 1) jitter per component (note: not Gaussian).
        jitter = np.array([random.random() for _ in range(len(prev))])
        drift = self.theta * (self.mu - prev)
        self.state = prev + drift + self.sigma * jitter
        return self.state
class LinearSchedule:
    """Linearly move a value from `start` toward `end` over `steps` calls."""
    def __init__(self, start, end=None, steps=None):
        if end is None:
            # Constant schedule: one step with zero increment.
            end = start
            steps = 1
        self.inc = (end - start) / float(steps)
        self.current = start
        self.end = end
        # Clamp toward `end` from whichever side we approach it.
        self.bound = min if end > start else max
    def __call__(self, steps=1):
        """Return the current value, then advance the schedule by `steps`."""
        value = self.current
        self.current = self.bound(self.current + self.inc * steps, self.end)
        return value
"random.sample",
"numpy.ones",
"numpy.clip",
"numpy.mean",
"torch.no_grad",
"numpy.random.randn",
"models.Critic",
"random.seed",
"models.Actor",
"numpy.var",
"numpy.asarray",
"torch.nn.functional.mse_loss",
"numpy.square",
"random.random",
"torch.cuda.is_available",
"numpy.vstack",
... | [((363, 388), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (386, 388), False, 'import torch\n'), ((1034, 1051), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1045, 1051), False, 'import random\n'), ((3420, 3460), 'numpy.clip', 'np.clip', (['action', 'LOW_ACTION', 'HIGH_ACTION'], {}), '(action, LOW_ACTION, HIGH_ACTION)\n', (3427, 3460), True, 'import numpy as np\n'), ((4995, 5028), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q_expected', 'Q_targets'], {}), '(Q_expected, Q_targets)\n', (5005, 5028), True, 'import torch.nn.functional as F\n'), ((7221, 7234), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7231, 7234), True, 'import numpy as np\n'), ((8041, 8067), 'numpy.zeros', 'np.zeros', (['shape', '"""float64"""'], {}), "(shape, 'float64')\n", (8049, 8067), True, 'import numpy as np\n'), ((8087, 8112), 'numpy.ones', 'np.ones', (['shape', '"""float64"""'], {}), "(shape, 'float64')\n", (8094, 8112), True, 'import numpy as np\n'), ((8189, 8207), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8196, 8207), True, 'import numpy as np\n'), ((8228, 8245), 'numpy.var', 'np.var', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8234, 8245), True, 'import numpy as np\n'), ((9786, 9879), 'collections.namedtuple', 'namedtuple', (['"""Experience"""'], {'field_names': "['state', 'action', 'reward', 'next_state', 'done']"}), "('Experience', field_names=['state', 'action', 'reward',\n 'next_state', 'done'])\n", (9796, 9879), False, 'from collections import namedtuple, deque\n'), ((9896, 9913), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9907, 9913), False, 'import random\n'), ((10231, 10276), 'random.sample', 'random.sample', (['self.memory'], {'k': 'self.batch_size'}), '(self.memory, k=self.batch_size)\n', (10244, 10276), False, 'import random\n'), ((11363, 11380), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11374, 11380), False, 'import random\n'), ((11508, 11526), 'copy.copy', 
'copy.copy', (['self.mu'], {}), '(self.mu)\n', (11517, 11526), False, 'import copy\n'), ((3023, 3068), 'numpy.random.randn', 'np.random.randn', (['NUM_AGENTS', 'self.action_size'], {}), '(NUM_AGENTS, self.action_size)\n', (3038, 3068), True, 'import numpy as np\n'), ((11275, 11288), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (11282, 11288), True, 'import numpy as np\n'), ((1260, 1308), 'models.Actor', 'Actor', (['state_size', 'action_size', 'seed', '(128, 128)'], {}), '(state_size, action_size, seed, (128, 128))\n', (1265, 1308), False, 'from models import Actor, Critic\n'), ((1347, 1396), 'models.Critic', 'Critic', (['state_size', 'action_size', 'seed', '(128, 128)'], {}), '(state_size, action_size, seed, (128, 128))\n', (1353, 1396), False, 'from models import Actor, Critic\n'), ((1428, 1476), 'models.Actor', 'Actor', (['state_size', 'action_size', 'seed', '(128, 128)'], {}), '(state_size, action_size, seed, (128, 128))\n', (1433, 1476), False, 'from models import Actor, Critic\n'), ((1510, 1559), 'models.Critic', 'Critic', (['state_size', 'action_size', 'seed', '(128, 128)'], {}), '(state_size, action_size, seed, (128, 128))\n', (1516, 1559), False, 'from models import Actor, Critic\n'), ((3130, 3145), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3143, 3145), False, 'import torch\n'), ((7435, 7471), 'numpy.sqrt', 'np.sqrt', (['(self.rms.var + self.epsilon)'], {}), '(self.rms.var + self.epsilon)\n', (7442, 7471), True, 'import numpy as np\n'), ((8911, 8927), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (8920, 8927), True, 'import numpy as np\n'), ((11707, 11722), 'random.random', 'random.random', ([], {}), '()\n', (11720, 11722), False, 'import random\n'), ((10312, 10370), 'numpy.vstack', 'np.vstack', (['[e.state for e in experiences if e is not None]'], {}), '([e.state for e in experiences if e is not None])\n', (10321, 10370), True, 'import numpy as np\n'), ((10426, 10485), 'numpy.vstack', 'np.vstack', (['[e.action for e in 
experiences if e is not None]'], {}), '([e.action for e in experiences if e is not None])\n', (10435, 10485), True, 'import numpy as np\n'), ((10541, 10600), 'numpy.vstack', 'np.vstack', (['[e.reward for e in experiences if e is not None]'], {}), '([e.reward for e in experiences if e is not None])\n', (10550, 10600), True, 'import numpy as np\n'), ((10660, 10723), 'numpy.vstack', 'np.vstack', (['[e.next_state for e in experiences if e is not None]'], {}), '([e.next_state for e in experiences if e is not None])\n', (10669, 10723), True, 'import numpy as np\n'), ((10790, 10847), 'numpy.vstack', 'np.vstack', (['[e.done for e in experiences if e is not None]'], {}), '([e.done for e in experiences if e is not None])\n', (10799, 10847), True, 'import numpy as np\n'), ((3183, 3206), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3199, 3206), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
def extract_data( d ):
    """Map the columns of raw array `d` to named Re/Im field components.

    Column 0 is time; after that the layout is 12 dipole indices x 3
    spatial components (x, y, z) x (Re, Im), i.e. "Re[i]<axis>" lives at
    column 1 + 6*i + 2*c (c = 0,1,2 for x,y,z) and "Im[i]<axis>" right
    after it.  The original hand-written 72-entry dict followed exactly
    this pattern; generating it keeps the mapping identical and obvious.
    """
    out = {}
    for i in range(12):
        for c, axis in enumerate("xyz"):
            col = 1 + 6 * i + 2 * c
            out["Re[%d]%s" % (i, axis)] = d[:, col]
            out["Im[%d]%s" % (i, axis)] = d[:, col + 1]
    return out
# Matplotlib font dictionaries shared by the title and axis labels below.
font_title = {'family' : 'serif',
        'weight' : 'normal',
        'size' : 18,}
font_label = {
        'weight' : 'normal',
        'size' : 16,}
###########################
def plot_data(data):
    """Plot the selected components of one loaded dataset.

    Relies on module-level globals: `data_idx` (components to draw), `ax`
    (target axes), and `handle`/`legend` (accumulated across calls).  The
    try/except NameError probe checks whether a global `i_file` exists and,
    if so, prefixes legend entries with that file index.

    NOTE(review): plot_files' loop variable `i_file` is local to that
    function, so this probe only sees an `i_file` defined at module scope
    (e.g. left over from an interactive run) -- fragile; confirm intent.
    """
    t = data[:,0]
    dat = extract_data(data)
    for i_data in data_idx:
        try:
            i_file
        except NameError:
            legend.append( "%s" % i_data )
        else:
            legend.append( "%d:%s" % (i_file, i_data) )
        handle.append( ax.plot(t, dat[i_data], marker='.')[0] )
def plot_files(file_idx, data_idx):
    """Load each numbered result file "res/ptot_1d_<i>.dat" and plot it.

    The actual drawing is delegated to plot_data (which reads the global
    `data_idx`; the parameter of the same name is not used directly here).
    """
    for idx in file_idx:
        plot_data(np.loadtxt("res/ptot_1d_%d.dat" % idx))
# --- module-level script: build the figure and plot the main result file ---
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
handle = []  # plotted line handles, filled by plot_data
legend = []  # legend labels, filled by plot_data
file_idx = [0, 1, 2, 3]    # per-file indices used by the plot_files path
data_idx = ["Re[0]z"]      # components to draw (read by plot_data)
data = np.loadtxt( "res/ptot_1d.dat" )
plot_data(data)
#plot_files(file_idx, data_idx)
# for i_file in file_idx:
#     data = np.loadtxt( "res/ptot_1d_%d.dat" % i_file )
#     t = data[:,0]
#     dat = extract_data(data)
#     for i_data in data_idx:
#         legend.append( "%d:%s" % (i_file, i_data) )
#         handle.append( ax.plot(t, dat[i_data], marker='.')[0] )
#         #handle.append( ax.plot(t, np.log10( np.abs( dat[i] ) ) )[0] )
ax.legend( handle, legend, shadow=True, loc='upper center' )
#ax.set_xlim([-1600,1600])
ax.grid(True)
plt.xlabel(r"Time (fs)", fontdict=font_label)
plt.ylabel(r"Polarization (arb.)", fontdict=font_label)
plt.title(r"Overall polarization", fontdict=font_title)
plt.savefig("fig/ptot.svg")
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((2487, 2499), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2497, 2499), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2638), 'numpy.loadtxt', 'np.loadtxt', (['"""res/ptot_1d.dat"""'], {}), "('res/ptot_1d.dat')\n", (2619, 2638), True, 'import numpy as np\n'), ((3151, 3195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (fs)"""'], {'fontdict': 'font_label'}), "('Time (fs)', fontdict=font_label)\n", (3161, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3251), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Polarization (arb.)"""'], {'fontdict': 'font_label'}), "('Polarization (arb.)', fontdict=font_label)\n", (3207, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3253, 3307), 'matplotlib.pyplot.title', 'plt.title', (['"""Overall polarization"""'], {'fontdict': 'font_title'}), "('Overall polarization', fontdict=font_title)\n", (3262, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3309, 3336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig/ptot.svg"""'], {}), "('fig/ptot.svg')\n", (3320, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3337, 3347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3345, 3347), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2452), 'numpy.loadtxt', 'np.loadtxt', (["('res/ptot_1d_%d.dat' % i_file)"], {}), "('res/ptot_1d_%d.dat' % i_file)\n", (2421, 2452), True, 'import numpy as np\n')] |
import unittest
import numpy
import cupy
from cupy import testing
import cupyx.scipy.special # NOQA
def _boundary_inputs(boundary, rtol, atol):
left = boundary * (1 - numpy.copysign(rtol, boundary)) - atol
right = boundary * (1 + numpy.copysign(rtol, boundary)) + atol
return [left, boundary, right]
class _TestBase(object):
    """Shared erf-family checks mixed into the concrete test cases below."""
    def test_erf(self):
        self.check_unary('erf')
    def test_erfc(self):
        self.check_unary('erfc')
    def test_erfcx(self):
        self.check_unary('erfcx')
    @testing.with_requires('scipy>=1.4.0')
    def test_erfinv(self):
        # Generic grid, a random sample mapped into the open domain (-1, 1),
        # and points hugging each endpoint.
        self.check_unary('erfinv')
        self.check_unary_random('erfinv', scale=2, offset=-1)
        for edge in (-1, 1):
            self.check_unary_boundary('erfinv', boundary=edge)
    @testing.with_requires('scipy>=1.4.0')
    def test_erfcinv(self):
        # Domain of erfcinv is (0, 2): interior grid, random sample, edges.
        self.check_unary('erfcinv')
        self.check_unary_random('erfcinv', scale=2, offset=0)
        for edge in (0, 2):
            self.check_unary_boundary('erfcinv', boundary=edge)
@testing.gpu
@testing.with_requires('scipy')
class TestSpecial(unittest.TestCase, _TestBase):
    # CuPy-vs-SciPy agreement tests for the erf family.  The
    # numpy_cupy_allclose decorator calls each check twice, injecting
    # xp = numpy|cupy and scp = scipy|cupyx.scipy, and asserts the two
    # returned arrays agree to atol.
    @testing.for_dtypes(['e', 'f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary(self, name, xp, scp, dtype):
        import scipy.special  # NOQA
        a = testing.shaped_arange((2, 3), xp, dtype)
        return getattr(scp.special, name)(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_random(self, name, xp, scp, dtype, scale, offset):
        import scipy.special  # NOQA
        # Random draw shifted into [offset, offset + scale).
        a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
        return getattr(scp.special, name)(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_boundary(self, name, xp, scp, dtype, boundary):
        import scipy.special  # NOQA
        # Values a hair inside/outside the edge of the function's domain.
        a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
        a = xp.array(a, dtype=dtype)
        return getattr(scp.special, name)(a)
    @testing.with_requires('scipy>=1.4.0')
    @testing.for_dtypes(['f', 'd'])
    def test_erfinv_behavior(self, dtype):
        # erfinv: NaN outside [-1, 1], +/-inf exactly at the endpoints.
        a = cupy.empty((1,), dtype=dtype)
        a[:] = 1.0 + 1E-6
        a = cupyx.scipy.special.erfinv(a)
        assert cupy.isnan(a)
        a[:] = -1.0 - 1E-6
        a = cupyx.scipy.special.erfinv(a)
        assert cupy.isnan(a)
        a[:] = 1.0
        a = cupyx.scipy.special.erfinv(a)
        assert numpy.isposinf(cupy.asnumpy(a))
        a[:] = -1.0
        a = cupyx.scipy.special.erfinv(a)
        assert numpy.isneginf(cupy.asnumpy(a))
    @testing.with_requires('scipy>=1.4.0')
    @testing.for_dtypes(['f', 'd'])
    def test_erfcinv_behavior(self, dtype):
        # erfcinv: NaN outside [0, 2], +inf at 0 and -inf at 2.
        a = cupy.empty((1,), dtype=dtype)
        a[:] = 2.0 + 1E-6
        a = cupyx.scipy.special.erfcinv(a)
        assert cupy.isnan(a)
        a[:] = 0.0 - 1E-6
        a = cupyx.scipy.special.erfcinv(a)
        assert cupy.isnan(a)
        a[:] = 0.0
        a = cupyx.scipy.special.erfcinv(a)
        assert numpy.isposinf(cupy.asnumpy(a))
        a[:] = 2.0
        a = cupyx.scipy.special.erfcinv(a)
        assert numpy.isneginf(cupy.asnumpy(a))
@testing.gpu
@testing.with_requires('scipy')
class TestFusionSpecial(unittest.TestCase, _TestBase):
    # Same agreement checks as TestSpecial, but each special function is
    # invoked through cupy.fuse to exercise the kernel-fusion path.
    @testing.for_dtypes(['e', 'f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary(self, name, xp, scp, dtype):
        import scipy.special  # NOQA
        a = testing.shaped_arange((2, 3), xp, dtype)
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_random(self, name, xp, scp, dtype, scale, offset):
        import scipy.special  # NOQA
        # Random draw shifted into [offset, offset + scale).
        a = testing.shaped_random((2, 3), xp, dtype, scale=scale) + offset
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
    @testing.for_dtypes(['f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
    def check_unary_boundary(self, name, xp, scp, dtype, boundary):
        import scipy.special  # NOQA
        # Values a hair inside/outside the edge of the function's domain.
        a = _boundary_inputs(boundary, 1.0 / 1024, 1.0 / 1024)
        a = xp.array(a, dtype=dtype)
        @cupy.fuse()
        def f(x):
            return getattr(scp.special, name)(x)
        return f(a)
| [
"cupy.testing.shaped_arange",
"numpy.copysign",
"cupy.empty",
"cupy.isnan",
"cupy.testing.shaped_random",
"cupy.testing.with_requires",
"cupy.asnumpy",
"cupy.testing.numpy_cupy_allclose",
"cupy.testing.for_dtypes",
"cupy.fuse"
] | [((1102, 1132), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy"""'], {}), "('scipy')\n", (1123, 1132), False, 'from cupy import testing\n'), ((3324, 3354), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy"""'], {}), "('scipy')\n", (3345, 3354), False, 'from cupy import testing\n'), ((527, 564), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy>=1.4.0"""'], {}), "('scipy>=1.4.0')\n", (548, 564), False, 'from cupy import testing\n'), ((808, 845), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy>=1.4.0"""'], {}), "('scipy>=1.4.0')\n", (829, 845), False, 'from cupy import testing\n'), ((1188, 1223), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['e', 'f', 'd']"], {}), "(['e', 'f', 'd'])\n", (1206, 1223), False, 'from cupy import testing\n'), ((1229, 1286), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (1256, 1286), False, 'from cupy import testing\n'), ((1477, 1507), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (1495, 1507), False, 'from cupy import testing\n'), ((1513, 1570), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (1540, 1570), False, 'from cupy import testing\n'), ((1805, 1835), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (1823, 1835), False, 'from cupy import testing\n'), ((1841, 1898), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (1868, 1898), False, 'from cupy import testing\n'), ((2155, 2192), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy>=1.4.0"""'], {}), "('scipy>=1.4.0')\n", (2176, 2192), False, 'from cupy import testing\n'), ((2198, 
2228), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (2216, 2228), False, 'from cupy import testing\n'), ((2733, 2770), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy>=1.4.0"""'], {}), "('scipy>=1.4.0')\n", (2754, 2770), False, 'from cupy import testing\n'), ((2776, 2806), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (2794, 2806), False, 'from cupy import testing\n'), ((3416, 3451), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['e', 'f', 'd']"], {}), "(['e', 'f', 'd'])\n", (3434, 3451), False, 'from cupy import testing\n'), ((3457, 3514), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (3484, 3514), False, 'from cupy import testing\n'), ((3770, 3800), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (3788, 3800), False, 'from cupy import testing\n'), ((3806, 3863), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (3833, 3863), False, 'from cupy import testing\n'), ((4163, 4193), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (["['f', 'd']"], {}), "(['f', 'd'])\n", (4181, 4193), False, 'from cupy import testing\n'), ((4199, 4256), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (4226, 4256), False, 'from cupy import testing\n'), ((1385, 1425), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3)', 'xp', 'dtype'], {}), '((2, 3), xp, dtype)\n', (1406, 1425), False, 'from cupy import testing\n'), ((2284, 2313), 'cupy.empty', 'cupy.empty', (['(1,)'], {'dtype': 'dtype'}), '((1,), dtype=dtype)\n', (2294, 2313), False, 'import cupy\n'), ((2398, 2411), 'cupy.isnan', 'cupy.isnan', (['a'], 
{}), '(a)\n', (2408, 2411), False, 'import cupy\n'), ((2496, 2509), 'cupy.isnan', 'cupy.isnan', (['a'], {}), '(a)\n', (2506, 2509), False, 'import cupy\n'), ((2863, 2892), 'cupy.empty', 'cupy.empty', (['(1,)'], {'dtype': 'dtype'}), '((1,), dtype=dtype)\n', (2873, 2892), False, 'import cupy\n'), ((2978, 2991), 'cupy.isnan', 'cupy.isnan', (['a'], {}), '(a)\n', (2988, 2991), False, 'import cupy\n'), ((3076, 3089), 'cupy.isnan', 'cupy.isnan', (['a'], {}), '(a)\n', (3086, 3089), False, 'import cupy\n'), ((3613, 3653), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3)', 'xp', 'dtype'], {}), '((2, 3), xp, dtype)\n', (3634, 3653), False, 'from cupy import testing\n'), ((3664, 3675), 'cupy.fuse', 'cupy.fuse', ([], {}), '()\n', (3673, 3675), False, 'import cupy\n'), ((4057, 4068), 'cupy.fuse', 'cupy.fuse', ([], {}), '()\n', (4066, 4068), False, 'import cupy\n'), ((4472, 4483), 'cupy.fuse', 'cupy.fuse', ([], {}), '()\n', (4481, 4483), False, 'import cupy\n'), ((1691, 1744), 'cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3)', 'xp', 'dtype'], {'scale': 'scale'}), '((2, 3), xp, dtype, scale=scale)\n', (1712, 1744), False, 'from cupy import testing\n'), ((2601, 2616), 'cupy.asnumpy', 'cupy.asnumpy', (['a'], {}), '(a)\n', (2613, 2616), False, 'import cupy\n'), ((2710, 2725), 'cupy.asnumpy', 'cupy.asnumpy', (['a'], {}), '(a)\n', (2722, 2725), False, 'import cupy\n'), ((3182, 3197), 'cupy.asnumpy', 'cupy.asnumpy', (['a'], {}), '(a)\n', (3194, 3197), False, 'import cupy\n'), ((3291, 3306), 'cupy.asnumpy', 'cupy.asnumpy', (['a'], {}), '(a)\n', (3303, 3306), False, 'import cupy\n'), ((3984, 4037), 'cupy.testing.shaped_random', 'testing.shaped_random', (['(2, 3)', 'xp', 'dtype'], {'scale': 'scale'}), '((2, 3), xp, dtype, scale=scale)\n', (4005, 4037), False, 'from cupy import testing\n'), ((176, 206), 'numpy.copysign', 'numpy.copysign', (['rtol', 'boundary'], {}), '(rtol, boundary)\n', (190, 206), False, 'import numpy\n'), ((243, 273), 'numpy.copysign', 
'numpy.copysign', (['rtol', 'boundary'], {}), '(rtol, boundary)\n', (257, 273), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
from enum import Enum
from io import StringIO
from inspect import isclass
from contextlib import contextmanager
from NumPyNet import activations
from NumPyNet.exception import NotFittedError
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
# Enum of cost_function, declarations inside class
class cost_type(int, Enum):
    """Identifiers of the available cost (loss) functions."""
    mse = 0        # mean square error
    masked = 1
    mae = 2        # mean absolute error
    seg = 3
    smooth = 4
    wgan = 5
    hellinger = 6
    hinge = 7
    logcosh = 8
def _check_activation (layer, activation_func):
    u"""
    Validate ``activation_func`` and return a usable Activation object.

    Parameters
    ----------
    layer : object
        Layer object requesting the check (only used in error messages).
    activation_func : str, Activations instance or Activations subclass
        Activation function to check. A string is resolved against the
        classes defined in the ``activations`` module.

    Returns
    -------
    act : Activation
        Activation object (or class, see NOTE below).

    Raises
    ------
    ValueError
        If the name or object is not a valid activation function.

    Examples
    --------
    >>> layer = Activation_layer(input_shape=(1,2,3))
    >>> print(_check_activation(layer, 'Linear'))
    6
    >>> print(_check_activation(layer, Activations.Relu))
    6
    >>> print(_check_activation(layer, Activations.Linear()))
    6
    """
    if isinstance(activation_func, str):
        allowed_activation_func = [f.lower() for f in dir(activations)
                                   if isclass(getattr(activations, f)) and f != 'Activations']
        if activation_func.lower() not in allowed_activation_func:
            class_name = layer.__class__.__name__
            raise ValueError('{0}: incorrect value of Activation Function given'.format(class_name))
        # Build the class name ('relu' -> 'Relu') and instantiate it via
        # getattr instead of eval(): same result, no arbitrary code execution.
        name = activation_func.lower()
        name = ''.join([name[0].upper(), name[1:]])
        activation = getattr(activations, name)()
    elif issubclass(type(activation_func), activations.Activations):
        # already an instance: use it as-is
        activation = activation_func
    # isclass() guard: without it, passing a non-class (e.g. an int) made
    # issubclass() raise TypeError instead of the intended ValueError below.
    elif isclass(activation_func) and issubclass(activation_func, activations.Activations):
        # NOTE(review): a subclass is returned un-instantiated, mirroring the
        # historical behavior -- confirm whether callers expect an instance.
        activation = activation_func
    else:
        class_name = layer.__class__.__name__
        raise ValueError('{0}: incorrect value of Activation Function given'.format(class_name))
    return activation
def _check_cost (layer, cost):
    u"""
    Validate ``cost`` and return the corresponding cost-function index.

    Parameters
    ----------
    layer : object
        Layer object requesting the check (only used in error messages).
    cost : str, cost_type or int
        Cost function to check; a string is matched case-insensitively
        against the ``cost_type`` member names.

    Returns
    -------
    NumPyNet cost function index (int).

    Raises
    ------
    ValueError
        If the name/value is not a valid cost function.

    Examples
    --------
    >>> layer = Cost_layer(input_shape=(1,2,3))
    >>> print(_check_cost(layer, 'mae'))
    2
    >>> print(_check_cost(layer, cost.mae))
    2
    """
    if isinstance(cost, str):
        allowed_cost = [c for c in dir(cost_type) if not c.startswith('__')]
        if cost.lower() not in allowed_cost:
            class_name = layer.__class__.__name__
            raise ValueError('{0}: incorrect value of Cost Function given'.format(class_name))
        # Enum lookup instead of eval(); using cost.lower() also fixes the
        # historical bug where mixed-case input ('MAE') passed the lowercase
        # membership test above but then crashed in eval('cost_type.MAE...').
        cost = cost_type[cost.lower()].value
    elif isinstance(cost, cost_type):
        cost = cost.value
    elif isinstance(cost, int) and cost <= max(cost_type):
        cost = cost_type(cost)
    else:
        class_name = layer.__class__.__name__
        raise ValueError('{0}: incorrect value of Cost Function given'.format(class_name))
    return cost
def check_is_fitted (obj, variable='delta'):
    u"""
    Verify that the backward pass of the layer ``obj`` is available.

    Parameters
    ----------
    obj : layer type
        The layer instance to inspect.
    variable : str
        Name of the attribute whose non-None value proves that ``forward``
        has already been called.

    Returns
    -------
    True when the layer is fitted.

    Raises
    ------
    NotFittedError
        When ``obj.<variable>`` is still None, i.e. ``forward`` was never run.

    Notes
    -----
    .. note::
        The backward function can be used ONLY after the forward procedure;
        this helper checks that the forward pass has already been applied.
    """
    if getattr(obj, variable) is None:
        raise NotFittedError('This layer instance is not fitted yet. Call "forward" with appropriate arguments before using its backward function.')
    return True
def print_statistics (arr):
    u"""
    Print and return common statistics of the input array.

    Parameters
    ----------
    arr : array-like
        Input array.

    Returns
    -------
    mse : float
        Root mean square, i.e. sqrt(mean(x*x)).
    mean : float
        Mean of the array.
    variance : float
        Variance of the array.

    Notes
    -----
    .. note::
        The values are printed and returned.
    """
    stats = (np.sqrt(np.mean(arr * arr)), np.mean(arr), np.var(arr))
    print('MSE: {:>3.3f}, Mean: {:>3.3f}, Variance: {:>3.3f}'.format(*stats))
    return stats
def to_categorical (arr):
    u"""
    Convert a vector of labels into one-hot encoding format.

    Parameters
    ----------
    arr : array-like 1D
        Array of integer labels (without holes).

    Returns
    -------
    2D matrix in one-hot encoding format, shape (len(arr), max(arr) + 1).
    """
    labels = np.asarray(arr).astype(int).ravel()
    n_labels = np.max(labels) + 1
    # one row per sample, a single 1 in the column of its label
    one_hot = np.zeros(shape=(len(labels), n_labels), dtype=float)
    one_hot[np.arange(len(labels)), labels] = 1
    return one_hot
def from_categorical (categoricals):
    u"""
    Convert a one-hot encoding format into a vector of labels.

    Parameters
    ----------
    categoricals : array-like 2D
        One-hot encoding format of a label set.

    Returns
    -------
    Corresponding labels in 1D array.
    """
    # the label of each row is the position of its (unique) maximum
    return np.asarray(categoricals).argmax(axis=-1)
def data_to_timesteps (data, steps, shift=1):
    u"""
    Prepare data for a Recurrent model, dividing a series of shape
    (Ndata, features) into overlapping windows of ``steps`` timesteps, with
    shape (Ndata - steps * shift, steps, features).
    If ``data`` has more than two dimensions it is reshaped to 2D first.

    Parameters
    ----------
    data : array-like
        2 or 4 dimensional numpy array, with shapes (Ndata, features) or
        (Ndata, w, h, c).
    steps : int
        Number of timesteps considered for the Recurrent layer.
    shift : int (default=1)
        Temporal shift between the starts of consecutive windows.

    Returns
    -------
    X : array-like
        A (zero-copy) strided view on the input data, for Recurrent layers.
    y : array-like
        Corresponding labels as time-shifted values.
    """
    # flatten every sample to a feature vector: (Ndata, features)
    X = data.reshape(data.shape[0], -1)
    Npoints, features = X.shape
    stride0, stride1 = X.strides
    # one window = `steps` consecutive rows; window starts are `shift` rows apart
    shape = (Npoints - steps * shift, steps, features)
    strides = (shift * stride0, stride0, stride1)
    # NOTE(review): the strides are computed on the 2-D view X but applied to
    # `data`; this is only valid when `data` is C-contiguous so that X shares
    # its buffer -- confirm with callers.
    X = np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
    # labels: the sample `steps` rows ahead of each window start
    # NOTE(review): with shift != 1, len(X) != len(y) -- confirm intended.
    y = data[steps:]
    return X, y
@contextmanager
def _redirect_stdout (verbose):
    u"""
    Context manager redirecting stdout to os.devnull, or leaving it alone.
    This function does not work for cython stdout!
    If you want something for cython wraps you can refer to the
    implementation in the rFBP package (https://github.com/Nico-Curti/rFBP)

    Parameters
    ----------
    verbose : bool
        Enable (True: no redirection) or disable (False: silence) stdout.
    """
    if verbose:
        # BUGFIX: the previous 'try: yield / finally: return' silently
        # swallowed any exception raised inside the with-block; a plain
        # yield lets exceptions propagate normally.
        yield
        return
    old_target = sys.stdout
    try:
        with open(os.devnull, "w") as new_target:
            sys.stdout = new_target
            yield new_target
    finally:
        # always restore the original stream, even on error
        sys.stdout = old_target
| [
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"NumPyNet.exception.NotFittedError",
"numpy.mean",
"numpy.lib.stride_tricks.as_strided",
"numpy.max",
"numpy.var",
"numpy.put_along_axis"
] | [((4951, 4963), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (4958, 4963), True, 'import numpy as np\n'), ((4977, 4988), 'numpy.var', 'np.var', (['arr'], {}), '(arr)\n', (4983, 4988), True, 'import numpy as np\n'), ((5514, 5557), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, num_label)', 'dtype': 'float'}), '(shape=(n, num_label), dtype=float)\n', (5522, 5557), True, 'import numpy as np\n'), ((5560, 5621), 'numpy.put_along_axis', 'np.put_along_axis', (['categorical'], {'indices': 'pos', 'values': '(1)', 'axis': '(1)'}), '(categorical, indices=pos, values=1, axis=1)\n', (5577, 5621), True, 'import numpy as np\n'), ((5926, 5958), 'numpy.argmax', 'np.argmax', (['categoricals'], {'axis': '(-1)'}), '(categoricals, axis=-1)\n', (5935, 5958), True, 'import numpy as np\n'), ((6938, 7005), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['data'], {'shape': 'shape', 'strides': 'strides'}), '(data, shape=shape, strides=strides)\n', (6969, 7005), True, 'import numpy as np\n'), ((4380, 4524), 'NumPyNet.exception.NotFittedError', 'NotFittedError', (['"""This layer instance is not fitted yet. Call "forward" with appropriate arguments before using its backward function."""'], {}), '(\n \'This layer instance is not fitted yet. Call "forward" with appropriate arguments before using its backward function.\'\n )\n', (4394, 4524), False, 'from NumPyNet.exception import NotFittedError\n'), ((5005, 5023), 'numpy.mean', 'np.mean', (['(arr * arr)'], {}), '(arr * arr)\n', (5012, 5023), True, 'import numpy as np\n'), ((5481, 5492), 'numpy.max', 'np.max', (['pos'], {}), '(pos)\n', (5487, 5492), True, 'import numpy as np\n'), ((5427, 5454), 'numpy.expand_dims', 'np.expand_dims', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (5441, 5454), True, 'import numpy as np\n')] |
import os
import numpy as np
import pytest
from mikeio import Dfsu, Mesh
from mikeio.eum import ItemInfo
def test_read_all_items_returns_all_items_and_names():
    # reading with no selection returns every item of the file
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    data, _, items = dfsu.read(path)

    assert len(data) == 4
    assert len(items) == 4


def test_read_single_item_returns_single_item():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    data, _, items = dfsu.read(path, item_numbers=[3])

    assert len(data) == 1
    assert len(items) == 1


def test_read_returns_array_time_dimension_first():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    data, _, _ = dfsu.read(path, item_numbers=[3])

    # shape is (n_timesteps, n_elements)
    assert data[0].shape == (9, 884)


def test_read_selected_item_returns_correct_items():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    data, _, items = dfsu.read(path, item_numbers=[0, 3])

    assert len(data) == 2
    assert len(items) == 2
    assert items[0].name == "Surface elevation"
    assert items[1].name == "Current speed"
def test_read_selected_item_names_returns_correct_items():
    # items can be selected by name instead of index
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    data, _, items = dfsu.read(
        path, item_names=["Surface elevation", "Current speed"]
    )

    assert len(data) == 2
    assert len(items) == 2
    assert items[0].name == "Surface elevation"
    assert items[1].name == "Current speed"


def test_read_all_time_steps():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    ds = dfsu.read(path, item_numbers=[0, 3])

    assert len(ds.time) == 9
    assert ds.data[0].shape[0] == 9


def test_read_single_time_step():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    ds = dfsu.read(path, item_numbers=[0, 3], time_steps=[1])

    assert len(ds.time) == 1
    assert ds.data[0].shape[0] == 1


def test_read_single_time_step_outside_bounds_fails():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    # the file only has 9 time steps, step 100 must fail
    with pytest.raises(Exception):
        dfsu.read(path, item_numbers=[0, 3], time_steps=[100])
def test_get_number_of_time_steps():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    assert dfsu.get_number_of_time_steps() == 9


def test_get_number_of_time_steps_with_input_arg():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    # reading a single step must not change the reported total
    dfsu.read(path, time_steps=[4])

    assert dfsu.get_number_of_time_steps() == 9


def test_get_node_coords():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    coords = dfsu.get_node_coords()

    assert coords[0, 0] == 607031.4886285994


def test_get_element_coords():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    coords = dfsu.get_element_coords()

    assert coords[1, 1] == 6906790.5928664245


def test_find_closest_element_index():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    index = dfsu.find_closest_element_index(606200, 6905480)

    assert index == 317
def test_read_and_select_single_element():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()

    ds = dfsu.read(path)
    assert ds.data[0].shape == (9, 884)

    # selecting one element keeps only the time dimension
    index = dfsu.find_closest_element_index(606200, 6905480)
    selected = ds.isel(idx=index, axis=1)
    assert selected.data[0].shape == (9,)


def test_is_geo_UTM():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    assert dfsu.is_geo is False


def test_is_geo_LONGLAT():
    path = os.path.join("tests", "testdata", "wind_north_sea.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    assert dfsu.is_geo is True


def test_get_element_area_UTM():
    path = os.path.join("tests", "testdata", "HD2D.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    areas = dfsu.get_element_area()
    assert areas[0] == 4949.102548750438


def test_get_element_area_LONGLAT():
    path = os.path.join("tests", "testdata", "wind_north_sea.dfsu")
    dfsu = Dfsu()
    dfsu.read(path)

    areas = dfsu.get_element_area()
    assert areas[0] == 139524218.81411952
def test_create(tmpdir):
    # create a dfsu file from a mesh plus one all-zero item
    outfilename = os.path.join(tmpdir.dirname, "simple.dfs1")
    meshfilename = os.path.join("tests", "testdata", "odense_rough.mesh")

    mesh = Mesh(meshfilename)
    n_elements = mesh.number_of_elements

    data = [np.zeros((1, n_elements))]
    items = [ItemInfo("Zeros")]

    dfsu = Dfsu()
    dfsu.create(meshfilename, outfilename, data, items=items)

    assert True
| [
"mikeio.Dfsu",
"mikeio.eum.ItemInfo",
"numpy.zeros",
"pytest.raises",
"mikeio.Mesh",
"os.path.join"
] | [((178, 224), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (190, 224), False, 'import os\n'), ((235, 241), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (239, 241), False, 'from mikeio import Dfsu, Mesh\n'), ((405, 451), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (417, 451), False, 'import os\n'), ((462, 468), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (466, 468), False, 'from mikeio import Dfsu, Mesh\n'), ((653, 699), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (665, 699), False, 'import os\n'), ((710, 716), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (714, 716), False, 'from mikeio import Dfsu, Mesh\n'), ((886, 932), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (898, 932), False, 'import os\n'), ((943, 949), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (947, 949), False, 'from mikeio import Dfsu, Mesh\n'), ((1236, 1282), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (1248, 1282), False, 'import os\n'), ((1293, 1299), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (1297, 1299), False, 'from mikeio import Dfsu, Mesh\n'), ((1604, 1650), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (1616, 1650), False, 'import os\n'), ((1661, 1667), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (1665, 1667), False, 'from mikeio import Dfsu, Mesh\n'), ((1836, 1882), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (1848, 1882), False, 'import os\n'), ((1893, 1899), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', 
(1897, 1899), False, 'from mikeio import Dfsu, Mesh\n'), ((2105, 2151), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (2117, 2151), False, 'import os\n'), ((2162, 2168), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (2166, 2168), False, 'from mikeio import Dfsu, Mesh\n'), ((2326, 2372), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (2338, 2372), False, 'import os\n'), ((2383, 2389), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (2387, 2389), False, 'from mikeio import Dfsu, Mesh\n'), ((2530, 2576), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (2542, 2576), False, 'import os\n'), ((2587, 2593), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (2591, 2593), False, 'from mikeio import Dfsu, Mesh\n'), ((2726, 2772), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (2738, 2772), False, 'import os\n'), ((2783, 2789), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (2787, 2789), False, 'from mikeio import Dfsu, Mesh\n'), ((2934, 2980), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (2946, 2980), False, 'import os\n'), ((2991, 2997), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (2995, 2997), False, 'from mikeio import Dfsu, Mesh\n'), ((3154, 3200), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (3166, 3200), False, 'import os\n'), ((3211, 3217), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (3215, 3217), False, 'from mikeio import Dfsu, Mesh\n'), ((3383, 3429), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (3395, 3429), 
False, 'import os\n'), ((3440, 3446), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (3444, 3446), False, 'from mikeio import Dfsu, Mesh\n'), ((3694, 3740), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (3706, 3740), False, 'import os\n'), ((3751, 3757), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (3755, 3757), False, 'from mikeio import Dfsu, Mesh\n'), ((3857, 3913), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""wind_north_sea.dfsu"""'], {}), "('tests', 'testdata', 'wind_north_sea.dfsu')\n", (3869, 3913), False, 'import os\n'), ((3924, 3930), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (3928, 3930), False, 'from mikeio import Dfsu, Mesh\n'), ((4035, 4081), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""HD2D.dfsu"""'], {}), "('tests', 'testdata', 'HD2D.dfsu')\n", (4047, 4081), False, 'import os\n'), ((4092, 4098), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (4096, 4098), False, 'from mikeio import Dfsu, Mesh\n'), ((4253, 4309), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""wind_north_sea.dfsu"""'], {}), "('tests', 'testdata', 'wind_north_sea.dfsu')\n", (4265, 4309), False, 'import os\n'), ((4320, 4326), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (4324, 4326), False, 'from mikeio import Dfsu, Mesh\n'), ((4471, 4514), 'os.path.join', 'os.path.join', (['tmpdir.dirname', '"""simple.dfs1"""'], {}), "(tmpdir.dirname, 'simple.dfs1')\n", (4483, 4514), False, 'import os\n'), ((4534, 4588), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""odense_rough.mesh"""'], {}), "('tests', 'testdata', 'odense_rough.mesh')\n", (4546, 4588), False, 'import os\n'), ((4600, 4618), 'mikeio.Mesh', 'Mesh', (['meshfilename'], {}), '(meshfilename)\n', (4604, 4618), False, 'from mikeio import Dfsu, Mesh\n'), ((4696, 4721), 'numpy.zeros', 'np.zeros', (['(1, n_elements)'], {}), '((1, n_elements))\n', (4704, 4721), True, 'import numpy as 
np\n'), ((4799, 4805), 'mikeio.Dfsu', 'Dfsu', ([], {}), '()\n', (4803, 4805), False, 'from mikeio import Dfsu, Mesh\n'), ((2179, 2203), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2192, 2203), False, 'import pytest\n'), ((4769, 4786), 'mikeio.eum.ItemInfo', 'ItemInfo', (['"""Zeros"""'], {}), "('Zeros')\n", (4777, 4786), False, 'from mikeio.eum import ItemInfo\n')] |
#!/usr/local/bin/python2.7
# encoding: utf-8
'''
Axile -- Outil de conception/simulation de parapentes Nervures
Classe NSpline
Description :
@author: puiseux
@copyright: 2016 Nervures. All rights reserved.
@contact: <EMAIL>
'''
from numbers import Number
import numpy as np
import cPickle
from numpy import asarray as array, linspace, loadtxt, savetxt,sqrt
from scipy.optimize import minimize_scalar
from scipy.interpolate import (CubicSpline, InterpolatedUnivariateSpline,
UnivariateSpline)
from scipy.interpolate.fitpack2 import LSQUnivariateSpline
from scipy.integrate import quad
from utilitaires.utilitairesdivers import (Path, whoami, debug, rdebug, absCurv, baryCentre,
centreGravite)
from utilitaires.lecteurs import pointsFrom, LecteurUniversel
def arrange(dump):
    u"""
    Normalize obsolete key spellings of ``dump`` in place.

    For each pair of historical spellings, the first key found is popped and
    its value stored under the canonical name. Modifies ``dump``, returns
    nothing.

    Parameters
    ----------
    dump : dict
        Serialized spline attributes, possibly with obsolete keys.
    """
    # (aliases checked in order) -> canonical key; replaces four copy-pasted
    # loops and the `key in dump.keys()` anti-pattern of the original.
    renames = (
        (('classename', 'classname'), 'classname'),
        (('precision', 'nbpd'), 'nbpd'),
        (('points', 'cpoints'), 'cpoints'),
        (('rayon', 'courbure'), 'courbure'),
    )
    for aliases, canonical in renames:
        for key in aliases:
            if key in dump:
                dump[canonical] = dump.pop(key)
                break
def absCurvReal(S, T):
    u"""
    Real curvilinear abscissa of the point(s) S(t), t in T, on the spline S.

    The abscissa of a point of parameter t in [0, 1] is the integral from 0
    to t of phi(s) = sqrt(S.sx'(s)**2 + S.sy'(s)**2). The integration is
    done by scipy.integrate.quad, split at the knots of S lying below t.
    If the spline S has too many control points this is slow and the error
    grows.

    Parameters
    ----------
    S : NSplineAbstract
    T : scalar in [0, 1], or any sequence/ndarray of such scalars.

    Returns
    -------
    (ac, err) : two floats (abscissa, estimated error) when T is a scalar,
        two ndarrays of n floats otherwise (see scipy.integrate).
    """
    if not isinstance(T, Number):
        # vectorized case: recurse on every parameter value
        res = array([absCurvReal(S, t) for t in T])
        return res[:, 0], res[:, 1]
    # scalar case: integrate the speed piecewise between consecutive knots
    phi = lambda s: sqrt(S.sx(s, 1)**2 + S.sy(s, 1)**2)
    bounds = [tk for tk in S.knots if tk < T] + [T]
    ac, err = 0, 0
    for t1, t2 in zip(bounds[:-1], bounds[1:]):
        piece, piece_err = quad(phi, t1, t2)
        ac += piece
        err = max(err, piece_err)
    return ac, err
def distancePointSpline(p, S, t0=0.5, tol=1.0e-9):
    u"""
    Squared shortest euclidean distance from point p to the parametric
    spline S(t) = (x(t), y(t)), 0 <= t <= 1.

    Parameters
    ----------
    p : (float, float)
        The point (x, y).
    S : NSplineAbstract
        The parametric spline, providing S.sx(t) and S.sy(t).
    t0 : float
        Historical initialization value; unused by the bounded method but
        kept for backward compatibility.
    tol : float
        Absolute tolerance on t, forwarded to scipy.optimize.minimize_scalar.

    Returns
    -------
    res : scipy.optimize.OptimizeResult
        In particular:
        - res.x : float, parameter t realizing the minimum distance
        - res.nfev : int, number of evaluations of phi(t)
        - res.fun : float, final value of phi(t) (the squared distance)
    """
    a, b = p[0], p[1]
    x, y = S.sx, S.sy
    # phi(t) is the squared distance from p to S(t)
    phi = lambda t: (a - x(t))**2 + (b - y(t))**2
    # BUGFIX: `tol` was accepted but ignored (xatol was hard-coded to 1e-9);
    # the default value preserves the historical behavior.
    res = minimize_scalar(phi,
                          bounds=(0.0, 1.0),
                          method='bounded',
                          options={'xatol': tol})
    return res
def computeSpline(cpoints, methode):
u"""
Calcule la spline (sx, sy) considérée comme une courbe paramétrée sx(t), sy(t).
sx et sy sont deux splines à une seule variable au sens scipy.
Si cpoints contient des points double consécutifs, la construction échoue.
Retourne: T, sx, sy
--------
- T np.ndarray(n) = abscisses curvilignes des points de cpoints
- sx et sy deux splines d'interpolation de cpoints, i.e. vérifiant [sx(T[i]), sy(T[i])] = cpoints[i]
sx, sy = None, None si cpoints.shape = (0,2) ou cpoints = None
si cpoints contient deux points consécutifs identique, alors
l'exception "ValueError: `x` must be strictly increasing sequence." est levée
Parametres :
----------
- cpoints = np.ndarray((n,2)) les points de contrôle.
- methode : peut prendre differentes valeurs. Il est de la forme :
methode = (type,parametres)
avec :
# type est une chaîne de caractères parmi ('cubic', 'ius', 'us')
c'est le type de spline (suivant la terminologie scipy.interpolate).
# paramètres sont les paramètres associés au type de spline
# si type = 'cubic' c'est une spline d'INTERPOLATION
- parametres = 'periodic' pour splines fermées, sans point anguleux (trous par exemple)
- parametres = 'clamped' fixe à zéro les derivées premières de x et y aux extrémités
équivaut à parametres=((1, 0, 1, 0), (1, 0, 1, 0)) (un extrados p.ex.)
- parametres = 'natural' fixe à zéro les derivées secondes de x et y aux extrémités
équivaut à parametres=((2, 0, 2, 0), (2, 0, 2, 0))
- parametres = 'not-a-knot' : The first and second segment at a curve end are the same polynomial.
It is a good default when there is no information on boundary conditions.
- [obsolete, simplification, cf ci-dessous]
parametres = ((dx0, vx0, dy0, vy0), (dxn, vxn, dyn, vyn)) permet de fixer les dérivées aux extrémités
* le premier tuple concerne le premier point de la spline
- dx0, vx0 : ordre de dérivation en x et valeur de la dérivée dx0-ième.
P.ex. si dx0, vx0 = 1, 3.14 on demande que x'(0)=3.14.
Usuellement, dx0 et dy0 valent 0,1 ou 2
- dy0, vy0 : analogue pour y(0)
* le deuxième tuple définit de la même manière les dérivées de x(t) et y(t) en t=1,
donc pour le dernier point de la spline
[fin obsolete]
- parametres = ((d0, dx0, dy0), (dn, dxn, dyn)) permet de fixer les dérivées aux extrémités
* le premier tuple concerne le premier point de la spline
- d0 = ordre de dérivation et
- dx0,dy0 = le vecteur dérivée d0-ième.
P.ex. si d0, dx0, dy0 = 1, 3.14, 7 on demande que x'(0),y'(0) = 3.14, 7
Usuellement, d0 vaut 0,1 ou 2
* le deuxième tuple définit de la même manière le vecteur dérivé-dn-ieme en t=1,
donc pour le dernier point de la spline
* NB, il est possible de fixer plus finement les valeurs des dérivées aux extrémités
(cf § obsolete ci-dessus, on peut fixer (par exemple) x'(0) et y"(0) indépendemment,
alors qu'actuellement on fixe v'(0)=(x'(0),y'(0)) ou bien v"(0)=(x"(0),y"(0))
# si type = 'ius' ou 'interpolatedunivariatespline', il s'agit de spline d'INTERPOLATION,
(utilise la classe InterpolatedUnivariateSpline de scipy, fille de UnivariateSpline, avec s=0)
parametres est alors un entier, le degré de la spline.
Un polyligne est de type 'ius', avec degré=1.
Les autre degrés ne sont pas forcément utilisés
je crois que ('ius', 3) fait double emploi avec ('cubic', 'not-a-knot')
# si type = 'us' ou 'univariatespline', c'est une spline d'AJUSTEMENT. Dans ce cas, les paramètres sont
un dictionnaire :
parametres = {'w':None, 'bbox':[None, None], 'k':3, 's':None, 'ext':0, 'check_finite':False}
voir UnivariateSpline(x, y, w=None, bbox=[None, None], k=3, s=None, ext=0, check_finite=False)
dans la doc scipy.
"""
type_, parametres = methode
# debug(methode=(type_,parametres),shape=cpoints.shape)
if len(cpoints)<2 :#moins de deux points de controle
#TODO les knots devraient être de shape (0,)??
_knots, sx, sy = np.zeros((2,)), None, None
return _knots, sx, sy
if type_ == 'cubic' :
if parametres in ('periodic', 'per', 'periodique') :
#il faut au moins 3 points, avec P[0]==P[-1]
#et un des points intermediaires P[k]!=P[0]
if len(cpoints)<3 :
_knots, sx, sy = np.zeros((2,)), None, None
return _knots, sx, sy
# debug(cpoints_shape=cpoints.shape, p0=cpoints[0],pn=cpoints[-1])
# if all(cpoints[0] == cpoints[-1]) : pass
if np.linalg.norm(cpoints[0] - cpoints[-1])<1.0e-10 :
cpoints[-1]=cpoints[0]
else : #On rajoute le premier point a la fin
# debug('cpoints[0]-cpoints[1]=%s'%(cpoints[0]-cpoints[1]))
# raise ValueError('Spline periodique, %s != %s'%(cpoints[0],cpoints[-1]))
cpoints = np.vstack((cpoints, cpoints[0]))
N = len(cpoints)
# debug(shape=cpoints.shape)
T = absCurv(cpoints, normalise=True)
# debug(abscurv_cpoints=T)
# _knots = T
X = cpoints[:,0]
Y = cpoints[:,1]
if type_ == 'cubic' :
# debug(parametres=parametres)
if isinstance(parametres, (str, unicode)):
sx = CubicSpline(T, X, bc_type=parametres)
sy = CubicSpline(T, Y, bc_type=parametres)
# debug(sx,sy)
else :
(d0, vx0, vy0), (dn, vxn, vyn) = parametres
sx = CubicSpline(T, X, bc_type=((d0, vx0),(dn,vxn)))
sy = CubicSpline(T, Y, bc_type=((d0, vy0),(dn,vyn)))
elif type_ == 'ius':
# try :
sx = InterpolatedUnivariateSpline(T, X, k=parametres)#s=la précision de l'ajustement s=0 <=> interpolation
sy = InterpolatedUnivariateSpline(T, Y, k=parametres)
# except Exception as msg:
# rdebug('pas de spline (degre trop eleve ?)')
# print unicode (msg)
# sx = sy = None
elif type_ == 'us' :
#UnivariateSpline(x, y, w=None, bbox=[None, None], k=3,
# s=None, ext=0,check_finite=False)
weights = np.ones(N)
W = 1000.0
# eps = 1.0e-5#NPrefs.EPS
# en supposant que tous les termes erreur di^2=wi*(xi-f(ti))^2 sont egaux
# le choix de s suivant implique
# abs(xi-f(ti))<eps et
# abs(x1-f(t1))<eps/(N*W) et abs(xN-f(tN))<eps/(N*W)
weights[0] = weights[-1] = W
weights /= np.sum(weights)
# s = eps/(N*W)
# debug(len(T), parametres)
sx = UnivariateSpline(T, X, k=parametres, w=None, s=1.0e-10)#s=la précision de l'ajustement s=0 <=> interpolation
sy = UnivariateSpline(T, Y, k=parametres, w=None, s=1.0e-10)
# sx = UnivariateSpline(T, X, w=weights, k=parametres, s=s)#s=la précision de l'ajustement s=0 <=> interpolation
# sy = UnivariateSpline(T, Y, w=weights, k=parametres, s=s)
# weights = np.ones(N)
# W = 1000.0
# eps = NPrefs.EPS
# # en supposant que tous les termes erreur di^2=wi*(xi-f(ti))^2 sont egaux
# # le choix de s suivant implique
# # abs(xi-f(ti))<eps et
# # abs(x1-f(t1))<eps/(N*W) et abs(xN-f(tN))<eps/(N*W)
# weights[0] = weights[-1] = W
# weights /= np.sum(weights)
# s = eps/(N*W)
#
# sx = UnivariateSpline(T, X, w=weights, k=parametres, s=s)#s=la précision de l'ajustement s=0 <=> interpolation
# sy = UnivariateSpline(T, Y, w=weights, k=parametres, s=s)
elif type_ == 'lsqus' :
raise NotImplementedError ('LSQUnivariateSpline non implemente')
#LSQUnivariateSpline(x, y, t, w=None, bbox=[None, None], k=3, ext=0,
# check_finite=False)
weights = np.ones(N)
W = 1000.0
# eps = 1.0e-5#NPrefs.EPS
# en supposant que tous les termes erreur di^2=wi*(xi-f(ti))^2 sont egaux
# le choix de s suivant implique
# abs(xi-f(ti))<eps et
# abs(x1-f(t1))<eps/(N*W) et abs(xN-f(tN))<eps/(N*W)
weights[0] = weights[-1] = W
weights /= np.sum(weights)
knots = linspace(0,1,20)
sx = LSQUnivariateSpline(T, X, knots, k=parametres, w=None)#s=la précision de l'ajustement s=0 <=> interpolation
sy = LSQUnivariateSpline(T, Y, knots, k=parametres, w=None)
# sx = UnivariateSpline(T, X, w=weights, k=parametres, s=s)#s=la précision de l'ajustement s=0 <=> interpolation
# sy = UnivariateSpline(T, Y, w=weights, k=parametres, s=s)
# weights = np.ones(N)
# W = 1000.0
# eps = NPrefs.EPS
# # en supposant que tous les termes erreur di^2=wi*(xi-f(ti))^2 sont egaux
# # le choix de s suivant implique
# # abs(xi-f(ti))<eps et
# # abs(x1-f(t1))<eps/(N*W) et abs(xN-f(tN))<eps/(N*W)
# weights[0] = weights[-1] = W
# weights /= np.sum(weights)
# s = eps/(N*W)
#
# sx = UnivariateSpline(T, X, w=weights, k=parametres, s=s)#s=la précision de l'ajustement s=0 <=> interpolation
# sy = UnivariateSpline(T, Y, w=weights, k=parametres, s=s)
return T, sx, sy
class NSplineAbstract(object):
    u"""
    Common interface for every spline flavour (simple, composite, profile...).
    Abstract: several methods are pure virtual and raise NotImplementedError,
    so this class cannot be instantiated directly.

    The spline is self-sufficient: _update() must never be called from the
    outside (except, possibly, from subclasses).

    Attributes
    ----------
    - cpoints (property) : the control points, np.ndarray of shape (n, 2);
      this is the reference data, its setter triggers _update().
    - sx, sy : the two parametric interpolation splines computed by scipy.
    - dpoints (property) : the discretized points used for display
      (self.precision points, typically large, e.g. 1000).
    - epoints (property) : the sampled points, following the distribution
      given by self.mode and self.nbpe; computed on demand by
      echantillonner().
    - mode, nbpe : sampling mode and number of sampling points.
    - name, role : identification strings.

    Methods (overview)
    ------------------
    - abscurv() : recompute the curvilinear abscissae of cpoints (_knots);
      they are the common knot vector of sx and sy (_knots == sx.x == sy.x).
    - computeSpline() : (re)build sx and sy; done automatically whenever
      cpoints changes.
    - save(filename), open(filename), load(dump), toDump() : persistence.
    - echantillonner() : sample the spline and fill self.epoints.
    - isClosed(eps) : True if dist(self[0], self[-1]) < eps (1-norm).
    - __call__(T) : P = self(T); evaluates (sx(t), sy(t)) for every t in T,
      with T any array of curvilinear abscissae in [0, 1].
    - plot() : matplotlib display.
    - __getitem__(i) / __setitem__(i, p) : control-point access;
      __setitem__ triggers _update().
    - len(self) : number of control points.
    - insertPoint/appendPoint (raise on duplicate points), removePoint,
      hardScale, hardRotate, translate : geometric edition; each triggers a
      full _update() (recomputation of sx, sy, dpoints, ...).
    """
    # file patterns this class knows how to read and write
    SUPPORTED_FILES = ('*.gnu', '*.dxf', '*.pts', '*.pkl','*.spl','*.npkl')

    class Default(dict):
        u"""Default attribute values; subclasses override this to supply theirs."""
        def __init__(self):
            dict.__init__(self,{})

    def __init__(self, **dump):
        super(NSplineAbstract, self).__init__()
        # install the default attribute values, then let load() overwrite them
        for key, value in self.Default().iteritems() :
            setattr(self, key, value)
        self.load(dump)
        # _update() must be called explicitly by subclasses at the right time

    def load(self, dump):
        u"""Pure virtual: rebuild the spline from a dump dictionary."""
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def __getitem__(self,k):
        u"""
        Read access to the control polygon: returns the k-th control point
        as a pair (x, y).  Slices are NOT supported.
        """
        return self.cpoints[k]

    def __str__(self):
        return u'\n'.join(self.info)

    @property
    def height(self):
        u"""Height of the bounding box of the discretized spline (cached in _height)."""
        if not hasattr(self, '_height') :
            self._height = 0 if len(self)<=1 else\
             max(self.dpoints[:,1]) - min(self.dpoints[:,1])
        return self._height
    hauteur = height

    @property
    def width(self):
        u"""Width of the bounding box of the discretized spline (cached in _width)."""
        if not hasattr(self, '_width') :
            self._width = 0 if len(self)<=1 else\
             max(self.dpoints[:,0]) - min(self.dpoints[:,0])
        return self._width
    largeur = width

    def boundingRect(self):
        u"""Bounding box of the discretized spline, as (xmin, ymin, xmax, ymax)."""
        dpts = self.dpoints
        xM, xm, yM, ym = dpts[:,0].max(), dpts[:,0].min(), dpts[:,1].max(), dpts[:,1].min()
        return xm,ym,xM,yM

    @property
    def gravitycenter(self):
        u"""True centroid of the (plane-plate) surface bounded by the closed polygon."""
        return self[0] if len(self)==1 else centreGravite(self.dpoints)
    centregravite=gravitycenter

    @property
    def barycentre(self):
        u"""Barycentre of the cloud of material points cpoints."""
        return baryCentre(self.cpoints)

    # ------------------------- pure virtual methods -------------------------
    def __call__(self, T, diff=0):
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def toDump(self):
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def copy(self):
        u"""Return a copy of self."""
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def save(self, filename):
        u"""
        Save the spline to *filename*; the format follows the extension:
        - .gnu/.txt : sampled points only, plain text;
        - .pkl/.npkl : full spline, pickled dump dictionary;
        - .spl : full spline, repr() of the dump dictionary;
        - .dxf : not implemented.
        Failures are reported through rdebug(), not raised.
        """
        filename = Path(filename)
        ext = filename.ext
        try :
            if ext in (".gnu", '.txt'):
                # sampled points only
                savetxt(filename, self.epoints)
            elif ext in ('.pkl','.npkl'):
                # full spline, pickled dict
                # BUGFIX: the original called cPickle.dump(..., open(filename,'w'))
                # and never closed the file handle; use a context manager instead.
                with open(filename,'w') as f :
                    cPickle.dump(self.toDump(), f)
            elif ext=='.spl':
                # full spline, plain-text dict
                with open(filename,'w') as f :
                    f.write(str(self.toDump()))
            elif ext=='.dxf':
                raise NotImplementedError('Format dxf')
            debug('Sauvegarde %s : OK'%filename)
        except Exception as msg:
            rdebug('Sauvegarde %s impossible : %s'%(filename.name,str(msg)))

    def open(self, filename):
        u"""
        Load the spline from *filename*; the format follows the extension
        (.gnu/.txt plain points, .pts profile file, .pkl/.npkl pickled dump,
        .spl/.nspl plain-text dump).
        """
        filename = Path(filename)
        ext = filename.ext
        if ext in (".gnu", '.txt'):
            dump = {}
            dump['cpoints'] = loadtxt(filename)
            dump['name'] = filename.name
        elif ext==".pts":
            # BUGFIX: 'dump' was used here without being initialized, which
            # raised a NameError for every .pts file.
            dump = {}
            dump['cpoints'] = LecteurUniversel(filename).points
            dump['name'] = filename.name
        elif ext in ('.pkl', '.npkl'):
            dump = cPickle.load(open(filename,'r'))
            for key in ('points', 'cpoints') :
                if dump.has_key(key) :
                    dump[key] = pointsFrom(dump.pop(key))
        elif ext in('.spl','.nspl') :
            # NOTE: eval() on the file content — only open trusted files.
            with open(filename, 'r') as f :
                dump = eval(f.read())
            if dump.has_key('dmodel') :
                dump = dump.pop('dmodel')
        self.load(dump)

    def computeSpline(self, *args, **kargs):
        u"""Pure virtual: fill self.sx, self.sy."""
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def echantillonner(self, *args, **kargs):
        raise NotImplementedError(u"la methode %s doit etre implemente"%(whoami(self)[:-2]))

    def _update(self):
        u'''
        Called after every modification of
        - a control point (geometry), or the base polygon,
        - the spline method (cubic, IUS, US, ...) or the end derivatives,
        - the sampling mode.
        Strictly private: never call it from the outside.
        Implementations must delete and recompute everything, in particular
        the splines sx and sy, and invalidate the cached attributes
        _qcpolygon, _qepolygon, _epoints, _dpoints, _knots, _height,
        _width and _longueur.
        '''
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))
        # NOTE: the original version carried a series of ``try: del self._xxx``
        # statements after the raise; they were unreachable dead code and have
        # been folded into the docstring above.

    def plot(self, *args, **kargs):
        raise NotImplementedError(u"la methode() %s doit etre implemente"%(whoami(self)[:-2]))

    def longueur(self, p='r'):
        u"""
        Length of the spline:
        - p='c' : length of the control polygon;
        - p='d' : length of the discretized polyline;
        - p='e' : length of the sampled polyline;
        - anything else : true curvilinear length (pure virtual).
        """
        if p=='c':
            return absCurv(self.cpoints, normalise=False)[-1]
        elif p=='d' :
            return absCurv(self.dpoints, normalise=False)[-1]
        elif p=='e' :
            return absCurv(self.epoints, normalise=False)[-1]
        else:#true length: subclasses must implement it
            raise NotImplementedError(u"la methode %s('r') doit etre implemente"%(whoami(self)[:-2]))
if __name__=="__main__":
    # nothing to execute: this module only defines the abstract spline interface
    debug('Rien a faire')
| [
"numpy.sum",
"scipy.interpolate.CubicSpline",
"utilitaires.lecteurs.LecteurUniversel",
"numpy.ones",
"numpy.linalg.norm",
"scipy.interpolate.InterpolatedUnivariateSpline",
"scipy.interpolate.UnivariateSpline",
"numpy.savetxt",
"utilitaires.utilitairesdivers.baryCentre",
"numpy.loadtxt",
"numpy.l... | [((4470, 4557), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['phi'], {'bounds': '(0.0, 1.0)', 'method': '"""bounded"""', 'options': "{'xatol': 1e-09}"}), "(phi, bounds=(0.0, 1.0), method='bounded', options={'xatol':\n 1e-09})\n", (4485, 4557), False, 'from scipy.optimize import minimize_scalar\n'), ((10033, 10065), 'utilitaires.utilitairesdivers.absCurv', 'absCurv', (['cpoints'], {'normalise': '(True)'}), '(cpoints, normalise=True)\n', (10040, 10065), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((24795, 24816), 'utilitaires.utilitairesdivers.debug', 'debug', (['"""Rien a faire"""'], {}), "('Rien a faire')\n", (24800, 24816), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((20139, 20163), 'utilitaires.utilitairesdivers.baryCentre', 'baryCentre', (['self.cpoints'], {}), '(self.cpoints)\n', (20149, 20163), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((20723, 20737), 'utilitaires.utilitairesdivers.Path', 'Path', (['filename'], {}), '(filename)\n', (20727, 20737), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((21833, 21847), 'utilitaires.utilitairesdivers.Path', 'Path', (['filename'], {}), '(filename)\n', (21837, 21847), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((3125, 3142), 'scipy.integrate.quad', 'quad', (['phi', 't1', 't2'], {}), '(phi, t1, t2)\n', (3129, 3142), False, 'from scipy.integrate import quad\n'), ((9060, 9074), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (9068, 9074), True, 'import numpy as np\n'), ((10290, 10327), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['T', 'X'], {'bc_type': 'parametres'}), '(T, X, bc_type=parametres)\n', (10301, 10327), 
False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((10345, 10382), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['T', 'Y'], {'bc_type': 'parametres'}), '(T, Y, bc_type=parametres)\n', (10356, 10382), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((10498, 10547), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['T', 'X'], {'bc_type': '((d0, vx0), (dn, vxn))'}), '(T, X, bc_type=((d0, vx0), (dn, vxn)))\n', (10509, 10547), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((10563, 10612), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['T', 'Y'], {'bc_type': '((d0, vy0), (dn, vyn))'}), '(T, Y, bc_type=((d0, vy0), (dn, vyn)))\n', (10574, 10612), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((10669, 10717), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['T', 'X'], {'k': 'parametres'}), '(T, X, k=parametres)\n', (10697, 10717), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((10788, 10836), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['T', 'Y'], {'k': 'parametres'}), '(T, Y, k=parametres)\n', (10816, 10836), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((19949, 19976), 'utilitaires.utilitairesdivers.centreGravite', 'centreGravite', (['self.dpoints'], {}), '(self.dpoints)\n', (19962, 19976), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((21611, 21649), 'utilitaires.utilitairesdivers.debug', 'debug', (["('Sauvegarde %s : OK' % filename)"], {}), "('Sauvegarde %s : OK' % filename)\n", (21616, 21649), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, 
baryCentre, centreGravite\n'), ((22070, 22087), 'numpy.loadtxt', 'loadtxt', (['filename'], {}), '(filename)\n', (22077, 22087), False, 'from numpy import asarray as array, linspace, loadtxt, savetxt, sqrt\n'), ((9597, 9637), 'numpy.linalg.norm', 'np.linalg.norm', (['(cpoints[0] - cpoints[-1])'], {}), '(cpoints[0] - cpoints[-1])\n', (9611, 9637), True, 'import numpy as np\n'), ((9937, 9969), 'numpy.vstack', 'np.vstack', (['(cpoints, cpoints[0])'], {}), '((cpoints, cpoints[0]))\n', (9946, 9969), True, 'import numpy as np\n'), ((11162, 11172), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11169, 11172), True, 'import numpy as np\n'), ((11497, 11512), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (11503, 11512), True, 'import numpy as np\n'), ((11587, 11640), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['T', 'X'], {'k': 'parametres', 'w': 'None', 's': '(1e-10)'}), '(T, X, k=parametres, w=None, s=1e-10)\n', (11603, 11640), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((11709, 11762), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['T', 'Y'], {'k': 'parametres', 'w': 'None', 's': '(1e-10)'}), '(T, Y, k=parametres, w=None, s=1e-10)\n', (11725, 11762), False, 'from scipy.interpolate import CubicSpline, InterpolatedUnivariateSpline, UnivariateSpline\n'), ((20888, 20919), 'numpy.savetxt', 'savetxt', (['filename', 'self.epoints'], {}), '(filename, self.epoints)\n', (20895, 20919), False, 'from numpy import asarray as array, linspace, loadtxt, savetxt, sqrt\n'), ((24422, 24460), 'utilitaires.utilitairesdivers.absCurv', 'absCurv', (['self.cpoints'], {'normalise': '(False)'}), '(self.cpoints, normalise=False)\n', (24429, 24460), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((9383, 9397), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (9391, 9397), True, 'import numpy as np\n'), ((12795, 12805), 
'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (12802, 12805), True, 'import numpy as np\n'), ((13130, 13145), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (13136, 13145), True, 'import numpy as np\n'), ((13162, 13180), 'numpy.linspace', 'linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (13170, 13180), False, 'from numpy import asarray as array, linspace, loadtxt, savetxt, sqrt\n'), ((13192, 13246), 'scipy.interpolate.fitpack2.LSQUnivariateSpline', 'LSQUnivariateSpline', (['T', 'X', 'knots'], {'k': 'parametres', 'w': 'None'}), '(T, X, knots, k=parametres, w=None)\n', (13211, 13246), False, 'from scipy.interpolate.fitpack2 import LSQUnivariateSpline\n'), ((13313, 13367), 'scipy.interpolate.fitpack2.LSQUnivariateSpline', 'LSQUnivariateSpline', (['T', 'Y', 'knots'], {'k': 'parametres', 'w': 'None'}), '(T, Y, knots, k=parametres, w=None)\n', (13332, 13367), False, 'from scipy.interpolate.fitpack2 import LSQUnivariateSpline\n'), ((18573, 18585), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (18579, 18585), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((20377, 20389), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (20383, 20389), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((20495, 20507), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (20501, 20507), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((20653, 20665), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (20659, 20665), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((22258, 22284), 'utilitaires.lecteurs.LecteurUniversel', 'LecteurUniversel', (['filename'], {}), '(filename)\n', (22274, 
22284), False, 'from utilitaires.lecteurs import pointsFrom, LecteurUniversel\n'), ((22965, 22977), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (22971, 22977), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((23105, 23117), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (23111, 23117), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((23647, 23659), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (23653, 23659), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((24332, 24344), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (24338, 24344), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((24506, 24544), 'utilitaires.utilitairesdivers.absCurv', 'absCurv', (['self.dpoints'], {'normalise': '(False)'}), '(self.dpoints, normalise=False)\n', (24513, 24544), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((24590, 24628), 'utilitaires.utilitairesdivers.absCurv', 'absCurv', (['self.epoints'], {'normalise': '(False)'}), '(self.epoints, normalise=False)\n', (24597, 24628), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n'), ((24744, 24756), 'utilitaires.utilitairesdivers.whoami', 'whoami', (['self'], {}), '(self)\n', (24750, 24756), False, 'from utilitaires.utilitairesdivers import Path, whoami, debug, rdebug, absCurv, baryCentre, centreGravite\n')] |
# ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
import unittest
import tensorflow as tf
import numpy as np
from dpfinder.utils.tf.tf_wrapper import TensorFlowWrapper
class TestTensorFlowWrapper(unittest.TestCase):
    """Exercises TensorFlowWrapper on a tiny sum-of-variables graph."""

    # a single 2-element variable, initialized to all ones
    shape = (2,)
    x_init = np.ones(shape)

    def build_graph(self):
        """Build the graph res = sum(x), with x a trainable variable."""
        variable = tf.get_variable('x', shape=self.shape)
        objective = tf.reduce_sum(variable)
        self.x = variable
        self.res = objective
        return objective

    def test_full(self):
        """Graph construction + initialization: the sum of two ones is 2."""
        wrapper = TensorFlowWrapper('test')
        wrapper.build_fresh_graph('res', self.build_graph)
        wrapper.initialize({self.x: self.x_init})
        self.assertEqual(wrapper.run(self.res), 2.0)
        return wrapper

    def test_optimize(self):
        """Minimizing sum(x) with x bounded to [-1, 1] should reach -2."""
        wrapper = self.test_full()
        optimizer = wrapper.get_optimizer(self.res, 20, {self.x: (-1.0, 1.0)}, [])
        wrapper.minimize(optimizer)
        self.assertAlmostEqual(wrapper.run(self.res), -2)
if __name__ == '__main__':
    # run the test suite when executed as a script
    unittest.main()
| [
"unittest.main",
"dpfinder.utils.tf.tf_wrapper.TensorFlowWrapper",
"tensorflow.reduce_sum",
"numpy.ones",
"tensorflow.get_variable"
] | [((1358, 1372), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1365, 1372), True, 'import numpy as np\n'), ((1924, 1939), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1937, 1939), False, 'import unittest\n'), ((1409, 1447), 'tensorflow.get_variable', 'tf.get_variable', (['"""x"""'], {'shape': 'self.shape'}), "('x', shape=self.shape)\n", (1424, 1447), True, 'import tensorflow as tf\n'), ((1461, 1482), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.x'], {}), '(self.x)\n', (1474, 1482), True, 'import tensorflow as tf\n'), ((1530, 1555), 'dpfinder.utils.tf.tf_wrapper.TensorFlowWrapper', 'TensorFlowWrapper', (['"""test"""'], {}), "('test')\n", (1547, 1555), False, 'from dpfinder.utils.tf.tf_wrapper import TensorFlowWrapper\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 18:03:37 2021
@author: <NAME>
"""
import numpy as np
#import scipy.sparse.linalg
from shapely.geometry import Point, Polygon
"""
Caracteristicas basicas da malha
"""
fileName = "fechadoCorpoExt.msh"
#elementSizeFactor = 0.09
import malhaModulo
malha = malhaModulo.Malha(fileName)
X = malha.X
Y = malha.Y
Lx = malha.Lx
Ly = malha.Ly
#nx = malha.nx
#ny = malha.ny
IEN = malha.IEN
IENBound = malha.IENBound
cc = malha.cc
ne = malha.ne
npoints = malha.npoints
"""
Pontos de contorno e criação da bval
"""
bval = np.zeros(npoints,dtype = 'float')
vx = np.zeros(npoints,dtype = 'float')
vy = np.zeros(npoints,dtype = 'float')
#boca
pontos_boca = 10
pboca = 18
yo = Y[pboca]
phi_o = 0
bval[pboca] = phi_o
uboca = 0.1
boca = []
boca.append(pboca)
for i in range (pontos_boca-1):
bval[pboca-1] = phi_o + uboca*(Y[pboca-1]-yo)
vx[pboca-1] = uboca
boca.append(pboca-1)
pboca-=1
#cantos da malha
bval[0] = 0
bval[1] = 0
bval[2] = bval[boca[-1]]
bval[3] = bval[boca[-1]]
#contorno direito
ignore = []
ndir_inic = 50
ndir_fin = 76
i = ndir_inic
while i <= ndir_fin:
ignore.append(i)
i+=1
#contorno inferior
ninf_inic = 35
ninf_fin = 49
i = ninf_inic
inferior=[]
while i <= ninf_fin:
bval[i] = 0
inferior.append(i)
i += 1
#contorno superior
nsup_inic = 77
nsup_fin = 92
i = nsup_inic
superior=[]
while i <= nsup_fin:
bval[i] = bval[boca[-1]]
superior.append(i)
i += 1
#nos excedentes
exc = [[4,34],[93,113]]
exc_lista=[]
for i in range (len(exc)):
ninic = exc[i][0]
nfin = exc[i][1]
j = ninic
while j <= nfin:
if j not in boca:
bval[j] = 0
exc_lista.append(j)
j += 1
#nos acima da boca e abaixo da parte superior (4 a 10, 93 a 99)
entre = [[4,10],[93,99]]
for i in range (len(entre)):
ninic = entre[i][0]
nfin = entre[i][1]
j = ninic
while j <= nfin:
bval[j] = bval[boca[-1]]
j += 1
"""
Matrizes Globais (K, M, GX e GY)
"""
matrizesGlobais = malhaModulo.MatrizesGlobais(fileName)
K = matrizesGlobais.K
M = matrizesGlobais.M
GX = matrizesGlobais.GX
GY = matrizesGlobais.GY
"""
Parametros utilizados
"""
u_real = 50
U = u_real/uboca
x_real = 2.5
L = x_real/Ly
vo = 1.66*10**(-5)
rho_ar = 1.14
mi_ar = 1.90*10**(-5)
D_ar = 110*10**(-6)
rho_w = 993.51
tau_v = (rho_w*(D_ar**2))/(18*mi_ar)
g = 9.81
dt = 0.01
Re =(U*L)/vo
iteracoes = 10
#partida da goticula
xg = 0.07205
yg = 1.6291
vxg = u_real
vyg = 0
dt_real = dt*L/U
"""
Solucao utilizando eq de transporte e de funcao corrente
"""
#matriz identidade
ident = np.identity(npoints)
xg_lista = []
yg_lista = []
for q in range (iteracoes):
#Condicao de Contorno de Vorticidade
B1 = np.dot(GX,vy) - np.dot(GY,vx)
wz = np.linalg.solve(M,B1)
#Matriz de estabilizacao
matrizesGlobais.construirMatrizKest(vx,vy,dt)
Kest = matrizesGlobais.Kest
#Solucao da equacao de transporte
vx_id = ident*vx
vy_id = ident*vy
A2 = M/dt
B2 = np.dot((M/dt - np.dot(vx_id,GX) - np.dot(vy_id,GY) - K/Re),wz) - np.dot(Kest,wz)
for i in cc:
A2[i,:] = 0.0
B2[i] = wz[i]
A2[i,i] = 1.0
wz = np.linalg.solve(A2,B2)
#Reinicializacao da matriz de estabilizacao
matrizesGlobais.Kest = np.zeros( (npoints,npoints), dtype='double')
#Solucao da equacao de funcao corrente
A3 = K
B3 = np.dot(M,wz)
for i in cc:
if i not in ignore:
A3[i,:] = 0.0
B3[i] = bval[i]
A3[i,i] = 1.0
Psi = np.linalg.solve(A3,B3)
#Atualizacao de vx e vy
#vx
A4 = M
B4 = np.dot(GY,Psi)
vx = np.linalg.solve(A4,B4)
#vy
A5 = M
B5 = np.dot(-GX,Psi)
vy = np.linalg.solve(A5,B5)
#Imposicao das cc de vx e vy
vx[0] = 0
vx[1] = 0
vx[2] = 0
vx[3] = 0
vy[0] = 0
vy[1] = 0
vy[2] = 0
vy[3] = 0
for i in ignore:
#contorno direito
vy[i] = 0
#vx[i] = ...
for i in superior:
#contorno superior
vx[i] = 0
vy[i] = 0
for i in inferior:
#contorno inferior
vx[i] = 0
vy[i] = 0
for i in boca:
#contorno da boca
vx[i] = uboca
vy[i] = 0
for i in exc_lista:
#nos excedentes
vx[i] = 0
vy[i] = 0
#primeiro e ultimo no da boca
#vx[boca[0]] = 0
#vx[boca[-1]] = 0
#goticula
p = Point(xg,yg)
for e in range(0,ne):
v1 = IEN[e,0]
v2 = IEN[e,1]
v3 = IEN[e,2]
coords = [(X[v1],Y[v1]),(X[v2],Y[v2]),(X[v3],Y[v3])]
poly = Polygon(coords)
if p.within(poly):
d1 = np.sqrt((xg - X[v1])**2 + (yg - Y[v1])**2)
d2 = np.sqrt((xg - X[v2])**2 + (yg - Y[v2])**2)
d3 = np.sqrt((xg - X[v3])**2 + (yg - Y[v3])**2)
p1 = 1/d1
p2 = 1/d2
p3 = 1/d3
vx_ar = U*(vx[v1]*p1 + vx[v2]*p2 + vx[v3]*p3)/(p1+p2+p3)
vy_ar = U*(vy[v1]*p1 + vy[v2]*p2 + vy[v3]*p3)/(p1+p2+p3)
v_ar = np.sqrt(vx_ar**2 + vy_ar**2)
v_gota = np.sqrt(vxg**2 + vyg**2)
Re_r = (rho_ar*abs(v_ar - v_gota)*D_ar)/mi_ar
if Re_r < 1000:
f = 1 + (Re_r**(2/3))/6
else:
f = 0.0183*Re_r
vxg += (f*dt_real)/tau_v * (vx_ar - vxg)
vyg += (f*dt_real)/tau_v * (vy_ar - vyg) - g*dt_real
xg += vxg*dt_real
yg += vyg*dt_real
xg_lista.append(xg)
yg_lista.append(yg)
break
"""O que voce quer plotar?"""
#--Funcao corrente --> Psi
#--Vorticidade --> wz
#--Velocidade em x --> U*vx
#--Velocidade em y --> U*vy
tempoFinal = str(np.round(len(xg_lista)*dt_real,4))
objetoPlot = Psi
tituloPlot = "Função corrente de t=0 até t=" + tempoFinal + "s"
salvarPlot = False
arquivoPlot = "fechadoCorpoExtCorrente.png"
malha.plotar(objetoPlot,tituloPlot)
"""Plot da goticula"""
D = str(np.round(D_ar*10**6,2))
tituloPlot = "Trajetória da gotícula (D="+ D +" mícrons), t=0 até t="+ tempoFinal+"s"
salvarPlot = False
arquivoPlot = "fechadoCorpoExtGoticula.png"
#malha.plotarGoticula(tituloPlot,xg_lista,yg_lista)
| [
"shapely.geometry.Point",
"shapely.geometry.Polygon",
"malhaModulo.Malha",
"numpy.round",
"numpy.zeros",
"numpy.identity",
"malhaModulo.MatrizesGlobais",
"numpy.dot",
"numpy.linalg.solve",
"numpy.sqrt"
] | [((309, 336), 'malhaModulo.Malha', 'malhaModulo.Malha', (['fileName'], {}), '(fileName)\n', (326, 336), False, 'import malhaModulo\n'), ((568, 600), 'numpy.zeros', 'np.zeros', (['npoints'], {'dtype': '"""float"""'}), "(npoints, dtype='float')\n", (576, 600), True, 'import numpy as np\n'), ((607, 639), 'numpy.zeros', 'np.zeros', (['npoints'], {'dtype': '"""float"""'}), "(npoints, dtype='float')\n", (615, 639), True, 'import numpy as np\n'), ((646, 678), 'numpy.zeros', 'np.zeros', (['npoints'], {'dtype': '"""float"""'}), "(npoints, dtype='float')\n", (654, 678), True, 'import numpy as np\n'), ((2043, 2080), 'malhaModulo.MatrizesGlobais', 'malhaModulo.MatrizesGlobais', (['fileName'], {}), '(fileName)\n', (2070, 2080), False, 'import malhaModulo\n'), ((2614, 2634), 'numpy.identity', 'np.identity', (['npoints'], {}), '(npoints)\n', (2625, 2634), True, 'import numpy as np\n'), ((2782, 2804), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'B1'], {}), '(M, B1)\n', (2797, 2804), True, 'import numpy as np\n'), ((3213, 3236), 'numpy.linalg.solve', 'np.linalg.solve', (['A2', 'B2'], {}), '(A2, B2)\n', (3228, 3236), True, 'import numpy as np\n'), ((3312, 3356), 'numpy.zeros', 'np.zeros', (['(npoints, npoints)'], {'dtype': '"""double"""'}), "((npoints, npoints), dtype='double')\n", (3320, 3356), True, 'import numpy as np\n'), ((3425, 3438), 'numpy.dot', 'np.dot', (['M', 'wz'], {}), '(M, wz)\n', (3431, 3438), True, 'import numpy as np\n'), ((3594, 3617), 'numpy.linalg.solve', 'np.linalg.solve', (['A3', 'B3'], {}), '(A3, B3)\n', (3609, 3617), True, 'import numpy as np\n'), ((3678, 3693), 'numpy.dot', 'np.dot', (['GY', 'Psi'], {}), '(GY, Psi)\n', (3684, 3693), True, 'import numpy as np\n'), ((3702, 3725), 'numpy.linalg.solve', 'np.linalg.solve', (['A4', 'B4'], {}), '(A4, B4)\n', (3717, 3725), True, 'import numpy as np\n'), ((3753, 3769), 'numpy.dot', 'np.dot', (['(-GX)', 'Psi'], {}), '(-GX, Psi)\n', (3759, 3769), True, 'import numpy as np\n'), ((3778, 3801), 
'numpy.linalg.solve', 'np.linalg.solve', (['A5', 'B5'], {}), '(A5, B5)\n', (3793, 3801), True, 'import numpy as np\n'), ((4514, 4527), 'shapely.geometry.Point', 'Point', (['xg', 'yg'], {}), '(xg, yg)\n', (4519, 4527), False, 'from shapely.geometry import Point, Polygon\n'), ((6171, 6198), 'numpy.round', 'np.round', (['(D_ar * 10 ** 6)', '(2)'], {}), '(D_ar * 10 ** 6, 2)\n', (6179, 6198), True, 'import numpy as np\n'), ((2743, 2757), 'numpy.dot', 'np.dot', (['GX', 'vy'], {}), '(GX, vy)\n', (2749, 2757), True, 'import numpy as np\n'), ((2759, 2773), 'numpy.dot', 'np.dot', (['GY', 'vx'], {}), '(GY, vx)\n', (2765, 2773), True, 'import numpy as np\n'), ((3103, 3119), 'numpy.dot', 'np.dot', (['Kest', 'wz'], {}), '(Kest, wz)\n', (3109, 3119), True, 'import numpy as np\n'), ((4708, 4723), 'shapely.geometry.Polygon', 'Polygon', (['coords'], {}), '(coords)\n', (4715, 4723), False, 'from shapely.geometry import Point, Polygon\n'), ((4768, 4814), 'numpy.sqrt', 'np.sqrt', (['((xg - X[v1]) ** 2 + (yg - Y[v1]) ** 2)'], {}), '((xg - X[v1]) ** 2 + (yg - Y[v1]) ** 2)\n', (4775, 4814), True, 'import numpy as np\n'), ((4828, 4874), 'numpy.sqrt', 'np.sqrt', (['((xg - X[v2]) ** 2 + (yg - Y[v2]) ** 2)'], {}), '((xg - X[v2]) ** 2 + (yg - Y[v2]) ** 2)\n', (4835, 4874), True, 'import numpy as np\n'), ((4888, 4934), 'numpy.sqrt', 'np.sqrt', (['((xg - X[v3]) ** 2 + (yg - Y[v3]) ** 2)'], {}), '((xg - X[v3]) ** 2 + (yg - Y[v3]) ** 2)\n', (4895, 4934), True, 'import numpy as np\n'), ((5193, 5225), 'numpy.sqrt', 'np.sqrt', (['(vx_ar ** 2 + vy_ar ** 2)'], {}), '(vx_ar ** 2 + vy_ar ** 2)\n', (5200, 5225), True, 'import numpy as np\n'), ((5243, 5271), 'numpy.sqrt', 'np.sqrt', (['(vxg ** 2 + vyg ** 2)'], {}), '(vxg ** 2 + vyg ** 2)\n', (5250, 5271), True, 'import numpy as np\n'), ((3072, 3089), 'numpy.dot', 'np.dot', (['vy_id', 'GY'], {}), '(vy_id, GY)\n', (3078, 3089), True, 'import numpy as np\n'), ((3053, 3070), 'numpy.dot', 'np.dot', (['vx_id', 'GX'], {}), '(vx_id, GX)\n', (3059, 3070), True, 
'import numpy as np\n')] |
from mesh import Mesh2D
import unittest
import numpy as np
class TestMesh2D(unittest.TestCase):
    """
    Unit tests for the Mesh2D class: construction (rectangular, periodic,
    Gmsh-imported), half-edge topology, periodic vertex pairing, point
    location, boundary-segment extraction, and region marking.

    NOTE(review): several tests load .msh fixtures from a hard-coded
    absolute path ('/home/hans-werner/...') and will only pass on a
    machine with that layout — consider a path relative to this file.
    """
    def test_constructor(self):
        """Check the rectangular/periodic/quadmesh flags for meshes built
        from a resolution tuple and from Gmsh files."""
        #
        # Rectangular Mesh
        #
        mesh = Mesh2D(resolution=(3,3))
        self.assertTrue(mesh.is_rectangular())
        self.assertFalse(mesh.is_periodic())
        self.assertTrue(mesh.is_quadmesh())
        #
        # Periodic in x-direction only: periodic overall and in {0},
        # but not in both directions {0,1}
        #
        mesh = Mesh2D(resolution=(1,1), periodic={0})
        self.assertTrue(mesh.is_periodic())
        self.assertTrue(mesh.is_periodic({0}))
        self.assertFalse(mesh.is_periodic({0,1}))
        #
        # Periodic in both directions
        #
        mesh = Mesh2D(resolution=(1,1), periodic={0,1})
        self.assertTrue(mesh.is_periodic())
        self.assertTrue(mesh.is_periodic({0}))
        self.assertTrue(mesh.is_periodic({0,1}))
        #
        # From Gmsh: a triangle mesh is neither periodic nor a quadmesh
        #
        mesh_folder = '/home/hans-werner/git/quadmesh/tests/test_mesh/'
        mesh = Mesh2D(file_path=mesh_folder+'quarter_circle_triangle.msh')
        self.assertFalse(mesh.is_periodic())
        self.assertFalse(mesh.is_quadmesh())
        #
        # QuadMesh from Gmsh: quadrilateral cells, but not rectangular
        #
        mesh = Mesh2D(file_path=mesh_folder+'quarter_circle_quad.msh')
        self.assertTrue(mesh.is_quadmesh())
        self.assertFalse(mesh.is_rectangular())
    def test_half_edge_has_cell(self):
        """Every half-edge (both per-cell and global) must reference a cell."""
        #
        # Check that every half-edge has a cell
        #
        mesh = Mesh2D(resolution=(2,2), periodic={0})
        for cell in mesh.cells.get_children():
            for half_edge in cell.get_half_edges():
                self.assertIsNotNone(half_edge.cell())
        for half_edge in mesh.half_edges.get_children():
            self.assertIsNotNone(half_edge.cell())
    def test_periodic_pairing(self):
        """Verify periodic vertex pairing is symmetric and complete."""
        #
        # Periodic in x-direction: for every periodic half-edge, each of its
        # vertices must pair back to itself via the twin's cell.
        #
        mesh = Mesh2D(resolution=(2,2), periodic={0})
        for he in mesh.half_edges.get_children():
            if he.is_periodic():
                nbr = he.twin().cell()
                for v in [he.base(), he.head()]:
                    self.assertTrue(v.is_periodic())
                    for v_nbr in v.get_periodic_pair(nbr):
                        v1 = v_nbr.get_periodic_pair(he.cell())
                        self.assertEqual(v,v1[0])
        #
        # Periodic in x and y directions: the four mesh corners collapse to
        # one periodic vertex with a pair in each of the four cells.
        #
        mesh = Mesh2D(resolution=(2,2), periodic={0,1})
        c00 = mesh.cells.get_child(0)
        v00 = c00.get_vertex(0)
        c10 = mesh.cells.get_child(1)
        v10 = c10.get_vertex(1)
        c01 = mesh.cells.get_child(2)
        v01 = c01.get_vertex(3)
        c11 = mesh.cells.get_child(3)
        v11 = c11.get_vertex(2)
        # Check v00 has 4 periodic pairs
        self.assertEqual(len(v00.get_periodic_pair()),4)
        # Check periodic paired vertices within each subcell
        self.assertEqual(v00.get_periodic_pair(c00)[0], v00)
        self.assertEqual(v00.get_periodic_pair(c10)[0], v10)
        self.assertEqual(v00.get_periodic_pair(c01)[0], v01)
        self.assertEqual(v00.get_periodic_pair(c11)[0], v11)
    def test_locate_point(self):
        """A located cell must actually contain the queried point."""
        mesh_folder = '/home/hans-werner/git/quadmesh/tests/test_mesh/'
        mesh = Mesh2D(file_path=mesh_folder+'quarter_circle_triangle.msh')
        point = (0.25,0.25)
        cell = mesh.locate_point(point)
        self.assertTrue(cell.contains_points(point))
        #mesh.cells[0].mark(1)
        #self.assertIsNone(mesh.locate_point(point, flag=1))
    def test_get_boundary_segments(self):
        """
        In each case, get the boundary segments and check that
        (i) The twins of all half_edges are None
        (ii) The halfedges are in order
        """
        mesh_folder = '/home/hans-werner/git/quadmesh/tests/test_mesh/'
        #
        # Define Meshes: rectangular, each periodicity variant, and two
        # Gmsh imports, so the boundary walk is exercised on all of them.
        #
        mesh_1 = Mesh2D(resolution=(2,2))
        mesh_2 = Mesh2D(resolution=(2,2), periodic={0})
        mesh_3 = Mesh2D(resolution=(2,2), periodic={1})
        mesh_4 = Mesh2D(resolution=(2,2), periodic={0,1})
        mesh_5 = Mesh2D(file_path=mesh_folder+'quarter_circle_triangle.msh')
        mesh_6 = Mesh2D(file_path=mesh_folder+'quarter_circle_quad.msh')
        meshes = [mesh_1, mesh_2, mesh_3, mesh_4, mesh_5, mesh_6]
        for mesh in meshes:
            # Check boundary: consecutive half-edges must chain head-to-base
            # and boundary half-edges must have no twin.
            bnd_segments = mesh.get_boundary_segments()
            for segment in bnd_segments:
                he_current = segment[0]
                for i in np.arange(1,len(segment)):
                    he_next = segment[i]
                    self.assertEqual(he_current.head(), he_next.base())
                    self.assertIsNone(he_current.twin())
                    self.assertIsNone(he_next.twin())
                    he_current = he_next
    def test_mark_region(self):
        """
        Test region marker
        """
        mesh = Mesh2D(resolution=(2,2))
        flag = '1'
        tol = 1e-9
        # Left boundary: x == 0 (within tolerance)
        f = lambda x,dummy: np.abs(x)<tol
        # Mark half-edges
        mesh.mark_region(flag, f, entity_type='half_edge', on_boundary=True)
        # Check: a half-edge should be marked iff *both* its vertices
        # satisfy the region predicate.
        for segment in mesh.get_boundary_segments():
            for he in segment:
                marked = True
                for v in he.get_vertices():
                    x, y = v.coordinates()
                    if not f(x,y):
                        marked = False
                        break
                self.assertEqual(he.is_marked(flag), marked)
        # Top vertices: y == 1 (within tolerance)
        flag = '2'
        g = lambda dummy,y: np.abs(y-1)<tol
        # Mark vertices
        mesh.mark_region(flag, g, entity_type='vertex', on_boundary=True)
        for v in mesh.get_boundary_vertices():
            x,y = v.coordinates()
            if g(x,y):
                self.assertTrue(v.is_marked(flag))
| [
"numpy.abs",
"mesh.Mesh2D"
] | [((229, 254), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(3, 3)'}), '(resolution=(3, 3))\n', (235, 254), False, 'from mesh import Mesh2D\n'), ((469, 508), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(1, 1)', 'periodic': '{0}'}), '(resolution=(1, 1), periodic={0})\n', (475, 508), False, 'from mesh import Mesh2D\n'), ((732, 774), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(1, 1)', 'periodic': '{0, 1}'}), '(resolution=(1, 1), periodic={0, 1})\n', (738, 774), False, 'from mesh import Mesh2D\n'), ((1051, 1112), 'mesh.Mesh2D', 'Mesh2D', ([], {'file_path': "(mesh_folder + 'quarter_circle_triangle.msh')"}), "(file_path=mesh_folder + 'quarter_circle_triangle.msh')\n", (1057, 1112), False, 'from mesh import Mesh2D\n'), ((1265, 1322), 'mesh.Mesh2D', 'Mesh2D', ([], {'file_path': "(mesh_folder + 'quarter_circle_quad.msh')"}), "(file_path=mesh_folder + 'quarter_circle_quad.msh')\n", (1271, 1322), False, 'from mesh import Mesh2D\n'), ((1546, 1585), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{0}'}), '(resolution=(2, 2), periodic={0})\n', (1552, 1585), False, 'from mesh import Mesh2D\n'), ((1997, 2036), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{0}'}), '(resolution=(2, 2), periodic={0})\n', (2003, 2036), False, 'from mesh import Mesh2D\n'), ((2519, 2561), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{0, 1}'}), '(resolution=(2, 2), periodic={0, 1})\n', (2525, 2561), False, 'from mesh import Mesh2D\n'), ((3412, 3473), 'mesh.Mesh2D', 'Mesh2D', ([], {'file_path': "(mesh_folder + 'quarter_circle_triangle.msh')"}), "(file_path=mesh_folder + 'quarter_circle_triangle.msh')\n", (3418, 3473), False, 'from mesh import Mesh2D\n'), ((4058, 4083), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)'}), '(resolution=(2, 2))\n', (4064, 4083), False, 'from mesh import Mesh2D\n'), ((4100, 4139), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{0}'}), '(resolution=(2, 2), periodic={0})\n', 
(4106, 4139), False, 'from mesh import Mesh2D\n'), ((4156, 4195), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{1}'}), '(resolution=(2, 2), periodic={1})\n', (4162, 4195), False, 'from mesh import Mesh2D\n'), ((4212, 4254), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)', 'periodic': '{0, 1}'}), '(resolution=(2, 2), periodic={0, 1})\n', (4218, 4254), False, 'from mesh import Mesh2D\n'), ((4270, 4331), 'mesh.Mesh2D', 'Mesh2D', ([], {'file_path': "(mesh_folder + 'quarter_circle_triangle.msh')"}), "(file_path=mesh_folder + 'quarter_circle_triangle.msh')\n", (4276, 4331), False, 'from mesh import Mesh2D\n'), ((4347, 4404), 'mesh.Mesh2D', 'Mesh2D', ([], {'file_path': "(mesh_folder + 'quarter_circle_quad.msh')"}), "(file_path=mesh_folder + 'quarter_circle_quad.msh')\n", (4353, 4404), False, 'from mesh import Mesh2D\n'), ((5105, 5130), 'mesh.Mesh2D', 'Mesh2D', ([], {'resolution': '(2, 2)'}), '(resolution=(2, 2))\n', (5111, 5130), False, 'from mesh import Mesh2D\n'), ((5229, 5238), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5235, 5238), True, 'import numpy as np\n'), ((5834, 5847), 'numpy.abs', 'np.abs', (['(y - 1)'], {}), '(y - 1)\n', (5840, 5847), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
import yaml
import numpy as np
import numpy.random as npr
import pdb
from ..utils.cython_bbox import bbox_overlaps, bbox_intersections
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..fast_rcnn.bbox_transform import bbox_transform
# <<<< obsolete
DEBUG = False
def proposal_target_layer(rpn_rois, gt_boxes, _num_classes):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets, split across the
    four FPN pyramid levels (P2..P5).
    Parameters
    ----------
    rpn_rois: (1 x H x W x A, 5) [0, x1, y1, x2, y2]
    gt_boxes: (G, 5) [x1 ,y1 ,x2, y2, class] int
    _num_classes: number of object classes (including background)
    ----------
    Returns
    ----------
    rois_all: list of 4 arrays, one per FPN level, each (N_i, 5) [0, x1, y1, x2, y2]
    labels_all: (N, ) {0,1,...,_num_classes-1} concatenated over levels
    bbox_targets_all: (N, K x 4) regression deltas
    bbox_weights_all: (N, K x 4) 0/1 inside-weight masks for the loss
    bbox_outside_weights: (N, K x 4) 0/1 outside-weight masks for the loss
    """
    # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
    # (i.e., rpn.proposal_layer.ProposalLayer), or any other source.
    # Append the ground-truth boxes themselves as extra candidate RoIs.
    all_rois = rpn_rois
    zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
    all_rois = np.vstack(
        (all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
    )
    # Sanity check: single batch only (batch index column must be all zero)
    assert np.all(all_rois[:, 0] == 0), \
        'Only single item batches are supported'
    num_images = 1
    rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
    # Number of foreground RoIs to sample (was a float via np.round; cast
    # to int here since it is used as a count downstream).
    fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
    # Sample rois with classification labels and bounding box regression
    # targets
    rois, labels, bbox_targets, bbox_inside_weights, layer_indexes = _sample_rois(
        all_rois, gt_boxes, fg_rois_per_image, rois_per_image, _num_classes, sample_type='fpn')
    # Partition the sampled RoIs by FPN level (layer_indexes in {2,..,5}).
    # An empty level gets a small all-zero placeholder batch so downstream
    # code always receives non-empty tensors.
    labels_all = []
    bbox_targets_all = []
    bbox_weights_all = []
    rois_all = []
    for i in range(4):
        index = (layer_indexes == (i + 2))
        if np.count_nonzero(index) == 0:
            # Placeholder: 4 dummy background RoIs for this level
            rois_ = np.zeros((4, 5), dtype=rois.dtype)
            labels_ = np.zeros((4,), dtype=labels.dtype)
            bbox_targets_ = np.zeros((4, _num_classes * 4), dtype=bbox_targets.dtype)
            bbox_weights_ = np.zeros((4, _num_classes * 4), dtype=bbox_inside_weights.dtype)
        else:
            rois_ = rois[index, :]
            labels_ = labels[index]
            bbox_weights_ = bbox_inside_weights[index, :]
            bbox_targets_ = bbox_targets[index, :]
        rois_all.append(rois_)
        labels_all.append(labels_)
        bbox_targets_all.append(bbox_targets_)
        bbox_weights_all.append(bbox_weights_)
    labels_all = np.concatenate(labels_all)
    bbox_targets_all = np.concatenate(bbox_targets_all, axis=0)
    bbox_weights_all = np.concatenate(bbox_weights_all, axis=0)
    # Outside weights mirror the inside weights as a 0/1 float mask.
    bbox_outside_weights = np.array(bbox_weights_all > 0).astype(np.float32)
    return rois_all, labels_all, bbox_targets_all, bbox_weights_all, bbox_outside_weights
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes,
                 sample_type='', k0=4):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Returns (rois, labels, bbox_targets, bbox_inside_weights, layer_index)
    where layer_index assigns each sampled RoI to an FPN level in [2, 5]
    when sample_type == 'fpn' (otherwise an empty list).
    """
    # overlaps: R x G IoU matrix between candidate RoIs and ground truth.
    # np.float64 replaces the deprecated/removed np.float alias (same type).
    overlaps = bbox_overlaps(
        np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float64),
        np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float64))
    gt_assignment = overlaps.argmax(axis=1)  # best-matching gt per RoI
    max_overlaps = overlaps.max(axis=1)      # its IoU value
    labels = gt_boxes[gt_assignment, 4]
    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = int(min(fg_rois_per_image, fg_inds.size))
    # Sample foreground regions without replacement.
    # (The original element-wise int() cast loop was a no-op on the numpy
    # index array and has been removed.)
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = int(min(rois_per_image - fg_rois_per_this_image,
                                     bg_inds.size))
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
    # The indices that we're selecting (both fg and bg)
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    rois = all_rois[keep_inds]
    bbox_target_data = _compute_targets(
        rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
    # bbox_target_data (1 x H x W x A, 5)
    # bbox_targets <- (1 x H x W x A, K x 4)
    # bbox_inside_weights <- (1 x H x W x A, K x 4)
    bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(bbox_target_data, num_classes)
    layer_index = []
    if sample_type == 'fpn':
        # FPN level assignment: k = floor(k0 + log2(sqrt(area)/224)),
        # clipped to the available pyramid levels [2, 5].
        w = (rois[:, 3] - rois[:, 1])
        h = (rois[:, 4] - rois[:, 2])
        s = w * h
        s[s <= 0] = 1e-6  # guard log2 against degenerate boxes
        layer_index = np.floor(k0 + np.log2(np.sqrt(s) / 224))
        layer_index[layer_index < 2] = 2
        layer_index[layer_index > 5] = 5
    return rois, labels, bbox_targets, bbox_inside_weights, layer_index  # rois: [N,5], labels: [N,]
def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Expand compact bounding-box regression targets to the network layout.

    The input is stored as N x (class, tx, ty, tw, th); the network instead
    consumes an N x 4K blob in which only the 4-wide slot belonging to each
    RoI's own class holds non-zero targets.

    Returns:
        bbox_target (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    classes = bbox_target_data[:, 0]
    targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)
    inside_weights = np.zeros(targets.shape, dtype=np.float32)
    # Only foreground rows (class > 0) receive targets and weights;
    # background rows stay all-zero.
    for row in np.where(classes > 0)[0]:
        col = int(4 * int(classes[row]))
        targets[row, col:col + 4] = bbox_target_data[row, 1:]
        inside_weights[row, col:col + 4] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
    return targets, inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
    """Compute bounding-box regression targets for an image.

    Returns an N x 5 float32 array: column 0 is the class label, columns
    1-4 are the (optionally normalized) regression deltas.
    """
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 4

    deltas = bbox_transform(ex_rois, gt_rois)
    if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
        # Optionally normalize targets by a precomputed mean and stdev
        means = np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
        stds = np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS)
        deltas = (deltas - means) / stds
    stacked = np.hstack((labels[:, np.newaxis], deltas))
    return stacked.astype(np.float32, copy=False)
def _jitter_gt_boxes(gt_boxes, jitter=0.05):
""" jitter the gtboxes, before adding them into rois, to be more robust for cls and rgs
gt_boxes: (G, 5) [x1 ,y1 ,x2, y2, class] int
"""
jittered_boxes = gt_boxes.copy()
ws = jittered_boxes[:, 2] - jittered_boxes[:, 0] + 1.0
hs = jittered_boxes[:, 3] - jittered_boxes[:, 1] + 1.0
width_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * ws
height_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * hs
jittered_boxes[:, 0] += width_offset
jittered_boxes[:, 2] += width_offset
jittered_boxes[:, 1] += height_offset
jittered_boxes[:, 3] += height_offset
return jittered_boxes
| [
"numpy.ascontiguousarray",
"numpy.zeros",
"numpy.all",
"numpy.ones",
"numpy.hstack",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.random.rand",
"numpy.round",
"numpy.concatenate",
"numpy.sqrt"
] | [((1610, 1664), 'numpy.zeros', 'np.zeros', (['(gt_boxes.shape[0], 1)'], {'dtype': 'gt_boxes.dtype'}), '((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)\n', (1618, 1664), True, 'import numpy as np\n'), ((1804, 1831), 'numpy.all', 'np.all', (['(all_rois[:, 0] == 0)'], {}), '(all_rois[:, 0] == 0)\n', (1810, 1831), True, 'import numpy as np\n'), ((1983, 2031), 'numpy.round', 'np.round', (['(cfg.TRAIN.FG_FRACTION * rois_per_image)'], {}), '(cfg.TRAIN.FG_FRACTION * rois_per_image)\n', (1991, 2031), True, 'import numpy as np\n'), ((3223, 3249), 'numpy.concatenate', 'np.concatenate', (['labels_all'], {}), '(labels_all)\n', (3237, 3249), True, 'import numpy as np\n'), ((3273, 3313), 'numpy.concatenate', 'np.concatenate', (['bbox_targets_all'], {'axis': '(0)'}), '(bbox_targets_all, axis=0)\n', (3287, 3313), True, 'import numpy as np\n'), ((3337, 3377), 'numpy.concatenate', 'np.concatenate', (['bbox_weights_all'], {'axis': '(0)'}), '(bbox_weights_all, axis=0)\n', (3351, 3377), True, 'import numpy as np\n'), ((5557, 5584), 'numpy.append', 'np.append', (['fg_inds', 'bg_inds'], {}), '(fg_inds, bg_inds)\n', (5566, 5584), True, 'import numpy as np\n'), ((7077, 7133), 'numpy.zeros', 'np.zeros', (['(clss.size, 4 * num_classes)'], {'dtype': 'np.float32'}), '((clss.size, 4 * num_classes), dtype=np.float32)\n', (7085, 7133), True, 'import numpy as np\n'), ((7160, 7206), 'numpy.zeros', 'np.zeros', (['bbox_targets.shape'], {'dtype': 'np.float32'}), '(bbox_targets.shape, dtype=np.float32)\n', (7168, 7206), True, 'import numpy as np\n'), ((3868, 3922), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['all_rois[:, 1:5]'], {'dtype': 'np.float'}), '(all_rois[:, 1:5], dtype=np.float)\n', (3888, 3922), True, 'import numpy as np\n'), ((3932, 3985), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['gt_boxes[:, :4]'], {'dtype': 'np.float'}), '(gt_boxes[:, :4], dtype=np.float)\n', (3952, 3985), True, 'import numpy as np\n'), ((4199, 4244), 'numpy.where', 'np.where', (['(max_overlaps >= 
cfg.TRAIN.FG_THRESH)'], {}), '(max_overlaps >= cfg.TRAIN.FG_THRESH)\n', (4207, 4244), True, 'import numpy as np\n'), ((4803, 4900), 'numpy.where', 'np.where', (['((max_overlaps < cfg.TRAIN.BG_THRESH_HI) & (max_overlaps >= cfg.TRAIN.\n BG_THRESH_LO))'], {}), '((max_overlaps < cfg.TRAIN.BG_THRESH_HI) & (max_overlaps >= cfg.\n TRAIN.BG_THRESH_LO))\n', (4811, 4900), True, 'import numpy as np\n'), ((7218, 7236), 'numpy.where', 'np.where', (['(clss > 0)'], {}), '(clss > 0)\n', (7226, 7236), True, 'import numpy as np\n'), ((1710, 1746), 'numpy.hstack', 'np.hstack', (['(zeros, gt_boxes[:, :-1])'], {}), '((zeros, gt_boxes[:, :-1]))\n', (1719, 1746), True, 'import numpy as np\n'), ((2570, 2608), 'numpy.zeros', 'np.zeros', (['(1 * 4, 5)'], {'dtype': 'rois.dtype'}), '((1 * 4, 5), dtype=rois.dtype)\n', (2578, 2608), True, 'import numpy as np\n'), ((2696, 2757), 'numpy.zeros', 'np.zeros', (['(1 * 4, _num_classes * 4)'], {'dtype': 'bbox_targets.dtype'}), '((1 * 4, _num_classes * 4), dtype=bbox_targets.dtype)\n', (2704, 2757), True, 'import numpy as np\n'), ((2784, 2852), 'numpy.zeros', 'np.zeros', (['(1 * 4, _num_classes * 4)'], {'dtype': 'bbox_inside_weights.dtype'}), '((1 * 4, _num_classes * 4), dtype=bbox_inside_weights.dtype)\n', (2792, 2852), True, 'import numpy as np\n'), ((3453, 3483), 'numpy.array', 'np.array', (['(bbox_weights_all > 0)'], {}), '(bbox_weights_all > 0)\n', (3461, 3483), True, 'import numpy as np\n'), ((8045, 8084), 'numpy.array', 'np.array', (['cfg.TRAIN.BBOX_NORMALIZE_STDS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_STDS)\n', (8053, 8084), True, 'import numpy as np\n'), ((8097, 8140), 'numpy.hstack', 'np.hstack', (['(labels[:, np.newaxis], targets)'], {}), '((labels[:, np.newaxis], targets))\n', (8106, 8140), True, 'import numpy as np\n'), ((2629, 2666), 'numpy.ones', 'np.ones', (['(1 * 4,)'], {'dtype': 'labels.dtype'}), '((1 * 4,), dtype=labels.dtype)\n', (2636, 2666), True, 'import numpy as np\n'), ((7982, 8022), 'numpy.array', 'np.array', 
(['cfg.TRAIN.BBOX_NORMALIZE_MEANS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n', (7990, 8022), True, 'import numpy as np\n'), ((8552, 8591), 'numpy.random.rand', 'np.random.rand', (['jittered_boxes.shape[0]'], {}), '(jittered_boxes.shape[0])\n', (8566, 8591), True, 'import numpy as np\n'), ((8634, 8673), 'numpy.random.rand', 'np.random.rand', (['jittered_boxes.shape[0]'], {}), '(jittered_boxes.shape[0])\n', (8648, 8673), True, 'import numpy as np\n'), ((6335, 6345), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (6342, 6345), True, 'import numpy as np\n')] |
"""
A viewer for a CSV file containing a column of paths.
"""
from __future__ import annotations
import asyncio
import numpy as np
import pandas as pd
from typing import Union
from pathlib import Path
from bokeh.io import curdoc
from functools import partial
from bokeh.models import Button, Div, BoxAnnotation
from bokeh.document import Document
from bokeh.server.server import Server
from ramjet.analysis.transit_vetter import TransitVetter
from ramjet.analysis.viewer.light_curve_display import LightCurveDisplay
from ramjet.analysis.viewer.preloader import Preloader
from ramjet.analysis.viewer.view_entity import ViewEntity
from ramjet.photometric_database.tess_ffi_light_curve import TessFfiColumnName, TessFfiLightCurve
class Viewer:
    """
    A viewer for a CSV file containing a column of paths.

    Displays one light curve at a time in a Bokeh document, with buttons to
    step through candidates, an annotation box showing the maximum physically
    plausible transit depth, and a button to append the current target to a
    local positives CSV.
    """
    vetter = TransitVetter()  # shared vetter used to compute physical depth limits

    def __init__(self):
        # All display components are created lazily in `from_csv_path`;
        # the constructor only declares the attributes.
        self.light_curve_display: Union[LightCurveDisplay, None] = None
        self.preloader: Union[Preloader, None] = None
        self.add_to_positives_button: Union[Button, None] = None
        self.previous_button: Union[Button, None] = None
        self.next_button: Union[Button, None] = None
        self.information_div: Union[Div, None] = None
        self.document: Union[Document, None] = None
        self.maximum_physical_depth_box: Union[BoxAnnotation, None] = None
        self.view_entity: Union[ViewEntity, None] = None

    async def update_view_entity_with_document_lock(self, view_entity: ViewEntity):
        """
        Updates the light curve display using the Bokeh document lock.
        :param view_entity: The view entity to update the display with.
        """
        light_curve = view_entity.light_curve
        # Bokeh models may only be mutated while holding the document lock,
        # so each UI update is scheduled as a next-tick callback.
        self.document.add_next_tick_callback(partial(self.light_curve_display.update_from_light_curve,
                                                     light_curve=light_curve))
        self.document.add_next_tick_callback(partial(self.update_information_div_for_view_entity,
                                                     view_entity=view_entity))
        self.document.add_next_tick_callback(partial(self.add_physical_depth_range_annotation_to_light_curve_figure,
                                                     view_entity=view_entity))
        self.view_entity = view_entity

    async def add_physical_depth_range_annotation_to_light_curve_figure(self, view_entity: ViewEntity):
        """
        Updates the depth-annotation box for the given view entity: its bottom
        marks the maximum physically plausible planetary transit depth, and its
        color flags the candidate (red = has ExoFOP dispositions, yellow =
        unknown stellar radius, green = otherwise).
        """
        unknown_radius = False
        maximum_depth = self.vetter.get_maximum_physical_depth_for_planet_for_target(
            view_entity.target, allow_missing_contamination_ratio=True)
        if np.isnan(maximum_depth):
            # Fall back to a nominal 10% depth when the radius is unknown.
            maximum_depth = 0.1
            unknown_radius = True
        self.maximum_physical_depth_box.bottom = 1 - maximum_depth
        if view_entity.has_exofop_dispositions:
            self.maximum_physical_depth_box.fill_color = 'red'
        elif unknown_radius:
            self.maximum_physical_depth_box.fill_color = 'yellow'
        else:
            self.maximum_physical_depth_box.fill_color = 'green'

    async def update_information_div_for_view_entity(self, view_entity: ViewEntity):
        """Renders the target's summary (TIC ID, sector, confidence, index,
        stellar radius) into the information div as HTML."""
        self.information_div.text = (f'<h1 class="title">TIC {view_entity.light_curve.tic_id} ' +
                                     f'sector {view_entity.light_curve.sector}</h1>' +
                                     f'<p>Network confidence: {view_entity.confidence}</p>' +
                                     f'<p>Result index: {view_entity.index}</p>' +
                                     f'<p>Star radius (solar radii): {view_entity.target.radius}</p>')

    async def display_next_view_entity(self):
        """
        Moves to the next view entity.
        """
        next_view_entity = await self.preloader.increment()
        await self.update_view_entity_with_document_lock(next_view_entity)

    async def display_previous_view_entity(self):
        """
        Moves to the previous view entity.
        """
        previous_view_entity = await self.preloader.decrement()
        await self.update_view_entity_with_document_lock(previous_view_entity)

    def create_display_next_view_entity_task(self):
        """
        Creates the async task to move to the next light curve.
        """
        asyncio.create_task(self.display_next_view_entity())

    def create_display_previous_view_entity_task(self):
        """
        Creates the async task to move to the previous light curve.
        """
        asyncio.create_task(self.display_previous_view_entity())

    def create_light_curve_switching_buttons(self) -> (Button, Button):
        """
        Creates buttons for switching between light curves.
        """
        next_button = Button(label='Next target')
        next_button.on_click(self.create_display_next_view_entity_task)
        next_button.sizing_mode = 'stretch_width'
        previous_button = Button(label='Previous target')
        previous_button.on_click(self.create_display_previous_view_entity_task)
        previous_button.sizing_mode = 'stretch_width'
        return previous_button, next_button

    def create_add_to_positives_button(self) -> Button:
        """Creates the button that appends the current target to positives.csv."""
        add_to_positives_button = Button(label='Add to positives')
        add_to_positives_button.on_click(self.add_current_to_positives)
        add_to_positives_button.sizing_mode = 'stretch_width'
        return add_to_positives_button

    def add_current_to_positives(self):
        """Appends the current target's TIC ID to a local positives.csv,
        writing the header only when the file does not yet exist."""
        positives_csv_file_path = Path('positives.csv')
        positives_data_frame = pd.DataFrame({'tic_id': [self.view_entity.target.tic_id]})
        if positives_csv_file_path.exists():
            positives_data_frame.to_csv(positives_csv_file_path, mode='a', header=False, index=False)
        else:
            positives_data_frame.to_csv(positives_csv_file_path, index=False)

    @classmethod
    def from_csv_path(cls, bokeh_document: Document, csv_path: Path) -> Viewer:
        """
        Creates a viewer from a CSV path containing a light curve path column.
        :param bokeh_document: The Bokeh document to run the viewer in.
        :param csv_path: The path to the CSV file.
        :return: The viewer.
        """
        viewer = cls()
        viewer.document = bokeh_document
        viewer.csv_path = csv_path  # NOTE(review): attribute not declared in __init__
        viewer.light_curve_display = LightCurveDisplay.for_columns(TessFfiColumnName.TIME__BTJD.value,
                                                                    TessFfiLightCurve().flux_column_names,
                                                                    flux_axis_label='Relative flux')
        viewer.light_curve_display.exclude_outliers_from_zoom = True
        # Depth annotation box; fill color/bottom are updated per target.
        viewer.maximum_physical_depth_box = BoxAnnotation(bottom=1-0.01, top=1, fill_alpha=0.1, fill_color='green')
        viewer.light_curve_display.figure.add_layout(viewer.maximum_physical_depth_box)
        viewer.add_to_positives_button = viewer.create_add_to_positives_button()
        bokeh_document.add_root(viewer.add_to_positives_button)
        viewer.previous_button, viewer.next_button = viewer.create_light_curve_switching_buttons()
        bokeh_document.add_root(viewer.previous_button)
        bokeh_document.add_root(viewer.next_button)
        viewer.information_div = Div()
        viewer.information_div.sizing_mode = 'stretch_width'
        bokeh_document.add_root(viewer.information_div)
        bokeh_document.add_root(viewer.light_curve_display.figure)
        # Kick off CSV preloading in the background of the running event loop.
        loop = asyncio.get_running_loop()
        loop.create_task(viewer.start_preloader(csv_path))
        return viewer

    async def start_preloader(self, csv_path):
        """
        Starts the light curve preloader.
        """
        self.preloader = await Preloader.from_csv_path(csv_path, starting_index=0)
        initial_view_entity = self.preloader.current_view_entity
        await self.update_view_entity_with_document_lock(initial_view_entity)
def application(bokeh_document: Document):
    """
    The application to run from the Tornado server.
    :param bokeh_document: The Bokeh document to run the viewer in.
    """
    # Results file to browse; hard-coded to a local inference output CSV.
    results_csv_path = Path('/Users/golmsche/Desktop/infer results 2020-10-09-13-21-21.csv')
    Viewer.from_csv_path(bokeh_document, results_csv_path)
if __name__ == '__main__':
    # Stand-alone entry point: serve the viewer application on port 5010
    # and open it in the default browser.
    document = curdoc()
    server = Server({'/': application}, port=5010)
    server.start()
    server.io_loop.add_callback(server.show, "/")
    server.io_loop.start()
| [
"pandas.DataFrame",
"functools.partial",
"bokeh.models.Div",
"bokeh.models.Button",
"ramjet.analysis.viewer.preloader.Preloader.from_csv_path",
"numpy.isnan",
"bokeh.models.BoxAnnotation",
"asyncio.get_running_loop",
"ramjet.photometric_database.tess_ffi_light_curve.TessFfiLightCurve",
"bokeh.io.c... | [((832, 847), 'ramjet.analysis.transit_vetter.TransitVetter', 'TransitVetter', ([], {}), '()\n', (845, 847), False, 'from ramjet.analysis.transit_vetter import TransitVetter\n'), ((8085, 8154), 'pathlib.Path', 'Path', (['"""/Users/golmsche/Desktop/infer results 2020-10-09-13-21-21.csv"""'], {}), "('/Users/golmsche/Desktop/infer results 2020-10-09-13-21-21.csv')\n", (8089, 8154), False, 'from pathlib import Path\n'), ((8250, 8258), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (8256, 8258), False, 'from bokeh.io import curdoc\n'), ((8272, 8309), 'bokeh.server.server.Server', 'Server', (["{'/': application}"], {'port': '(5010)'}), "({'/': application}, port=5010)\n", (8278, 8309), False, 'from bokeh.server.server import Server\n'), ((2610, 2633), 'numpy.isnan', 'np.isnan', (['maximum_depth'], {}), '(maximum_depth)\n', (2618, 2633), True, 'import numpy as np\n'), ((4706, 4733), 'bokeh.models.Button', 'Button', ([], {'label': '"""Next target"""'}), "(label='Next target')\n", (4712, 4733), False, 'from bokeh.models import Button, Div, BoxAnnotation\n'), ((4882, 4913), 'bokeh.models.Button', 'Button', ([], {'label': '"""Previous target"""'}), "(label='Previous target')\n", (4888, 4913), False, 'from bokeh.models import Button, Div, BoxAnnotation\n'), ((5183, 5215), 'bokeh.models.Button', 'Button', ([], {'label': '"""Add to positives"""'}), "(label='Add to positives')\n", (5189, 5215), False, 'from bokeh.models import Button, Div, BoxAnnotation\n'), ((5464, 5485), 'pathlib.Path', 'Path', (['"""positives.csv"""'], {}), "('positives.csv')\n", (5468, 5485), False, 'from pathlib import Path\n'), ((5517, 5575), 'pandas.DataFrame', 'pd.DataFrame', (["{'tic_id': [self.view_entity.target.tic_id]}"], {}), "({'tic_id': [self.view_entity.target.tic_id]})\n", (5529, 5575), True, 'import pandas as pd\n'), ((6690, 6763), 'bokeh.models.BoxAnnotation', 'BoxAnnotation', ([], {'bottom': '(1 - 0.01)', 'top': '(1)', 'fill_alpha': '(0.1)', 'fill_color': '"""green"""'}), 
"(bottom=1 - 0.01, top=1, fill_alpha=0.1, fill_color='green')\n", (6703, 6763), False, 'from bokeh.models import Button, Div, BoxAnnotation\n'), ((7235, 7240), 'bokeh.models.Div', 'Div', ([], {}), '()\n', (7238, 7240), False, 'from bokeh.models import Button, Div, BoxAnnotation\n'), ((7440, 7466), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (7464, 7466), False, 'import asyncio\n'), ((1756, 1843), 'functools.partial', 'partial', (['self.light_curve_display.update_from_light_curve'], {'light_curve': 'light_curve'}), '(self.light_curve_display.update_from_light_curve, light_curve=\n light_curve)\n', (1763, 1843), False, 'from functools import partial\n'), ((1938, 2015), 'functools.partial', 'partial', (['self.update_information_div_for_view_entity'], {'view_entity': 'view_entity'}), '(self.update_information_div_for_view_entity, view_entity=view_entity)\n', (1945, 2015), False, 'from functools import partial\n'), ((2115, 2215), 'functools.partial', 'partial', (['self.add_physical_depth_range_annotation_to_light_curve_figure'], {'view_entity': 'view_entity'}), '(self.add_physical_depth_range_annotation_to_light_curve_figure,\n view_entity=view_entity)\n', (2122, 2215), False, 'from functools import partial\n'), ((7693, 7744), 'ramjet.analysis.viewer.preloader.Preloader.from_csv_path', 'Preloader.from_csv_path', (['csv_path'], {'starting_index': '(0)'}), '(csv_path, starting_index=0)\n', (7716, 7744), False, 'from ramjet.analysis.viewer.preloader import Preloader\n'), ((6438, 6457), 'ramjet.photometric_database.tess_ffi_light_curve.TessFfiLightCurve', 'TessFfiLightCurve', ([], {}), '()\n', (6455, 6457), False, 'from ramjet.photometric_database.tess_ffi_light_curve import TessFfiColumnName, TessFfiLightCurve\n')] |
# Import routines
import numpy as np
import math
import random
# Hyperparameters of the cab-driver MDP environment
m = 5 # number of cities, indexed 0 .. m-1
t = 24 # number of hours in a day, indexed 0 .. t-1
d = 7 # number of days in a week, indexed 0 .. d-1
C = 5 # per-hour fuel and other operating costs
R = 9 # per-hour revenue from a passenger
class CabDriver():

    def __init__(self):
        """Initialise the driver's action space, state space and start state."""
        # Actions are (pickup, drop) pairs with pickup != drop; the special
        # action (0, 0) at index 0 means "refuse all requests" (no ride).
        self.action_space = [(p, q) for p in range(m) for q in range(m) if p != q]
        self.action_space.insert(0, (0, 0))
        # A state is every possible (location, hour-of-day, day-of-week) tuple.
        self.state_space = [(loc, time, day) for loc in range(m)
                            for time in range(t) for day in range(d)]
        # Random state initialization.
        self.state_init = self.state_space[np.random.choice(len(self.state_space))]
        # Start the first round.
        self.reset()

    ## Encoding state for NN input.
    ## NOTE: Architecture 2 from the problem statement (Input: STATE ONLY),
    ## used in Agent_Architecture2_(Input_State_only).ipynb
    def state_encod_arch2(self, state):
        """Convert a state into a one-hot vector of size m + t + d for the NN."""
        curr_loc, curr_time, curr_day = state
        loc_arr = np.zeros(m, dtype=int)   # location one-hot
        time_arr = np.zeros(t, dtype=int)  # hour one-hot
        day_arr = np.zeros(d, dtype=int)   # day one-hot
        loc_arr[curr_loc] = 1
        time_arr[curr_time] = 1
        day_arr[curr_day] = 1
        # Horizontal stacking gives a single flat vector of size m + t + d.
        return np.hstack((loc_arr, time_arr, day_arr)).tolist()

    ## Getting number of requests
    def requests(self, state):
        """Sample the ride requests available at the current location.

        The Poisson rate per location follows the MDP table; the number of
        simultaneous requests is capped at 15.  The "no ride" action
        (index 0) is always offered because the driver may refuse every
        request.
        """
        location = state[0]
        # Average hourly demand for locations 0..4 (MDP specification table).
        poisson_rates = (2, 12, 4, 7, 8)
        num_requests = min(np.random.poisson(poisson_rates[location]), 15)
        possible_actions_index = random.sample(range(1, (m - 1) * m + 1), num_requests) + [0]
        actions = [self.action_space[i] for i in possible_actions_index]
        return possible_actions_index, actions

    def update_time_day(self, curr_time, curr_day, ride_duration):
        """Roll the clock forward by ``ride_duration`` hours.

        Returns the (hour, day) pair after the journey, wrapping hours into
        the 0-23 range and days into the 0-6 range (journeys may spill over
        into subsequent days).
        """
        total = curr_time + int(ride_duration)
        return total % 24, (curr_day + total // 24) % 7

    def get_next_state_and_time_func(self, state, action, Time_matrix):
        """Simulate one transition.

        Returns (next_state, wait_time, transit_time, ride_time).
        Three possible scenarios:
          i)   driver refuses all requests and idles for one hour;
          ii)  driver is already at the pickup location;
          iii) driver must first commute to the pickup location.
        """
        wait_time = 0     # only non-zero for the (0, 0) "refuse" action
        transit_time = 0  # current location -> pickup location
        ride_time = 0     # pickup -> drop
        curr_loc, curr_time, curr_day = state
        pickup_loc, drop_loc = action
        if pickup_loc == 0 and drop_loc == 0:
            # Refuse all requests: idle one time unit at the same place.
            wait_time = 1
            next_loc = curr_loc
        elif curr_loc == pickup_loc:
            # Already at the pickup spot: wait and transit are both 0.
            ride_time = Time_matrix[curr_loc][drop_loc][curr_time][curr_day]
            next_loc = drop_loc
        else:
            # Commute to the pickup spot first, then drive the passenger.
            transit_time = Time_matrix[curr_loc][pickup_loc][curr_time][curr_day]
            new_time, new_day = self.update_time_day(curr_time, curr_day, transit_time)
            ride_time = Time_matrix[pickup_loc][drop_loc][new_time][new_day]
            next_loc = drop_loc
        total_time = wait_time + transit_time + ride_time
        next_time, next_day = self.update_time_day(curr_time, curr_day, total_time)
        return [next_loc, next_time, next_day], wait_time, transit_time, ride_time

    def next_state_func(self, state, action, Time_matrix):
        """Return only the next state for (state, action)."""
        return self.get_next_state_and_time_func(state, action, Time_matrix)[0]

    def _reward_from_times(self, wait_time, transit_time, ride_time):
        """Revenue for the ride minus cost for every hour on the road.

        Wait and transit hours earn nothing but still cost C per hour, so a
        refused request (ride_time == 0) yields a negative reward (-C).
        """
        idle_time = wait_time + transit_time
        return (R * ride_time) - (C * (ride_time + idle_time))

    def reward_func(self, state, action, Time_matrix):
        """Return the reward for taking ``action`` in ``state``."""
        wait_time, transit_time, ride_time = self.get_next_state_and_time_func(
            state, action, Time_matrix)[1:]
        return self._reward_from_times(wait_time, transit_time, ride_time)

    def step(self, state, action, Time_matrix):
        """Take one trip; return (next_state, reward, total_time).

        The transition is simulated exactly once here (the original code
        re-ran it inside reward_func, doubling the work).
        """
        next_state, wait_time, transit_time, ride_time = \
            self.get_next_state_and_time_func(state, action, Time_matrix)
        reward = self._reward_from_times(wait_time, transit_time, ride_time)
        total_time = wait_time + transit_time + ride_time
        return next_state, reward, total_time

    def reset(self):
        """Return (action_space, state_space, initial state)."""
        return self.action_space, self.state_space, self.state_init
| [
"numpy.zeros",
"numpy.random.poisson",
"numpy.hstack"
] | [((1589, 1611), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'int'}), '(m, dtype=int)\n', (1597, 1611), True, 'import numpy as np\n'), ((1647, 1669), 'numpy.zeros', 'np.zeros', (['t'], {'dtype': 'int'}), '(t, dtype=int)\n', (1655, 1669), True, 'import numpy as np\n'), ((1700, 1722), 'numpy.zeros', 'np.zeros', (['d'], {'dtype': 'int'}), '(d, dtype=int)\n', (1708, 1722), True, 'import numpy as np\n'), ((1948, 1987), 'numpy.hstack', 'np.hstack', (['(loc_arr, time_arr, day_arr)'], {}), '((loc_arr, time_arr, day_arr))\n', (1957, 1987), True, 'import numpy as np\n'), ((2734, 2754), 'numpy.random.poisson', 'np.random.poisson', (['(2)'], {}), '(2)\n', (2751, 2754), True, 'import numpy as np\n'), ((2804, 2825), 'numpy.random.poisson', 'np.random.poisson', (['(12)'], {}), '(12)\n', (2821, 2825), True, 'import numpy as np\n'), ((2875, 2895), 'numpy.random.poisson', 'np.random.poisson', (['(4)'], {}), '(4)\n', (2892, 2895), True, 'import numpy as np\n'), ((2945, 2965), 'numpy.random.poisson', 'np.random.poisson', (['(7)'], {}), '(7)\n', (2962, 2965), True, 'import numpy as np\n'), ((3015, 3035), 'numpy.random.poisson', 'np.random.poisson', (['(8)'], {}), '(8)\n', (3032, 3035), True, 'import numpy as np\n')] |
import numpy as np
from ..util.lsqr import lsqr
class MultiAttributeLinearRegression():
    """Multivariate linear regression solved via the normal equations.

    Falls back to an iterative least-squares solver (``lsqr``) when the
    design matrix is rank deficient, since x.T @ x is then singular.
    """

    def __init__(self):
        # Set by fit(); predict() raises until then.
        self.coefficients = None

    def fit(self, x, y):
        '''
        Fit the regression coefficients.

        :param x: 2-D ndarray of inputs with the intercept column included,
                  e.g. [[x11, x12, x13, 1],
                        [x21, x22, x23, 1],
                        [x31, x32, x33, 1]]
        :param y: 1-D ndarray of corresponding outputs.

        The intercept is not treated specially; it is just the coefficient
        of the constant column.
        '''
        num_features = len(x[0])
        rank = np.linalg.matrix_rank(x)
        if rank >= num_features:
            # x.T @ x has full rank and is invertible: use the closed-form
            # normal-equation solution directly.
            gram_inverse = np.linalg.inv(x.T.dot(x))
            self.coefficients = gram_inverse.dot(x.T).dot(y)
        else:
            # Rank-deficient design matrix: solve iteratively instead.
            self.coefficients, error = lsqr(x, y, alpha=0.001, error=0.01, count=1000)
        print(self.coefficients)

    def predict(self, x):
        '''
        Predict outputs for ``x``.

        :param x: ndarray with the same column layout used in fit()
        :return: predicted y values
        :raises EOFError: if fit() has not been called yet
        '''
        if not isinstance(self.coefficients, np.ndarray):
            raise EOFError("还没有进行模型拟合")
        return x.dot(self.coefficients)
"numpy.linalg.matrix_rank"
] | [((536, 560), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['x'], {}), '(x)\n', (557, 560), True, 'import numpy as np\n')] |
from typing import Optional
import cv2
import numpy as np
def rect_empty(rect):
    """True when an (x0, y0, x1, y1) rectangle has zero width and height."""
    x0, y0, x1, y1 = rect[0], rect[1], rect[2], rect[3]
    return x0 == x1 and y0 == y1
def rect_to_region(left, top, width, height, w=1, h=1):
    """Convert a pixel rect (left, top, width, height) into a normalised
    [top, left, bottom, right] region by dividing by the frame size (w, h)."""
    bottom = (top + height) / h
    right = (left + width) / w
    return [top / h, left / w, bottom, right]
def region_reparent(region, reference_region):
    """Map ``region`` (defined relative to ``reference_region``) into global
    coordinates.

    Both regions are normalised [top, left, bottom, right] lists.  A
    reference of None means ``region`` is already global and is returned
    unchanged.
    """
    if reference_region is None:
        return region
    ref_top, ref_left, ref_bottom, ref_right = reference_region
    ref_height = ref_bottom - ref_top
    ref_width = ref_right - ref_left
    return [
        ref_top + ref_height * region[0],
        ref_left + ref_width * region[1],
        ref_top + ref_height * region[2],
        ref_left + ref_width * region[3],
    ]
def extract_region(image: np.ndarray, region) -> np.ndarray:
    """Crop the normalised [top, left, bottom, right] region from an image.

    ``image`` is an (H, W, C) array; fractional coordinates are scaled by
    the image size and truncated to integer pixel indices.
    """
    height, width, _channels = image.shape
    top = int(height * region[0])
    left = int(width * region[1])
    bottom = int(height * region[2])
    right = int(width * region[3])
    return image[top:bottom, left:right, :]
def display_shadow_text(img, x, y, text, font=cv2.FONT_HERSHEY_PLAIN, font_scale=1.25, thickness=1, line_spacing=1.5):
    """
    Displays text with a grey shadow at point x,y.

    Multi-line text is supported; each subsequent line is advanced by the
    measured line height times ``line_spacing``.  Returns ``img``.
    """
    text_color = (255, 255, 255)  # color as (B,G,R)
    text_shadow = (0, 0, 0)
    # Measure a sample with ascenders/descenders to get the line height.
    (w, h), _ = cv2.getTextSize(
        text="jM^",
        fontFace=font,
        fontScale=font_scale,
        thickness=thickness,
    )
    # np.float was removed in NumPy 1.24; the builtin float is the
    # documented equivalent (float64).
    org = np.array([x, y], dtype=float)
    for line in text.splitlines():
        # Draw the shadow first, offset by one pixel, then the text on top.
        cv2.putText(img, line, tuple((org + [1, 1]).astype(int)), font, font_scale, text_shadow, thickness=thickness,
                    lineType=cv2.LINE_AA)
        cv2.putText(img, line, tuple(org.astype(int)), font, font_scale, text_color, thickness=thickness,
                    lineType=cv2.LINE_AA)
        org += [0, h * line_spacing]
    return img
class CVWindow:
    """Thin wrapper around an OpenCV HighGUI window.

    Remembers the last image shown (so ROI selection can reuse it) and
    buffers key presses polled via ``key_loop``.
    """

    def __init__(self, caption: str) -> None:
        super().__init__()
        self.name = caption  # window title; also the HighGUI window identifier
        self.showing: Optional[np.ndarray] = None  # last image passed to show()
        self.key_buffer = []  # keys collected by key_loop(), drained by get_key()

    def destroy_window(self) -> None:
        """Close the underlying OpenCV window."""
        cv2.destroyWindow(self.name)

    def display_image(self, img: np.ndarray, reduction=2, overlay_fn=None, is_rgb=False):
        """
        Resize image and display using imshow. Mainly for debugging.
        Resizing the image allows us to see the full frame on the monitor
        as cv2.imshow only allows zooming in.
        The reduction factor can be specified, but defaults to half size.
        ``overlay_fn``, if callable, is invoked on the resized copy so the
        caller can draw annotations before display.
        """
        # Clamp the reduction factor to a sane range (1/8x .. 8x).
        reduction = np.clip(reduction, 1 / 8, 8)
        newx, newy = int(img.shape[1] / reduction), int(img.shape[0] / reduction)  # new size (w,h)
        newimg = cv2.resize(img, (newx, newy), interpolation=cv2.INTER_NEAREST)
        if is_rgb:
            # imshow expects BGR channel order; convert RGB input first.
            newimg = cv2.cvtColor(newimg, cv2.COLOR_RGB2BGR)
        if callable(overlay_fn):
            overlay_fn(newimg)
        self.show(newimg)

    def show(self, img: np.ndarray) -> None:
        """Display ``img`` and remember it as the currently shown frame."""
        cv2.imshow(self.name, img)
        self.showing = img

    # Key codes as returned by cv2.waitKey.
    # NOTE(review): these look like X11 keysym values — confirm they match
    # on non-Linux OpenCV builds before relying on them.
    VK_RightArrow = 65363
    VK_LeftArrow = 65361
    VK_UpArrow = 65362
    VK_DownArrow = 65364
    VK_Escape = 27
    VK_Enter = 10
    VK_Home = 65360
    VK_End = 65367
    VK_PgUp = 65365
    VK_PgDn = 65366
    VK_Tab = 9

    def wait_key(self, timeout=None):
        """Poll for a key press; return its code, or None if nothing pressed."""
        key = 0xffff & cv2.waitKey(timeout)
        if key == 0xffff:
            # waitKey returns -1 (all bits set) when no key was pressed.
            return None
        return key

    def key_loop(self, time=5) -> None:
        """Pump the GUI event loop for ``time`` ms, buffering any key press."""
        key = self.wait_key(time)
        if key is not None:
            self.key_buffer.append(key)

    def get_key(self):
        """Pump the event loop once and pop the oldest buffered key, if any."""
        self.key_loop()
        if self.key_buffer:
            return self.key_buffer.pop(0)
        return None

    def select_roi(self, from_center=True):
        """Let the user drag a rectangle on the shown image.

        Returns the selection as a normalised [top, left, bottom, right]
        region, or None if the selection was empty/cancelled.
        """
        assert self.showing is not None
        rect = cv2.selectROI(self.name, self.showing, showCrosshair=True, fromCenter=from_center)
        if rect_empty(rect):
            return None
        scaled = rect_to_region(*rect, self.showing.shape[1], self.showing.shape[0])
        return scaled
| [
"cv2.selectROI",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.getTextSize",
"numpy.clip",
"numpy.array",
"cv2.destroyWindow",
"cv2.imshow",
"cv2.resize"
] | [((1316, 1406), 'cv2.getTextSize', 'cv2.getTextSize', ([], {'text': '"""jM^"""', 'fontFace': 'font', 'fontScale': 'font_scale', 'thickness': 'thickness'}), "(text='jM^', fontFace=font, fontScale=font_scale, thickness=\n thickness)\n", (1331, 1406), False, 'import cv2\n'), ((1451, 1483), 'numpy.array', 'np.array', (['[x, y]'], {'dtype': 'np.float'}), '([x, y], dtype=np.float)\n', (1459, 1483), True, 'import numpy as np\n'), ((2117, 2145), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.name'], {}), '(self.name)\n', (2134, 2145), False, 'import cv2\n'), ((2604, 2632), 'numpy.clip', 'np.clip', (['reduction', '(1 / 8)', '(8)'], {}), '(reduction, 1 / 8, 8)\n', (2611, 2632), True, 'import numpy as np\n'), ((2750, 2812), 'cv2.resize', 'cv2.resize', (['img', '(newx, newy)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, (newx, newy), interpolation=cv2.INTER_NEAREST)\n', (2760, 2812), False, 'import cv2\n'), ((3029, 3055), 'cv2.imshow', 'cv2.imshow', (['self.name', 'img'], {}), '(self.name, img)\n', (3039, 3055), False, 'import cv2\n'), ((3839, 3926), 'cv2.selectROI', 'cv2.selectROI', (['self.name', 'self.showing'], {'showCrosshair': '(True)', 'fromCenter': 'from_center'}), '(self.name, self.showing, showCrosshair=True, fromCenter=\n from_center)\n', (3852, 3926), False, 'import cv2\n'), ((2853, 2892), 'cv2.cvtColor', 'cv2.cvtColor', (['newimg', 'cv2.COLOR_RGB2BGR'], {}), '(newimg, cv2.COLOR_RGB2BGR)\n', (2865, 2892), False, 'import cv2\n'), ((3376, 3396), 'cv2.waitKey', 'cv2.waitKey', (['timeout'], {}), '(timeout)\n', (3387, 3396), False, 'import cv2\n')] |
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datasets
import numpy as np
from model.utils.config import cfg
from datasets.factory import get_imdb
import PIL
import pdb
def prepare_roidb(imdb):
    """
    Enrich the imdb's roidb with derived per-image metadata useful for
    training.

    For each entry this records the image id and path (and, for non-COCO
    datasets, the image size) plus, for every ground-truth box, the maximum
    overlap with any class and the class achieving that maximum.
    """
    roidb = imdb.roidb
    is_coco = imdb.name.startswith('coco')
    if not is_coco:
        # Non-COCO datasets need the image sizes read from disk via PIL.
        sizes = [PIL.Image.open(imdb.image_path_at(i)).size
                 for i in range(imdb.num_images)]
    for idx in range(len(imdb.image_index)):
        entry = roidb[idx]
        # Index of image (0 .. number of images) and its absolute path.
        entry['img_id'] = imdb.image_id_at(idx)
        entry['image'] = imdb.image_path_at(idx)
        if not is_coco:
            entry['width'] = sizes[idx][0]
            entry['height'] = sizes[idx][1]
        # Densify gt_overlaps so we can take per-box max/argmax over classes.
        gt_overlaps = entry['gt_overlaps'].toarray()
        max_overlaps = gt_overlaps.max(axis=1)
        max_classes = gt_overlaps.argmax(axis=1)
        entry['max_classes'] = max_classes
        entry['max_overlaps'] = max_overlaps
        # Sanity checks: zero overlap must map to background (class 0),
        # positive overlap must map to a foreground class.
        background = np.where(max_overlaps == 0)[0]
        assert all(max_classes[background] == 0)
        foreground = np.where(max_overlaps > 0)[0]
        assert all(max_classes[foreground] != 0)
def rank_roidb_ratio(roidb):
    """Rank roidb entries by aspect ratio (width / height).

    Ratios are clamped into [0.25, 4]; entries outside that range get
    ``need_crop`` set to 1 (and their path printed), others get 0.
    Returns (sorted ratio array, index array that sorts the input list).
    """
    ratio_large = 4     # largest ratio to preserve.
    ratio_small = 0.25  # smallest ratio to preserve.
    ratio_list = []
    for entry in roidb:
        ratio = entry['width'] / float(entry['height'])
        if ratio > ratio_large:
            entry['need_crop'] = 1
            ratio = ratio_large
            print(entry['image'])
        elif ratio < ratio_small:
            entry['need_crop'] = 1
            ratio = ratio_small
            print(entry['image'])
        else:
            entry['need_crop'] = 0
        ratio_list.append(ratio)
    ratios = np.array(ratio_list)
    order = np.argsort(ratios)
    return ratios[order], order
def filter_roidb(roidb):
    """Remove images that have no ground-truth bounding box.

    The list is filtered in place (callers may hold a reference to the same
    list object) and also returned for convenience.
    """
    print('before filtering, there are %d images...' % (len(roidb)))
    # Slice-assign so the original list object is mutated, as before, but in
    # O(n) instead of the original O(n^2) repeated-del loop.
    roidb[:] = [entry for entry in roidb if len(entry['boxes']) > 0]
    print('after filtering, there are %d images...' % (len(roidb)))
    return roidb
def combined_roidb(imdb_names, training=True):
    """
    Combine multiple roidbs.

    ``imdb_names`` is a single dataset name or several names joined with
    '+'.  Each dataset's roidb is prepared for training (optionally with
    horizontally flipped images) and the entries are concatenated.
    Returns (imdb, roidb, ratio_list, ratio_index).
    """
    def get_training_roidb(imdb):
        """Returns a roidb (Region of Interest database) for use in training."""
        if cfg.TRAIN.USE_FLIPPED:
            print('Appending horizontally-flipped training examples...')
            imdb.append_flipped_images()
            print('done')
        print('Preparing training data...')
        prepare_roidb(imdb)
        # ratio_index = rank_roidb_ratio(imdb)
        print('done')
        return imdb.roidb

    def get_roidb(imdb_name):
        # Load one dataset by name and enrich its roidb for training.
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        roidb = get_training_roidb(imdb)
        return roidb

    # get all roidbs, like this: [{}, {}, ..., {}]
    # roidb entry keys:
    #   boxes: [x1, y1, x2, y2]
    #   gt_overlaps: a sparse matrix, shape is (num_obj, num_classes)
    #   gt_classes: class index list
    #   flipped: bool, is origin image or flipped origin
    #   img_id: image's index from 0 to num of images (include flipped)
    #   image: absolute path of image
    #   width: width of image
    #   height: height of image
    #   max_classes: classes index column
    #   max_overlaps: 0-1 column, 1 means fg, 0 means bg
    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    # if only one dataset is used (e.g. pascal voc 2007) the length of
    # roidbs is 1; otherwise it equals the number of datasets.
    roidb = roidbs[0]  # roidb of first dataset
    if len(roidbs) > 1:
        # Concatenate the remaining datasets' entries onto the first roidb
        # and build a combined imdb carrying the second dataset's classes.
        for r in roidbs[1:]:
            roidb.extend(r)
        tmp = get_imdb(imdb_names.split('+')[1])
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    if training:
        roidb = filter_roidb(roidb)
    ratio_list, ratio_index = rank_roidb_ratio(roidb)
    return imdb, roidb, ratio_list, ratio_index
| [
"datasets.imdb.imdb",
"numpy.argsort",
"numpy.where",
"numpy.array",
"datasets.factory.get_imdb"
] | [((2686, 2706), 'numpy.array', 'np.array', (['ratio_list'], {}), '(ratio_list)\n', (2694, 2706), True, 'import numpy as np\n'), ((2725, 2747), 'numpy.argsort', 'np.argsort', (['ratio_list'], {}), '(ratio_list)\n', (2735, 2747), True, 'import numpy as np\n'), ((3757, 3776), 'datasets.factory.get_imdb', 'get_imdb', (['imdb_name'], {}), '(imdb_name)\n', (3765, 3776), False, 'from datasets.factory import get_imdb\n'), ((4980, 5023), 'datasets.imdb.imdb', 'datasets.imdb.imdb', (['imdb_names', 'tmp.classes'], {}), '(imdb_names, tmp.classes)\n', (4998, 5023), False, 'import datasets\n'), ((5049, 5069), 'datasets.factory.get_imdb', 'get_imdb', (['imdb_names'], {}), '(imdb_names)\n', (5057, 5069), False, 'from datasets.factory import get_imdb\n'), ((1685, 1712), 'numpy.where', 'np.where', (['(max_overlaps == 0)'], {}), '(max_overlaps == 0)\n', (1693, 1712), True, 'import numpy as np\n'), ((1862, 1888), 'numpy.where', 'np.where', (['(max_overlaps > 0)'], {}), '(max_overlaps > 0)\n', (1870, 1888), True, 'import numpy as np\n')] |
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sys
from warped_image import warp
from image_pipeline import pipeline
from fit_line import fit_polynomial
from lane_pixels import find_lane_pixels
# Read in the saved camera matrix and distortion coefficients.
calib_file = 'camera_cali.p'
with open(calib_file, mode='rb') as f:
    dist_pickle = pickle.load(f)
mtx = dist_pickle["matrix"]
dist = dist_pickle["dist_c"]

# Open the input video and create a writer matching its frame size.
cap = cv2.VideoCapture('harder_challenge_video.mp4')
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
size = (width, height)
print("size", size)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('testing4.avi', fourcc, 20, size)

# Perspective-transform reference points (source quad and destination quad).
src = np.float32(
    [[712, 466],
     [1035, 662],
     [307, 662],
     [569, 469]])
dst = np.float32([
    [842, 0],
    [842, 664],
    [400, 664],
    [400, 0]])

# Process the video frame by frame.
while True:
    ret, img = cap.read()
    if not ret:
        # End of video (or read failure): the original ignored this flag
        # and fed None into cv2.undistort, crashing at the last frame.
        break
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    result = pipeline(undistorted)
    # Restrict the thresholded image to a triangular region of interest.
    region_of_interest_vertices = [
        (0, 720),
        (1280 / 2, 720 / 2),
        (1280, 720),
    ]
    roi = np.array([region_of_interest_vertices], np.int32)
    mask = np.zeros_like(result)
    match_mask_color = 255
    cv2.fillPoly(mask, roi, match_mask_color)
    masked_image = cv2.bitwise_and(result, mask)
    # Bird's-eye view of the thresholded frame.
    warped_img = warp(result)
    try:
        out_img = fit_polynomial(warped_img, undistorted)
        print('Writing down result')
        out.write(out_img)
    except TypeError:
        # Lane fitting failed on this frame; fall back to the raw frame.
        out.write(undistorted)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
print("writing down the file")
out.release()
cv2.destroyAllWindows()
| [
"numpy.zeros_like",
"cv2.VideoWriter_fourcc",
"numpy.copy",
"cv2.bitwise_and",
"warped_image.warp",
"fit_line.fit_polynomial",
"numpy.float32",
"cv2.waitKey",
"cv2.fillPoly",
"cv2.VideoCapture",
"pickle.load",
"numpy.array",
"cv2.VideoWriter",
"cv2.destroyAllWindows",
"image_pipeline.pip... | [((528, 574), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""harder_challenge_video.mp4"""'], {}), "('harder_challenge_video.mp4')\n", (544, 574), False, 'import cv2\n'), ((840, 871), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (862, 871), False, 'import cv2\n'), ((878, 927), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""testing4.avi"""', 'fourcc', '(20)', 'size'], {}), "('testing4.avi', fourcc, 20, size)\n", (893, 927), False, 'import cv2\n'), ((934, 995), 'numpy.float32', 'np.float32', (['[[712, 466], [1035, 662], [307, 662], [569, 469]]'], {}), '([[712, 466], [1035, 662], [307, 662], [569, 469]])\n', (944, 995), True, 'import numpy as np\n'), ((1043, 1099), 'numpy.float32', 'np.float32', (['[[842, 0], [842, 664], [400, 664], [400, 0]]'], {}), '([[842, 0], [842, 664], [400, 664], [400, 0]])\n', (1053, 1099), True, 'import numpy as np\n'), ((3025, 3048), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3046, 3048), False, 'import cv2\n'), ((389, 403), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (400, 403), False, 'import pickle\n'), ((2142, 2182), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (2155, 2182), False, 'import cv2\n'), ((2194, 2215), 'image_pipeline.pipeline', 'pipeline', (['undistorted'], {}), '(undistorted)\n', (2202, 2215), False, 'from image_pipeline import pipeline\n'), ((2272, 2284), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (2279, 2284), True, 'import numpy as np\n'), ((2385, 2434), 'numpy.array', 'np.array', (['[region_of_interest_vertices]', 'np.int32'], {}), '([region_of_interest_vertices], np.int32)\n', (2393, 2434), True, 'import numpy as np\n'), ((2440, 2461), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (2453, 2461), True, 'import numpy as np\n'), ((2486, 2527), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'roi', 'match_mask_color'], {}), '(mask, roi, 
match_mask_color)\n', (2498, 2527), False, 'import cv2\n'), ((2543, 2572), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result', 'mask'], {}), '(result, mask)\n', (2558, 2572), False, 'import cv2\n'), ((2588, 2600), 'warped_image.warp', 'warp', (['result'], {}), '(result)\n', (2592, 2600), False, 'from warped_image import warp\n'), ((2643, 2682), 'fit_line.fit_polynomial', 'fit_polynomial', (['warped_img', 'undistorted'], {}), '(warped_img, undistorted)\n', (2657, 2682), False, 'from fit_line import fit_polynomial\n'), ((2918, 2932), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2929, 2932), False, 'import cv2\n')] |
import pytest
import numpy as np
import os
from dispersion import Material
from dispersion import Spectrum
@pytest.fixture
def spectrum():
    """A 0.5 micrometre reference spectrum shared by the tests."""
    yield Spectrum(0.5, unit='um')
@pytest.fixture
def root_path():
    """Path to the repository's data directory, relative to this test file."""
    here = os.path.dirname(os.path.abspath(__file__))
    yield os.path.join(here, "..", "data")
def test_mat_init(spectrum):
    """A material with a fixed index returns n = 1 + 0j."""
    material = Material(fixed_n=1.0)
    refractive_index = material.get_nk_data(spectrum)
    assert np.isclose(np.real(refractive_index), 1.0)
    assert np.isclose(np.imag(refractive_index), 0.0)
def test_from_yml_file(spectrum, root_path):
    """Silver n,k loaded from a refractiveindex.info YAML database file."""
    filepath = os.path.join(root_path, 'RefractiveIndexInfo',
                            'data', 'main', 'Ag', 'Hagemann.yml')
    material = Material(file_path=filepath,
                        spectrum_type='wavelength',
                        unit='micrometer')
    n = material.get_nk_data(spectrum)
    assert np.isclose(np.real(n), 0.23805806451612901)
    assert np.isclose(np.imag(n), 3.126040322580645)
def test_from_txt_file(spectrum, root_path):
    """AlSb n,k loaded from a plain-text user data file."""
    filepath = os.path.join(root_path, 'UserData', 'AlSb.txt')
    material = Material(file_path=filepath,
                        spectrum_type='wavelength',
                        unit='micrometer')
    n = material.get_nk_data(spectrum)
    assert np.isclose(np.real(n), 4.574074754901961)
    assert np.isclose(np.imag(n), 0.4318627450980393)
def test_from_nk_file(spectrum, root_path):
    """Silver n,k loaded from a Palik-style .nk file (wavelength in angstrom)."""
    filepath = os.path.join(root_path, 'Palik', 'Ag.nk')
    material = Material(file_path=filepath,
                        spectrum_type='wavelength',
                        unit='angstrom')
    n = material.get_nk_data(spectrum)
    assert np.isclose(np.real(n), 0.13)
    assert np.isclose(np.imag(n), 2.917632850241546)
def test_from_model(spectrum):
    """n,k evaluated from a Drude model parameterised for silver."""
    plasma_frequency = 8.55  # eV
    damping = 18.4e-3  # eV
    model_kw = {'name': 'Drude', 'parameters': [plasma_frequency, damping],
                'valid_range': [0.0, np.inf],
                'spectrum_type': 'energy', 'unit': 'ev'}
    material = Material(model_kw=model_kw)
    n = material.get_nk_data(spectrum)
    assert np.isclose(np.real(n), 0.013366748652710245)
    assert np.isclose(np.imag(n), 3.2997524521729824)
if __name__ == "__main__":
pass
| [
"os.path.abspath",
"dispersion.Material",
"dispersion.Spectrum",
"numpy.imag",
"numpy.real",
"os.path.join"
] | [((155, 179), 'dispersion.Spectrum', 'Spectrum', (['(0.5)'], {'unit': '"""um"""'}), "(0.5, unit='um')\n", (163, 179), False, 'from dispersion import Spectrum\n'), ((307, 343), 'os.path.join', 'os.path.join', (['this_dir', '""".."""', '"""data"""'], {}), "(this_dir, '..', 'data')\n", (319, 343), False, 'import os\n'), ((401, 422), 'dispersion.Material', 'Material', ([], {'fixed_n': '(1.0)'}), '(fixed_n=1.0)\n', (409, 422), False, 'from dispersion import Material\n'), ((594, 667), 'os.path.join', 'os.path.join', (['"""RefractiveIndexInfo"""', '"""data"""', '"""main"""', '"""Ag"""', '"""Hagemann.yml"""'], {}), "('RefractiveIndexInfo', 'data', 'main', 'Ag', 'Hagemann.yml')\n", (606, 667), False, 'import os\n'), ((737, 769), 'os.path.join', 'os.path.join', (['root_path', 'relpath'], {}), '(root_path, relpath)\n', (749, 769), False, 'import os\n'), ((778, 853), 'dispersion.Material', 'Material', ([], {'file_path': 'filepath', 'spectrum_type': '"""wavelength"""', 'unit': '"""micrometer"""'}), "(file_path=filepath, spectrum_type='wavelength', unit='micrometer')\n", (786, 853), False, 'from dispersion import Material\n'), ((1091, 1127), 'os.path.join', 'os.path.join', (['"""UserData"""', '"""AlSb.txt"""'], {}), "('UserData', 'AlSb.txt')\n", (1103, 1127), False, 'import os\n'), ((1142, 1174), 'os.path.join', 'os.path.join', (['root_path', 'relpath'], {}), '(root_path, relpath)\n', (1154, 1174), False, 'import os\n'), ((1183, 1258), 'dispersion.Material', 'Material', ([], {'file_path': 'filepath', 'spectrum_type': '"""wavelength"""', 'unit': '"""micrometer"""'}), "(file_path=filepath, spectrum_type='wavelength', unit='micrometer')\n", (1191, 1258), False, 'from dispersion import Material\n'), ((1494, 1524), 'os.path.join', 'os.path.join', (['"""Palik"""', '"""Ag.nk"""'], {}), "('Palik', 'Ag.nk')\n", (1506, 1524), False, 'import os\n'), ((1539, 1571), 'os.path.join', 'os.path.join', (['root_path', 'relpath'], {}), '(root_path, relpath)\n', (1551, 1571), False, 'import 
os\n'), ((1580, 1653), 'dispersion.Material', 'Material', ([], {'file_path': 'filepath', 'spectrum_type': '"""wavelength"""', 'unit': '"""angstrom"""'}), "(file_path=filepath, spectrum_type='wavelength', unit='angstrom')\n", (1588, 1653), False, 'from dispersion import Material\n'), ((2056, 2083), 'dispersion.Material', 'Material', ([], {'model_kw': 'model_kw'}), '(model_kw=model_kw)\n', (2064, 2083), False, 'from dispersion import Material\n'), ((264, 289), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (279, 289), False, 'import os\n'), ((478, 488), 'numpy.real', 'np.real', (['n'], {}), '(n)\n', (485, 488), True, 'import numpy as np\n'), ((517, 527), 'numpy.imag', 'np.imag', (['n'], {}), '(n)\n', (524, 527), True, 'import numpy as np\n'), ((945, 955), 'numpy.real', 'np.real', (['n'], {}), '(n)\n', (952, 955), True, 'import numpy as np\n'), ((1000, 1010), 'numpy.imag', 'np.imag', (['n'], {}), '(n)\n', (1007, 1010), True, 'import numpy as np\n'), ((1350, 1360), 'numpy.real', 'np.real', (['n'], {}), '(n)\n', (1357, 1360), True, 'import numpy as np\n'), ((1403, 1413), 'numpy.imag', 'np.imag', (['n'], {}), '(n)\n', (1410, 1413), True, 'import numpy as np\n'), ((1745, 1755), 'numpy.real', 'np.real', (['n'], {}), '(n)\n', (1752, 1755), True, 'import numpy as np\n'), ((1785, 1795), 'numpy.imag', 'np.imag', (['n'], {}), '(n)\n', (1792, 1795), True, 'import numpy as np\n'), ((2140, 2150), 'numpy.real', 'np.real', (['n'], {}), '(n)\n', (2147, 2150), True, 'import numpy as np\n'), ((2196, 2206), 'numpy.imag', 'np.imag', (['n'], {}), '(n)\n', (2203, 2206), True, 'import numpy as np\n')] |
import sys,os
import random
sys.path.append(os.getcwd())
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import load5foldData
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
# th.manual_seed(100)
# th.cuda.manual_seed_all(100)
# random.seed(12345)
# th.manual_seed(12345)
# th.cuda.manual_seed_all(12345)
import os
import datetime
import json
def ensure_directory(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``os.path.exists`` guard, which had a check-then-create race and did
    not create missing parent directories atomically.
    """
    os.makedirs(path, exist_ok=True)
def write_results(string):
    """Append *string* (converted with ``str``) as one line to the global
    RESULTS_FILE log."""
    line = str(string) + '\n'
    with open(RESULTS_FILE, 'a') as log_file:
        log_file.write(line)
def save_json_file(path, data):
    """Serialize *data* as JSON and write it to *path*, overwriting any
    existing file."""
    serialized = json.dumps(data)
    with open(path, "w") as out_file:
        out_file.write(serialized)
def load_json_file(path):
    """Read the file at *path* and return its parsed JSON content.

    Uses ``json.load`` on the open file handle instead of reading the whole
    file into a string and calling ``json.loads`` on it.
    """
    with open(path, "r") as json_file:
        data = json.load(json_file)
    return data
# Module-level side effect: make sure the output directory exists before any
# results are appended by write_results().
ensure_directory("./results")
# Run timestamp used to make the results file name unique per invocation.
current = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
# NOTE(review): the dataset label is hard-coded to "Twitter16" here, while the
# dataset actually trained on comes from sys.argv[1] below — confirm the label
# is intended to be fixed.
RESULTS_FILE = "./results/out_{0}_{1}_{2}.txt".format("Twitter16", "BiGCN_org", current)
from torch_scatter import scatter_mean
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
    """Top-down rumor-propagation branch.

    Two GCN layers over ``data.edge_index``; after each layer the features of
    every tree's root node are broadcast onto all nodes of that tree and
    concatenated ("root feature enhancement"), and the result is mean-pooled
    per graph.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(TDrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        self.conv2 = GCNConv(hid_feats + in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        rootindex = data.rootindex
        n_graphs = max(data.batch) + 1

        def broadcast_roots(feats):
            # Copy each tree's root-node features onto every node of that tree.
            spread = th.zeros(len(data.batch), feats.size(1)).to(device)
            for g in range(n_graphs):
                spread[th.eq(data.batch, g)] = feats[rootindex[g]]
            return spread

        raw_feats = copy.copy(x.float())
        x = self.conv1(x, edge_index)
        hidden_feats = copy.copy(x)

        x = F.relu(th.cat((x, broadcast_roots(raw_feats)), 1))
        x = F.dropout(x, training=self.training)
        x = F.relu(self.conv2(x, edge_index))
        x = th.cat((x, broadcast_roots(hidden_feats)), 1)
        return scatter_mean(x, data.batch, dim=0)
class BUrumorGCN(th.nn.Module):
    """Bottom-up rumor-propagation branch.

    Same architecture as the top-down branch but run over the reversed edge
    set (``data.BU_edge_index``): two GCN layers, each followed by root
    feature enhancement, then a per-graph mean pooling.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        self.conv2 = GCNConv(hid_feats + in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.BU_edge_index
        raw_feats = copy.copy(x.float())
        x = self.conv1(x, edge_index)
        hidden_feats = copy.copy(x)
        roots = data.rootindex
        n_trees = max(data.batch) + 1

        # Root feature enhancement after layer 1.
        extend1 = th.zeros(len(data.batch), raw_feats.size(1)).to(device)
        for tree in range(n_trees):
            mask = th.eq(data.batch, tree)
            extend1[mask] = raw_feats[roots[tree]]
        x = F.dropout(F.relu(th.cat((x, extend1), 1)), training=self.training)

        x = F.relu(self.conv2(x, edge_index))

        # Root feature enhancement after layer 2.
        extend2 = th.zeros(len(data.batch), hidden_feats.size(1)).to(device)
        for tree in range(n_trees):
            mask = th.eq(data.batch, tree)
            extend2[mask] = hidden_feats[roots[tree]]
        x = th.cat((x, extend2), 1)
        return scatter_mean(x, data.batch, dim=0)
class Net(th.nn.Module):
    """BiGCN classifier.

    Runs the top-down and bottom-up branch on the same batch, concatenates
    the two graph embeddings, and maps them to 4-way class
    log-probabilities with a single linear layer.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(Net, self).__init__()
        self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
        self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
        # Each branch emits (out_feats + hid_feats) features per graph.
        self.fc = th.nn.Linear((out_feats + hid_feats) * 2, 4)

    def forward(self, data):
        td_embedding = self.TDrumorGCN(data)
        bu_embedding = self.BUrumorGCN(data)
        combined = th.cat((bu_embedding, td_embedding), 1)
        return F.log_softmax(self.fc(combined), dim=1)
def train_GCN(treeDic, x_test, x_train,TDdroprate,BUdroprate,lr, weight_decay,patience,n_epochs,batchsize,dataname,iter):
    """Train and evaluate a BiGCN model on one train/test fold.

    Runs up to *n_epochs* epochs with early stopping on the validation
    metrics and returns the per-epoch curves plus the final (or
    early-stopped) accuracy and per-class F1 scores.

    Args:
        treeDic: propagation-tree dictionary produced by ``loadTree``.
        x_test / x_train: event ids of the test / train split.
        TDdroprate / BUdroprate: edge-drop rates for the TD / BU graphs.
        lr, weight_decay: Adam hyper-parameters.
        patience: early-stopping patience in epochs.
        n_epochs: maximum number of epochs.
        batchsize: mini-batch size.
        dataname: dataset name, e.g. "Twitter16".
        iter: index of the outer repetition (used for logging only).

    Returns:
        tuple: (train_losses, val_losses, train_accs, val_accs,
                accs, F1, F2, F3, F4)
    """
    # 5000 input features, 64 hidden, 64 output per GCN layer.
    model = Net(5000,64,64).to(device)
    # BU_params=list(map(id,model.BUrumorGCN.conv1.parameters()))
    # BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
    # base_params=filter(lambda p:id(p) not in BU_params,model.parameters())
    # optimizer = th.optim.Adam([
    #     {'params':base_params},
    #     {'params':model.BUrumorGCN.conv1.parameters(),'lr':lr/5},
    #     {'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr/5}
    # ], lr=lr, weight_decay=weight_decay)
    # optimizer = th.optim.Adam([
    #     {'params': model.parameters(),'lr':lr/5},
    # ], lr=lr, weight_decay=weight_decay)
    optimizer = th.optim.Adam([
        {'params': model.parameters(),'lr':lr},
    ], lr=lr, weight_decay=weight_decay)
    model.train()
    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    # NOTE(review): Net.forward returns log-probabilities (F.log_softmax),
    # but CrossEntropyLoss applies log-softmax internally, so training
    # effectively applies it twice, while validation below uses F.nll_loss
    # (which matches log-probabilities). Confirm this asymmetry is intended.
    criterion = th.nn.CrossEntropyLoss()
    for epoch in range(n_epochs):
        # Data is rebuilt every epoch — presumably so the TD/BU edge dropout
        # is re-sampled each time; confirm against loadBiData.
        traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate,BUdroprate)
        # print()
        # train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        # test_loader = DataLoader(testdata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        # NOTE(review): shuffling is disabled here (the shuffle=True loaders
        # are kept above, commented out) — confirm this is intentional.
        train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=False, num_workers=5)
        test_loader = DataLoader(testdata_list, batch_size=batchsize, shuffle=False, num_workers=5)
        avg_loss = []
        avg_acc = []
        batch_idx = 0
        # tqdm_train_loader = tqdm(train_loader) # JIHO
        tqdm_train_loader = train_loader
        # ---- training pass over all mini-batches ----
        for Batch_data in tqdm_train_loader:
            Batch_data.to(device)
            out_labels= model(Batch_data)
            # finalloss=F.nll_loss(out_labels,Batch_data.y)
            finalloss=criterion(out_labels,Batch_data.y)
            loss=finalloss
            optimizer.zero_grad()
            loss.backward()
            avg_loss.append(loss.item())
            optimizer.step()
            _, pred = out_labels.max(dim=-1)
            correct = pred.eq(Batch_data.y).sum().item()
            train_acc = correct / len(Batch_data.y)
            avg_acc.append(train_acc)
            print("Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(iter,epoch, batch_idx, loss.item(), train_acc))
            batch_idx = batch_idx + 1
        train_losses.append(np.mean(avg_loss))
        train_accs.append(np.mean(avg_acc))
        temp_val_losses = []
        temp_val_accs = []
        # Per-class validation accumulators (4 classes x Acc/Prec/Recall/F1).
        temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1, \
        temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2, \
        temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3, \
        temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4 = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
        model.eval()
        # tqdm_test_loader = tqdm(test_loader) # JIHO
        tqdm_test_loader = test_loader
        # ---- validation pass (no th.no_grad() guard in the original) ----
        for Batch_data in tqdm_test_loader:
            Batch_data.to(device)
            val_out = model(Batch_data)
            val_loss = F.nll_loss(val_out, Batch_data.y)
            temp_val_losses.append(val_loss.item())
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(Batch_data.y).sum().item()
            val_acc = correct / len(Batch_data.y)
            Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(
                val_pred, Batch_data.y)
            temp_val_Acc_all.append(Acc_all), temp_val_Acc1.append(Acc1), temp_val_Prec1.append(
                Prec1), temp_val_Recll1.append(Recll1), temp_val_F1.append(F1), \
            temp_val_Acc2.append(Acc2), temp_val_Prec2.append(Prec2), temp_val_Recll2.append(
                Recll2), temp_val_F2.append(F2), \
            temp_val_Acc3.append(Acc3), temp_val_Prec3.append(Prec3), temp_val_Recll3.append(
                Recll3), temp_val_F3.append(F3), \
            temp_val_Acc4.append(Acc4), temp_val_Prec4.append(Prec4), temp_val_Recll4.append(Recll4), temp_val_F4.append(F4)
            temp_val_accs.append(val_acc)
        val_losses.append(np.mean(temp_val_losses))
        val_accs.append(np.mean(temp_val_accs))
        print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
                                                                           np.mean(temp_val_accs)))
        res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
               'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
                                                       np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
               'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
                                                       np.mean(temp_val_Recll2), np.mean(temp_val_F2)),
               'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3),
                                                       np.mean(temp_val_Recll3), np.mean(temp_val_F3)),
               'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4),
                                                       np.mean(temp_val_Recll4), np.mean(temp_val_F4))]
        print('results:', res)
        # Early stopping tracks the mean validation loss/metrics; it also
        # receives the model, presumably to checkpoint it — confirm against
        # tools.earlystopping.
        early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(temp_val_F2),
                       np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'BiGCN', dataname)
        accs =np.mean(temp_val_accs)
        F1 = np.mean(temp_val_F1)
        F2 = np.mean(temp_val_F2)
        F3 = np.mean(temp_val_F3)
        F4 = np.mean(temp_val_F4)
        if early_stopping.early_stop:
            print("Early stopping")
            # Report the metrics of the best epoch seen, not the last one.
            accs=early_stopping.accs
            F1=early_stopping.F1
            F2 = early_stopping.F2
            F3 = early_stopping.F3
            F4 = early_stopping.F4
            break
    return train_losses , val_losses ,train_accs, val_accs,accs,F1,F2,F3,F4
# =========================
# MAIN
# =========================
# Training hyper-parameters.
lr=0.0005
weight_decay=1e-4
patience=10
n_epochs=200
# n_epochs=100 # JIHO
batchsize=128
# Edge-drop rates for the top-down / bottom-up propagation graphs.
TDdroprate=0.2
BUdroprate=0.2
datasetname=sys.argv[1]  # "Twitter15" or "Twitter16"
iterations=int(sys.argv[2])
model="GCN"  # NOTE(review): never used; train_GCN builds its own `model`.
device = th.device('cuda:1' if th.cuda.is_available() else 'cpu')
# Per-repetition averages over the five folds.
test_accs = []
NR_F1 = []
FR_F1 = []
TR_F1 = []
UR_F1 = []
# NOTE(review): `iter` shadows the builtin of the same name below.
for iter in range(iterations):
    fold0_x_test, fold0_x_train, fold1_x_test, fold1_x_train, fold2_x_test, fold2_x_train, fold3_x_test, fold3_x_train, fold4_x_test,fold4_x_train = load5foldData(datasetname)
    # write_results(fold0_x_train)
    # write_results(fold0_x_test)
    # ensure_directory("./temp")
    # NOTE(review): "./temp" must already exist (the ensure_directory call
    # above is commented out), otherwise the next two writes fail.
    save_json_file("./temp/fold0_train.txt", fold0_x_train)
    save_json_file("./temp/fold0_test.txt", fold0_x_test)
    treeDic=loadTree(datasetname)
    # Train/evaluate once per fold, keeping only each fold's final metrics.
    train_losses, val_losses, train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0 = train_GCN(treeDic, fold0_x_test, fold0_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    write_results("accs0: " + str(accs0))
    train_losses, val_losses, train_accs, val_accs1, accs1, F1_1, F2_1, F3_1, F4_1 = train_GCN(treeDic, fold1_x_test, fold1_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    write_results("accs1: " + str(accs1))
    train_losses, val_losses, train_accs, val_accs2, accs2, F1_2, F2_2, F3_2, F4_2 = train_GCN(treeDic, fold2_x_test, fold2_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    write_results("accs2: " + str(accs2))
    train_losses, val_losses, train_accs, val_accs3, accs3, F1_3, F2_3, F3_3, F4_3 = train_GCN(treeDic, fold3_x_test, fold3_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    write_results("accs3: " + str(accs3))
    train_losses, val_losses, train_accs, val_accs4, accs4, F1_4, F2_4, F3_4, F4_4 = train_GCN(treeDic, fold4_x_test, fold4_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    write_results("accs4: " + str(accs4))
    # Average the five folds' metrics for this repetition.
    test_accs.append((accs0+accs1+accs2+accs3+accs4)/5)
    print(train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0)
    NR_F1.append((F1_0+F1_1+F1_2+F1_3+F1_4)/5)
    FR_F1.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)
    TR_F1.append((F3_0 + F3_1 + F3_2 + F3_3 + F3_4) / 5)
    UR_F1.append((F4_0 + F4_1 + F4_2 + F4_3 + F4_4) / 5)
# Final summary: averages over all repetitions.
print("Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}".format(
    sum(test_accs) / iterations, sum(NR_F1) /iterations, sum(FR_F1) /iterations, sum(TR_F1) / iterations, sum(UR_F1) / iterations))
| [
"torch.nn.functional.dropout",
"torch.cat",
"json.dumps",
"numpy.mean",
"os.path.exists",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.relu",
"datetime.datetime.now",
"torch_scatter.scatter_mean",
"torch.cuda.is_available",
"Proce... | [((44, 55), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (53, 55), False, 'import os\n'), ((5333, 5379), 'tools.earlystopping.EarlyStopping', 'EarlyStopping', ([], {'patience': 'patience', 'verbose': '(True)'}), '(patience=patience, verbose=True)\n', (5346, 5379), False, 'from tools.earlystopping import EarlyStopping\n'), ((5396, 5420), 'torch.nn.CrossEntropyLoss', 'th.nn.CrossEntropyLoss', ([], {}), '()\n', (5418, 5420), True, 'import torch as th\n'), ((11321, 11347), 'Process.rand5fold.load5foldData', 'load5foldData', (['datasetname'], {}), '(datasetname)\n', (11334, 11347), False, 'from Process.rand5fold import load5foldData\n'), ((645, 665), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (659, 665), False, 'import os\n'), ((675, 692), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (686, 692), False, 'import os\n'), ((1132, 1155), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1153, 1155), False, 'import datetime\n'), ((1518, 1546), 'torch_geometric.nn.GCNConv', 'GCNConv', (['in_feats', 'hid_feats'], {}), '(in_feats, hid_feats)\n', (1525, 1546), False, 'from torch_geometric.nn import GCNConv\n'), ((1568, 1608), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(hid_feats + in_feats)', 'out_feats'], {}), '(hid_feats + in_feats, out_feats)\n', (1575, 1608), False, 'from torch_geometric.nn import GCNConv\n'), ((1770, 1782), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (1779, 1782), False, 'import copy\n'), ((2095, 2122), 'torch.cat', 'th.cat', (['(x, root_extend)', '(1)'], {}), '((x, root_extend), 1)\n', (2101, 2122), True, 'import torch as th\n'), ((2135, 2144), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2141, 2144), True, 'import torch.nn.functional as F\n'), ((2157, 2193), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (2166, 2193), True, 'import torch.nn.functional as F\n'), ((2244, 2253), 'torch.nn.functional.relu', 
'F.relu', (['x'], {}), '(x)\n', (2250, 2253), True, 'import torch.nn.functional as F\n'), ((2490, 2517), 'torch.cat', 'th.cat', (['(x, root_extend)', '(1)'], {}), '((x, root_extend), 1)\n', (2496, 2517), True, 'import torch as th\n'), ((2530, 2564), 'torch_scatter.scatter_mean', 'scatter_mean', (['x', 'data.batch'], {'dim': '(0)'}), '(x, data.batch, dim=0)\n', (2542, 2564), False, 'from torch_scatter import scatter_mean\n'), ((2736, 2764), 'torch_geometric.nn.GCNConv', 'GCNConv', (['in_feats', 'hid_feats'], {}), '(in_feats, hid_feats)\n', (2743, 2764), False, 'from torch_geometric.nn import GCNConv\n'), ((2786, 2826), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(hid_feats + in_feats)', 'out_feats'], {}), '(hid_feats + in_feats, out_feats)\n', (2793, 2826), False, 'from torch_geometric.nn import GCNConv\n'), ((2991, 3003), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (3000, 3003), False, 'import copy\n'), ((3316, 3343), 'torch.cat', 'th.cat', (['(x, root_extend)', '(1)'], {}), '((x, root_extend), 1)\n', (3322, 3343), True, 'import torch as th\n'), ((3356, 3365), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3362, 3365), True, 'import torch.nn.functional as F\n'), ((3378, 3414), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (3387, 3414), True, 'import torch.nn.functional as F\n'), ((3465, 3474), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3471, 3474), True, 'import torch.nn.functional as F\n'), ((3711, 3738), 'torch.cat', 'th.cat', (['(x, root_extend)', '(1)'], {}), '((x, root_extend), 1)\n', (3717, 3738), True, 'import torch as th\n'), ((3751, 3785), 'torch_scatter.scatter_mean', 'scatter_mean', (['x', 'data.batch'], {'dim': '(0)'}), '(x, data.batch, dim=0)\n', (3763, 3785), False, 'from torch_scatter import scatter_mean\n'), ((4078, 4122), 'torch.nn.Linear', 'th.nn.Linear', (['((out_feats + hid_feats) * 2)', '(4)'], {}), '((out_feats + hid_feats) * 2, 4)\n', 
(4090, 4122), True, 'import torch as th\n'), ((4235, 4258), 'torch.cat', 'th.cat', (['(BU_x, TD_x)', '(1)'], {}), '((BU_x, TD_x), 1)\n', (4241, 4258), True, 'import torch as th\n'), ((4294, 4317), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (4307, 4317), True, 'import torch.nn.functional as F\n'), ((5813, 5891), 'torch_geometric.data.DataLoader', 'DataLoader', (['traindata_list'], {'batch_size': 'batchsize', 'shuffle': '(False)', 'num_workers': '(5)'}), '(traindata_list, batch_size=batchsize, shuffle=False, num_workers=5)\n', (5823, 5891), False, 'from torch_geometric.data import DataLoader\n'), ((5914, 5991), 'torch_geometric.data.DataLoader', 'DataLoader', (['testdata_list'], {'batch_size': 'batchsize', 'shuffle': '(False)', 'num_workers': '(5)'}), '(testdata_list, batch_size=batchsize, shuffle=False, num_workers=5)\n', (5924, 5991), False, 'from torch_geometric.data import DataLoader\n'), ((10235, 10257), 'numpy.mean', 'np.mean', (['temp_val_accs'], {}), '(temp_val_accs)\n', (10242, 10257), True, 'import numpy as np\n'), ((10271, 10291), 'numpy.mean', 'np.mean', (['temp_val_F1'], {}), '(temp_val_F1)\n', (10278, 10291), True, 'import numpy as np\n'), ((10305, 10325), 'numpy.mean', 'np.mean', (['temp_val_F2'], {}), '(temp_val_F2)\n', (10312, 10325), True, 'import numpy as np\n'), ((10339, 10359), 'numpy.mean', 'np.mean', (['temp_val_F3'], {}), '(temp_val_F3)\n', (10346, 10359), True, 'import numpy as np\n'), ((10373, 10393), 'numpy.mean', 'np.mean', (['temp_val_F4'], {}), '(temp_val_F4)\n', (10380, 10393), True, 'import numpy as np\n'), ((11047, 11069), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (11067, 11069), True, 'import torch as th\n'), ((944, 960), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (954, 960), False, 'import json\n'), ((1995, 2023), 'torch.eq', 'th.eq', (['data.batch', 'num_batch'], {}), '(data.batch, num_batch)\n', (2000, 2023), True, 'import torch as th\n'), 
((2390, 2418), 'torch.eq', 'th.eq', (['data.batch', 'num_batch'], {}), '(data.batch, num_batch)\n', (2395, 2418), True, 'import torch as th\n'), ((3216, 3244), 'torch.eq', 'th.eq', (['data.batch', 'num_batch'], {}), '(data.batch, num_batch)\n', (3221, 3244), True, 'import torch as th\n'), ((3611, 3639), 'torch.eq', 'th.eq', (['data.batch', 'num_batch'], {}), '(data.batch, num_batch)\n', (3616, 3639), True, 'import torch as th\n'), ((6971, 6988), 'numpy.mean', 'np.mean', (['avg_loss'], {}), '(avg_loss)\n', (6978, 6988), True, 'import numpy as np\n'), ((7016, 7032), 'numpy.mean', 'np.mean', (['avg_acc'], {}), '(avg_acc)\n', (7023, 7032), True, 'import numpy as np\n'), ((7718, 7751), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['val_out', 'Batch_data.y'], {}), '(val_out, Batch_data.y)\n', (7728, 7751), True, 'import torch.nn.functional as F\n'), ((8802, 8826), 'numpy.mean', 'np.mean', (['temp_val_losses'], {}), '(temp_val_losses)\n', (8809, 8826), True, 'import numpy as np\n'), ((8852, 8874), 'numpy.mean', 'np.mean', (['temp_val_accs'], {}), '(temp_val_accs)\n', (8859, 8874), True, 'import numpy as np\n'), ((10034, 10058), 'numpy.mean', 'np.mean', (['temp_val_losses'], {}), '(temp_val_losses)\n', (10041, 10058), True, 'import numpy as np\n'), ((10060, 10082), 'numpy.mean', 'np.mean', (['temp_val_accs'], {}), '(temp_val_accs)\n', (10067, 10082), True, 'import numpy as np\n'), ((10084, 10104), 'numpy.mean', 'np.mean', (['temp_val_F1'], {}), '(temp_val_F1)\n', (10091, 10104), True, 'import numpy as np\n'), ((10106, 10126), 'numpy.mean', 'np.mean', (['temp_val_F2'], {}), '(temp_val_F2)\n', (10113, 10126), True, 'import numpy as np\n'), ((10151, 10171), 'numpy.mean', 'np.mean', (['temp_val_F3'], {}), '(temp_val_F3)\n', (10158, 10171), True, 'import numpy as np\n'), ((10173, 10193), 'numpy.mean', 'np.mean', (['temp_val_F4'], {}), '(temp_val_F4)\n', (10180, 10193), True, 'import numpy as np\n'), ((8958, 8982), 'numpy.mean', 'np.mean', (['temp_val_losses'], {}), 
'(temp_val_losses)\n', (8965, 8982), True, 'import numpy as np\n'), ((9059, 9081), 'numpy.mean', 'np.mean', (['temp_val_accs'], {}), '(temp_val_accs)\n', (9066, 9081), True, 'import numpy as np\n'), ((9120, 9145), 'numpy.mean', 'np.mean', (['temp_val_Acc_all'], {}), '(temp_val_Acc_all)\n', (9127, 9145), True, 'import numpy as np\n'), ((9203, 9225), 'numpy.mean', 'np.mean', (['temp_val_Acc1'], {}), '(temp_val_Acc1)\n', (9210, 9225), True, 'import numpy as np\n'), ((9227, 9250), 'numpy.mean', 'np.mean', (['temp_val_Prec1'], {}), '(temp_val_Prec1)\n', (9234, 9250), True, 'import numpy as np\n'), ((9307, 9331), 'numpy.mean', 'np.mean', (['temp_val_Recll1'], {}), '(temp_val_Recll1)\n', (9314, 9331), True, 'import numpy as np\n'), ((9333, 9353), 'numpy.mean', 'np.mean', (['temp_val_F1'], {}), '(temp_val_F1)\n', (9340, 9353), True, 'import numpy as np\n'), ((9411, 9433), 'numpy.mean', 'np.mean', (['temp_val_Acc2'], {}), '(temp_val_Acc2)\n', (9418, 9433), True, 'import numpy as np\n'), ((9435, 9458), 'numpy.mean', 'np.mean', (['temp_val_Prec2'], {}), '(temp_val_Prec2)\n', (9442, 9458), True, 'import numpy as np\n'), ((9515, 9539), 'numpy.mean', 'np.mean', (['temp_val_Recll2'], {}), '(temp_val_Recll2)\n', (9522, 9539), True, 'import numpy as np\n'), ((9541, 9561), 'numpy.mean', 'np.mean', (['temp_val_F2'], {}), '(temp_val_F2)\n', (9548, 9561), True, 'import numpy as np\n'), ((9619, 9641), 'numpy.mean', 'np.mean', (['temp_val_Acc3'], {}), '(temp_val_Acc3)\n', (9626, 9641), True, 'import numpy as np\n'), ((9643, 9666), 'numpy.mean', 'np.mean', (['temp_val_Prec3'], {}), '(temp_val_Prec3)\n', (9650, 9666), True, 'import numpy as np\n'), ((9723, 9747), 'numpy.mean', 'np.mean', (['temp_val_Recll3'], {}), '(temp_val_Recll3)\n', (9730, 9747), True, 'import numpy as np\n'), ((9749, 9769), 'numpy.mean', 'np.mean', (['temp_val_F3'], {}), '(temp_val_F3)\n', (9756, 9769), True, 'import numpy as np\n'), ((9827, 9849), 'numpy.mean', 'np.mean', (['temp_val_Acc4'], {}), '(temp_val_Acc4)\n', 
(9834, 9849), True, 'import numpy as np\n'), ((9851, 9874), 'numpy.mean', 'np.mean', (['temp_val_Prec4'], {}), '(temp_val_Prec4)\n', (9858, 9874), True, 'import numpy as np\n'), ((9931, 9955), 'numpy.mean', 'np.mean', (['temp_val_Recll4'], {}), '(temp_val_Recll4)\n', (9938, 9955), True, 'import numpy as np\n'), ((9957, 9977), 'numpy.mean', 'np.mean', (['temp_val_F4'], {}), '(temp_val_F4)\n', (9964, 9977), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
# Tolerance under which a point's distance to a plane counts as zero.
# NOTE(review): 10e-17 equals 1e-16 — confirm 1e-16 (and not 1e-17) was meant.
EPSILON = 10e-17
def vector_from_points(u: np.array, v: np.array) -> np.array:
    """Return the displacement vector ``uv`` pointing from *u* to *v*.

    Args:
        u (np.array): Starting point of the vector.
        v (np.array): End point of the vector.

    Returns:
        np.array: The vector ``v - u``.
    """
    displacement = v - u
    return displacement
# Plane embedded in 3D space: ax + by + cz + d = 0
class Plane:
    """
    Plane in 3D space, stored as the coefficients of ``ax + by + cz + d = 0``.
    """

    def __init__(self, a: float, b: float, c: float, d: float):
        """ax + by + cz + d = 0"""
        self.a, self.b, self.c, self.d = a, b, c, d

    def point_on_plane(self, point: np.array) -> bool:
        """
        Is the given point on this plane?

        If the (unnormalized) distance to the plane is less than EPSILON,
        the point is considered to be on the plane.

        Args:
            point (np.array): Point to check.

        Returns:
            bool: The point is on the plane.
        """
        return self.normal_dist(point) < EPSILON

    def normal_dist(self, point: np.array) -> float:
        """
        Return the unnormalized distance ``|a*x + b*y + c*z + d|`` of
        *point* to the plane (i.e. not divided by ``||(a, b, c)||``).

        Args:
            point (np.array): Point to check.

        Returns:
            float: Unnormalized distance to the plane.
        """
        return abs(self.a * point[0] + self.b * point[1] + self.c * point[2] + self.d)

    def dist_with_point(self, point: np.array) -> float:
        """
        Return the Euclidean distance of *point* to the plane.

        Args:
            point (np.array): Point to check.

        Returns:
            float: Distance to the plane.
        """
        return self.normal_dist(point) / self.base()

    def base(self) -> float:
        """
        Return the norm of the plane's normal vector ``(a, b, c)``.

        Returns:
            float: ``sqrt(a**2 + b**2 + c**2)``.
        """
        return np.sqrt(self.a ** 2 + self.b ** 2 + self.c ** 2)

    @staticmethod
    def from_vectors(u: np.array, v: np.array, point: np.array = (0, 0, 0)) -> Plane:
        """
        Return the Plane spanned by the two given vectors and passing
        through *point*.

        Args:
            u (np.array): Vector u.
            v (np.array): Vector v.
            point (np.array): Point the plane passes through.

        Returns:
            Plane: Plane defined by the triplet (point, u(vect), v(vect)).
        """
        # The normal of the plane is u x v; d follows from the point.
        normal = np.cross(u, v)
        a, b, c = normal
        d = -(a * point[0] + b * point[1] + c * point[2])
        return Plane(a, b, c, d)

    @staticmethod
    def from_points(P: np.array, Q: np.array, R: np.array) -> Plane:
        """
        Return the Plane through the three points P, Q, R, i.e. the plane
        defined by (P, PQ(vect), PR(vect)).

        Args:
            P (np.array): First point.
            Q (np.array): Second point.
            R (np.array): Third point.

        Returns:
            Plane: Plane defined by those 3 points.
        """
        # Bug fix: the original called the undefined name ``vector.vector``
        # (NameError on every call); the edge vectors are computed directly.
        PQ = np.subtract(Q, P)
        PR = np.subtract(R, P)
        return Plane.from_vectors(PQ, PR, P)
| [
"numpy.cross",
"numpy.sqrt"
] | [((1799, 1847), 'numpy.sqrt', 'np.sqrt', (['(self.a ** 2 + self.b ** 2 + self.c ** 2)'], {}), '(self.a ** 2 + self.b ** 2 + self.c ** 2)\n', (1806, 1847), True, 'import numpy as np\n'), ((2319, 2333), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (2327, 2333), True, 'import numpy as np\n')] |
import pylab as pl
import matplotlib
from matplotlib.collections import PatchCollection
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.text as text
import matplotlib.font_manager as fm
from matplotlib.patches import Ellipse, Circle
import matplotlib.cm as cm
from matplotlib import gridspec
from scipy.interpolate import splev, splrep
import seaborn as sns
import numpy as np
import pandas as pd
from box import Box as ddict
# NOTE(review): the 'seaborn-white' style name was removed in matplotlib >=3.6
# (renamed 'seaborn-v0_8-white'); confirm the pinned matplotlib version.
pl.style.use('seaborn-white')
def drawIVIVE(LPR,POD_nmtx,POD_effect,POD_hcift,
              fig_text=ddict(a=ddict(title=r'(a) $POD_{nam}$ vs $POD_{tox}$',
                                     xlab='Dose [mg/kg/day]'),
                             b=ddict(title=r'(b) Hepatic LOAELs',
                                     xlab='Hepatic effects'),
                             c=ddict(title=r'(c) HCI AEDs',
                                     xlab='HCI Endpoints')
                            ),
              Conci=ddict(heprn_max_conc=ddict(color='mediumblue',
                                              marker='<',
                                              dy=0,z=25,size=30,lw=0.8,
                                              label='$Max_{HepRn}$')
                          ),
              Podi=ddict(heprn_ac50=ddict(color='mediumblue',
                                          marker='D',
                                          dy=0,z=30,size=40,lw=0.8,
                                          label='$POD_{HepRn}$'),
                         heprnatg_ac50=ddict(color='blueviolet',
                                             marker='d',
                                             dy=0.2,z=8,size=30,lw=0.5,
                                             label='$POD_{HepRnAtg}$'),
                         tx_ac50=ddict(color='forestgreen',
                                       marker='8',
                                       dy=-0.2,z=5,size=30,lw=0.4,
                                       label='$POD_{ToxCast}$'),
                         loael=ddict(color='crimson',
                                     marker='s',
                                     dy=0,z=15,size=50,lw=0.5,
                                     label='$POD_{Tox}$'),
                         ),
              lpr_info=ddict(LPR_heprn_ac50=ddict(color='mediumblue',
                                                 label='$LPR_{HepRn}$'),
                             LPR_heprnatg_ac50=ddict(color='blueviolet',
                                                     label='$LPR_{HepRnAtg}$'),
                             LPR_tx_ac50=ddict(color='forestgreen',
                                               label='$LPR_{ToxCast}$')),
              cmap=ddict(tox=cm.hot,hts=cm.Blues_r),
              fig_sz=(17,15),fig_file=None,fig_dpi=600,
              pod_range=ddict(xmin=1e-2,xmax=1e4),
              fill_hm=False,
              bb_legend=[0.75,0.1,0.2,0.1]
              ):
    """
    Three panels with
    a) IVIVE: ranges for POD_nam and POD_tox for chemicals
    b) HTS: AED for potency values for significant effects
    c) TOX: Effect types with Dose values
    2x5 grid with
    1,0: Chemical names
    1,1: IVIVE
    0,1: POD_nam:kde, POD_tox:kde
    0,2: LPR: kde
    1,2: LPR
    0,3: Tox:hist
    1,3: Tox
    0,4: HTS:hist
    1,4: HTS

    NOTE(review): several attributes read below are missing from the default
    keyword values, so the defaults cannot be used as-is:
      * fig_text.a.lg_bb and the whole fig_text.d entry are accessed, but the
        default fig_text defines only a/b/c (without lg_bb) — confirm callers
        always pass a fuller fig_text.
      * drawPOD reads pod_range.num, which the default pod_range lacks.
    Also note the Box defaults are mutable objects shared across calls.
    """
    fig=pl.figure(figsize=fig_sz)
    # 2x6 grid: thin top row (histograms/bars) over the main row
    # [chem names | POD panel | spacer | tox heatmap | HCI heatmap | legend].
    GS = gridspec.GridSpec(2,6,
                           width_ratios=[0.2,0.6,0,0.5,0.2,0.1],
                           height_ratios=[0.1,1])
    # Panel(a)
    drawHistPOD(POD_nmtx,pl.subplot(GS[0,1]),pod_info=Podi,pod_range=pod_range,
                title=fig_text.a.title)
    # drawBoxPOD(POD_nmtx,pl.subplot(GS[0,1]),pod_info=Podi,pod_range=pod_range,
    #             title=fig_text.a.title)
    drawChems(LPR,pl.subplot(GS[1,0]))
    drawPOD(POD_nmtx,pl.subplot(GS[1,1]),pod_info=Podi,
            conc_info=Conci,pod_range=pod_range,
            lg_bb=fig_text.a.lg_bb,
            xlab=fig_text.a.xlab)
    # Panel (b)
    # drawHistLPR(LPR,ax=pl.subplot(GS[0,2]),
    #             lpr_info=lpr_info,
    #             title=fig_text.b.title)
    # drawLPR(LPR,ax=pl.subplot(GS[1,2]),lpr_info=lpr_info,cmap=cmap.lpr,
    #         xlab=fig_text.b.xlab)
    # Panel (c)
    drawBar(POD_effect,pl.subplot(GS[0,3]),cmap=cmap.tox,
            title=fig_text.c.title)
    # hm_tox is currently unused; only the HCI mesh feeds the colorbar below.
    hm_tox=drawHM(POD_effect,pl.subplot(GS[1,3]),cmap=cmap.tox,
                  xlab=fig_text.c.xlab,
                  pod_range=pod_range,
                  fill=fill_hm)
    # Panel (d)
    drawBar(POD_hcift,pl.subplot(GS[0,4]),cmap=cmap.hts,
            title=fig_text.d.title)
    hm_hts=drawHM(POD_hcift,pl.subplot(GS[1,4]),cmap=cmap.hts,
                  xlab=fig_text.d.xlab,
                  pod_range=pod_range,
                  fill=fill_hm)
    # Add the legend for heatmaps
    ax = pl.subplot(GS[1,5])
    ax.set_axis_off()
    # HCI
    ax1 = fig.add_axes(bb_legend)
    ax1.set_axis_off()
    cb = pl.colorbar(hm_hts,ax=ax1)
    cb.ax.set_xlabel('Dose [mg/kg/day]')
    #cb.ax.set_yticklabels([0.1,1,10,100,1000,10000])
    pl.subplots_adjust(wspace=0.05,hspace=0.03,top=0.8,bottom=0.1)
    if fig_file:
        fig.savefig(fig_file,bbox_inches='tight',dpi=fig_dpi)
def drawChems(X0, ax, x0=20):
    """Render the chemical names of *X0* as right-aligned row labels on *ax*.

    One label per row of *X0* (column ``name``), vertically centred on
    row + 0.5; the axis frame and ticks are hidden so only the text shows.
    """
    table = X0.reset_index()
    n_rows = table.shape[0]
    label_font = fm.FontProperties(size=11)
    for row in range(n_rows):
        ax.text(x0, row + 0.5, table.name[row],
                ha='right', va='center', color='black',
                fontproperties=label_font)
    ax.set_ylim(0, n_rows)
    ax.set_xlim(0, x0)
    ax.set_axis_off()
def drawPOD(X,ax,pod_info=None,conc_info=None,
            pod_range=None,xlab=r"Dose [mg/kg/day]",title=None,lg_bb=None):
    """Scatter plot of POD / concentration values, one row per chemical.

    Args:
        X: DataFrame with one row per chemical and columns named by the keys
           of *pod_info* and *conc_info*.
        ax: matplotlib axes to draw on.
        pod_info: mapping column -> Box(color, marker, dy, z, size, lw,
                  label) styling each POD series.
        conc_info: same structure for the concentration series.
        pod_range: Box with xmin/xmax (x limits) and num (count of vertical
                   grid lines). NOTE(review): ``num`` is required here but
                   absent from drawIVIVE's default pod_range — confirm.
        xlab: x axis label.
        title: optional axes title.
        lg_bb: bbox_to_anchor for the legend.
    """
    yoff=0.5
    # Log-spaced x positions for the vertical grid lines.
    VL = np.logspace(np.log10(pod_range.xmin),np.log10(pod_range.xmax),num=pod_range.num)
    #XT = ['0.001','0.01','0.1','1','10','100','1000']
    #XT = 10**VL
    # One horizontal grid line through the middle of each chemical's row.
    ax.hlines(np.arange(X.shape[0])+0.5,
              pod_range.xmin,pod_range.xmax,
              colors='grey',lw=0.5)
    ax.vlines(VL,-1,X.shape[0],colors='grey',lw=0.5)
    # One scatter series per POD column; info.dy offsets overlapping series
    # vertically and info.z sets the draw order.
    for k,info in pod_info.items():
        ax.scatter(X[k],np.arange(X.shape[0])+yoff+info.dy,
                   s=info.size,c=info.color,marker=info.marker,
                   label=info.label,lw=0,
                   zorder=info.z,alpha=1)
        # ax.hlines(np.arange(X.shape[0])+yoff+info.dy,
        #           X[p05c],X[p95c],colors=info.color,
        #           zorder=info.z,
        #           lw=info.lw,alpha=0.6)
    # Same treatment for the measured-concentration columns.
    for k,info in conc_info.items():
        ax.scatter(X[k],np.arange(X.shape[0])+yoff+info.dy,
                   s=info.size,c=info.color,marker=info.marker,
                   label=info.label,lw=0,
                   zorder=info.z,alpha=1)
    ax.set_xscale('log')
    ax.set_ylim(0,X.shape[0])
    ax.set_xlim(pod_range.xmin,pod_range.xmax)
    ax.set_xlabel(xlab)
    if title: ax.set_title(title)
    #ax.set_xticklabels(VL,rotation=90)
    # Row labels are rendered separately (see drawChems), so hide them here.
    for tick in ax.get_yticklabels():
        tick.set_visible(False)
    ax.xaxis.tick_bottom()
    leg = ax.legend(loc=3,fontsize='medium',fancybox=False,
                    bbox_to_anchor=lg_bb,
                    framealpha=2,facecolor='grey')
    #leg.get_frame().set_facecolor('white')
def drawLPR(X,ax,lpr_info=None,cmap=None,title=None,xlab=None,fn_sz=10):
    """Horizontal bar chart of the LPR columns listed in *lpr_info*.

    Each row of *X* gets a group of bars (one per LPR column), coloured by
    the ``color`` of the matching *lpr_info* entry.

    NOTE(review): the *cmap* parameter is accepted but never used, and the
    x-tick font size is hard-coded to 10 instead of *fn_sz* — confirm
    whether these are intentional.
    """
    K1 = list(lpr_info.keys())
    X1 = X[K1].reset_index(drop=True)
    #X1.index=[range(1,X1.shape[0]+1)]
    C1 = [lpr_info[k].color for k in K1]
    #X1.plot.barh(color=C1,ax=ax,legend=False,width=1.0,lw=0,alpha=0.7)
    X1.plot.barh(color=C1,ax=ax,legend=False,width=0.9,lw=0,alpha=0.9,grid=True)
    #ax.hlines(np.arange(X.shape[0])+0.5,X1.min().min(),X1.max().max(),colors='grey',lw=0.5)
    #ax.vlines([-1,0,1,2,3],0,len(X),colors='grey',lw=0.5)
    ax.set_xticks([-1,0,1,2,3])
    ax.set_xticklabels([-1,0,1,2,3],rotation=90)
    for tick in ax.get_xticklabels():
        tick.set_visible(True)
        tick.set_fontsize(10)
    # Hide the y ticks/labels; chemical names are rendered elsewhere.
    for tick in ax.get_yticklines():
        tick.set_visible(False)
    for tick in ax.get_yticklabels():
        tick.set_visible(False)
    if xlab: ax.set_xlabel(xlab)
    if title: ax.set_title(title,fontsize=16)
    ax.set_ylim(-0.5,X.shape[0]-0.5)
    #ax.set_xlim(xmin,-xmin)
def drawHM(X,ax,cmap=None,xlab='Hepatic effects',fn_sz=10,fill=False,
           pod_range=None,title=None):
    """Heatmap of dose values with a log colour scale, chemicals as rows.

    Args:
        X: DataFrame of dose values; NaNs stay uncoloured unless *fill* is
           truthy, in which case they are replaced by the *fill* value first.
        ax: matplotlib axes to draw on.
        cmap: colormap for the cells.
        xlab: x axis label.
        fn_sz: font size of the column tick labels.
        fill: falsy to keep NaNs, otherwise the fillna value.
        pod_range: Box with xmin/xmax bounding the log colour normalization.
        title: optional axes title.

    Returns:
        The mesh returned by ``ax.pcolor`` (the caller uses it to draw a
        shared colorbar).
    """
    ax.xaxis.tick_bottom()
    ax.set_ylim(0,X.shape[0])
    # Log-scaled colour normalization aligned with the POD panels' x range.
    Nrm = matplotlib.colors.LogNorm(vmin=pod_range.xmin,vmax=pod_range.xmax)
    myCol = cm.ScalarMappable(Nrm,cmap=cmap)  # NOTE(review): unused — confirm removable
    if fill: X = X.fillna(fill)
    hm=ax.pcolor(X,norm=Nrm,cmap=cmap,lw=1,edgecolors='#bcbcbc')
    #ax.set_axis_off()
    # Centre the column labels under each heatmap cell.
    ax.set_xticks(np.arange(X.shape[1])+0.5, minor=False)
    xl=ax.set_xticklabels(X.columns,rotation=90)
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    for tick in ax.get_xticklabels():
        tick.set_fontsize(fn_sz)
    for tick in ax.get_yticklines():
        tick.set_visible(False)
    for tick in ax.get_yticklabels():
        tick.set_visible(False)
    ax.set_xlabel(xlab)
    if title: ax.set_title(title)
    return hm
def drawBar(X, ax, cmap=None, title=None):
    """Bar chart of per-column non-null counts of *X* on *ax*.

    The bar colour is ``cmap(128)``; all ticks and the axis frame are
    hidden, leaving only the bars (and an optional title).
    """
    counts = pd.DataFrame(X.notnull().sum()).reset_index()
    counts.columns = ['x', 'y']
    sns.barplot(x='x', y='y', data=counts, ax=ax, color=cmap(128))
    for artist in list(ax.get_xticklines()) + list(ax.get_xticklabels()):
        artist.set_visible(False)
    ax.set_axis_off()
    if title:
        ax.set_title(title, fontsize=16)
def drawHistPOD(X, ax, pod_info=None, pod_range=None, title=None):
    """Stacked histogram (log-spaced bins) of the POD columns in *pod_info*.

    For every key of *pod_info*, the non-null values of the matching column
    of *X* form one layer of the stack, coloured by that entry's ``color``.
    """
    bin_edges = np.logspace(-3, 4, num=50)
    layers = [X[key][X[key].notnull()] for key in pod_info]
    colors = [entry.color for entry in pod_info.values()]
    ax.hist(layers, bins=bin_edges, histtype='bar', stacked=True,
            color=colors, alpha=0.9, rwidth=0.9)
    ax.set_xscale('log')
    ax.set_xlim(pod_range.xmin, pod_range.xmax)
    ax.set_axis_off()
    if title:
        ax.set_title(title, fontsize=16)
def drawBarPOD(X,ax,pod_info=None,pod_range=None,title=None):
    """Stacked log-x histogram of POD values, one layer per ``pod_info`` entry.

    Bug fixed here: the original body referenced ``D`` and ``C`` without ever
    defining them (guaranteed ``NameError``) and contained a stray
    ``X.plot.box()`` call; the data/color lists are now built from
    ``pod_info`` exactly as in the sibling ``drawHistPOD``.
    """
    Bins = np.logspace(-3, 4, num=50)
    D = []
    C = []
    for k, info in pod_info.items():
        Xi = X[k]
        D.append(Xi[Xi.notnull()])
        C.append(info.color)
    ax.hist(D, bins=Bins, histtype='bar', stacked=True,
            color=C, alpha=0.9, rwidth=0.9)
    ax.set_xscale('log')
    ax.set_xlim(pod_range.xmin, pod_range.xmax)
    ax.set_axis_off()
    if title: ax.set_title(title,fontsize=16)
def drawHistLPR(X,ax,lpr_info=None,title=None):
    """Stacked histogram of LPR values over the columns listed in ``lpr_info``."""
    keys = list(lpr_info.keys())
    sub = X[keys]
    # Data-driven bin range: global min/max over the selected columns.
    xmin, xmax = sub.min().min(), sub.max().max()
    edges = np.linspace(xmin, xmax, num=10)
    series = []
    colors = []
    for key, info in lpr_info.items():
        column = X[key]
        series.append(column[column.notnull()])
        colors.append(info.color)
    ax.hist(series, bins=edges, histtype='bar', stacked=True,
            color=colors, alpha=0.9, rwidth=0.9)
    ax.set_xlim(xmin, xmax)
    ax.set_axis_off()
    if title: ax.set_title(title,fontsize=16)
def drawHistLPR1(X,ax,bins=10,title=None,cmap=None):
    """Plain histogram of the non-null values of ``X``, coloured from ``cmap``."""
    values = X[X.notnull()]
    ax.hist(values, bins=bins, histtype='bar',
            color=cmap(20), alpha=0.9, rwidth=0.9)
    ax.set_axis_off()
    if title: ax.set_title(title,fontsize=16)
"box.Box",
"pylab.subplots_adjust",
"matplotlib.font_manager.FontProperties",
"matplotlib.cm.ScalarMappable",
"numpy.logspace",
"pylab.style.use",
"seaborn.barplot",
"numpy.log10",
"pylab.subplot",
"pylab.colorbar",
"pylab.figure",
"matplotlib.colors.LogNorm",
"numpy.linspace",
"numpy.aran... | [((502, 531), 'pylab.style.use', 'pl.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (514, 531), True, 'import pylab as pl\n'), ((2609, 2642), 'box.Box', 'ddict', ([], {'tox': 'cm.hot', 'hts': 'cm.Blues_r'}), '(tox=cm.hot, hts=cm.Blues_r)\n', (2614, 2642), True, 'from box import Box as ddict\n'), ((2723, 2753), 'box.Box', 'ddict', ([], {'xmin': '(0.01)', 'xmax': '(10000.0)'}), '(xmin=0.01, xmax=10000.0)\n', (2728, 2753), True, 'from box import Box as ddict\n'), ((3235, 3260), 'pylab.figure', 'pl.figure', ([], {'figsize': 'fig_sz'}), '(figsize=fig_sz)\n', (3244, 3260), True, 'import pylab as pl\n'), ((3270, 3364), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(6)'], {'width_ratios': '[0.2, 0.6, 0, 0.5, 0.2, 0.1]', 'height_ratios': '[0.1, 1]'}), '(2, 6, width_ratios=[0.2, 0.6, 0, 0.5, 0.2, 0.1],\n height_ratios=[0.1, 1])\n', (3287, 3364), False, 'from matplotlib import gridspec\n'), ((4783, 4803), 'pylab.subplot', 'pl.subplot', (['GS[1, 5]'], {}), '(GS[1, 5])\n', (4793, 4803), True, 'import pylab as pl\n'), ((4906, 4933), 'pylab.colorbar', 'pl.colorbar', (['hm_hts'], {'ax': 'ax1'}), '(hm_hts, ax=ax1)\n', (4917, 4933), True, 'import pylab as pl\n'), ((5037, 5102), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'wspace': '(0.05)', 'hspace': '(0.03)', 'top': '(0.8)', 'bottom': '(0.1)'}), '(wspace=0.05, hspace=0.03, top=0.8, bottom=0.1)\n', (5055, 5102), True, 'import pylab as pl\n'), ((8369, 8436), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {'vmin': 'pod_range.xmin', 'vmax': 'pod_range.xmax'}), '(vmin=pod_range.xmin, vmax=pod_range.xmax)\n', (8394, 8436), False, 'import matplotlib\n'), ((8448, 8481), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', (['Nrm'], {'cmap': 'cmap'}), '(Nrm, cmap=cmap)\n', (8465, 8481), True, 'import matplotlib.cm as cm\n'), ((9228, 9279), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""x"""', 'y': '"""y"""', 'data': 'N', 'ax': 'ax', 'color': 'col'}), "(x='x', 
y='y', data=N, ax=ax, color=col)\n", (9239, 9279), True, 'import seaborn as sns\n'), ((9848, 9874), 'numpy.logspace', 'np.logspace', (['(-3)', '(4)'], {'num': '(50)'}), '(-3, 4, num=50)\n', (9859, 9874), True, 'import numpy as np\n'), ((10339, 10365), 'numpy.logspace', 'np.logspace', (['(-3)', '(4)'], {'num': '(50)'}), '(-3, 4, num=50)\n', (10350, 10365), True, 'import numpy as np\n'), ((10794, 10825), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax'], {'num': '(10)'}), '(xmin, xmax, num=10)\n', (10805, 10825), True, 'import numpy as np\n'), ((3461, 3481), 'pylab.subplot', 'pl.subplot', (['GS[0, 1]'], {}), '(GS[0, 1])\n', (3471, 3481), True, 'import pylab as pl\n'), ((3695, 3715), 'pylab.subplot', 'pl.subplot', (['GS[1, 0]'], {}), '(GS[1, 0])\n', (3705, 3715), True, 'import pylab as pl\n'), ((3741, 3761), 'pylab.subplot', 'pl.subplot', (['GS[1, 1]'], {}), '(GS[1, 1])\n', (3751, 3761), True, 'import pylab as pl\n'), ((4200, 4220), 'pylab.subplot', 'pl.subplot', (['GS[0, 3]'], {}), '(GS[0, 3])\n', (4210, 4220), True, 'import pylab as pl\n'), ((4300, 4320), 'pylab.subplot', 'pl.subplot', (['GS[1, 3]'], {}), '(GS[1, 3])\n', (4310, 4320), True, 'import pylab as pl\n'), ((4485, 4505), 'pylab.subplot', 'pl.subplot', (['GS[0, 4]'], {}), '(GS[0, 4])\n', (4495, 4505), True, 'import pylab as pl\n'), ((4589, 4609), 'pylab.subplot', 'pl.subplot', (['GS[1, 4]'], {}), '(GS[1, 4])\n', (4599, 4609), True, 'import pylab as pl\n'), ((5637, 5661), 'numpy.log10', 'np.log10', (['pod_range.xmin'], {}), '(pod_range.xmin)\n', (5645, 5661), True, 'import numpy as np\n'), ((5662, 5686), 'numpy.log10', 'np.log10', (['pod_range.xmax'], {}), '(pod_range.xmax)\n', (5670, 5686), True, 'import numpy as np\n'), ((618, 688), 'box.Box', 'ddict', ([], {'title': '"""(a) $POD_{nam}$ vs $POD_{tox}$"""', 'xlab': '"""Dose [mg/kg/day]"""'}), "(title='(a) $POD_{nam}$ vs $POD_{tox}$', xlab='Dose [mg/kg/day]')\n", (623, 688), True, 'from box import Box as ddict\n'), ((759, 816), 'box.Box', 'ddict', ([], 
{'title': '"""(b) Hepatic LOAELs"""', 'xlab': '"""Hepatic effects"""'}), "(title='(b) Hepatic LOAELs', xlab='Hepatic effects')\n", (764, 816), True, 'from box import Box as ddict\n'), ((887, 936), 'box.Box', 'ddict', ([], {'title': '"""(c) HCI AEDs"""', 'xlab': '"""HCI Endpoints"""'}), "(title='(c) HCI AEDs', xlab='HCI Endpoints')\n", (892, 936), True, 'from box import Box as ddict\n'), ((1047, 1141), 'box.Box', 'ddict', ([], {'color': '"""mediumblue"""', 'marker': '"""<"""', 'dy': '(0)', 'z': '(25)', 'size': '(30)', 'lw': '(0.8)', 'label': '"""$Max_{HepRn}$"""'}), "(color='mediumblue', marker='<', dy=0, z=25, size=30, lw=0.8, label=\n '$Max_{HepRn}$')\n", (1052, 1141), True, 'from box import Box as ddict\n'), ((1308, 1402), 'box.Box', 'ddict', ([], {'color': '"""mediumblue"""', 'marker': '"""D"""', 'dy': '(0)', 'z': '(30)', 'size': '(40)', 'lw': '(0.8)', 'label': '"""$POD_{HepRn}$"""'}), "(color='mediumblue', marker='D', dy=0, z=30, size=40, lw=0.8, label=\n '$POD_{HepRn}$')\n", (1313, 1402), True, 'from box import Box as ddict\n'), ((1546, 1644), 'box.Box', 'ddict', ([], {'color': '"""blueviolet"""', 'marker': '"""d"""', 'dy': '(0.2)', 'z': '(8)', 'size': '(30)', 'lw': '(0.5)', 'label': '"""$POD_{HepRnAtg}$"""'}), "(color='blueviolet', marker='d', dy=0.2, z=8, size=30, lw=0.5, label=\n '$POD_{HepRnAtg}$')\n", (1551, 1644), True, 'from box import Box as ddict\n'), ((1787, 1886), 'box.Box', 'ddict', ([], {'color': '"""forestgreen"""', 'marker': '"""8"""', 'dy': '(-0.2)', 'z': '(5)', 'size': '(30)', 'lw': '(0.4)', 'label': '"""$POD_{ToxCast}$"""'}), "(color='forestgreen', marker='8', dy=-0.2, z=5, size=30, lw=0.4, label\n ='$POD_{ToxCast}$')\n", (1792, 1886), True, 'from box import Box as ddict\n'), ((2009, 2098), 'box.Box', 'ddict', ([], {'color': '"""crimson"""', 'marker': '"""s"""', 'dy': '(0)', 'z': '(15)', 'size': '(50)', 'lw': '(0.5)', 'label': '"""$POD_{Tox}$"""'}), "(color='crimson', marker='s', dy=0, z=15, size=50, lw=0.5, label=\n '$POD_{Tox}$')\n", (2014, 
2098), True, 'from box import Box as ddict\n'), ((2262, 2310), 'box.Box', 'ddict', ([], {'color': '"""mediumblue"""', 'label': '"""$LPR_{HepRn}$"""'}), "(color='mediumblue', label='$LPR_{HepRn}$')\n", (2267, 2310), True, 'from box import Box as ddict\n'), ((2387, 2438), 'box.Box', 'ddict', ([], {'color': '"""blueviolet"""', 'label': '"""$LPR_{HepRnAtg}$"""'}), "(color='blueviolet', label='$LPR_{HepRnAtg}$')\n", (2392, 2438), True, 'from box import Box as ddict\n'), ((2507, 2558), 'box.Box', 'ddict', ([], {'color': '"""forestgreen"""', 'label': '"""$LPR_{ToxCast}$"""'}), "(color='forestgreen', label='$LPR_{ToxCast}$')\n", (2512, 2558), True, 'from box import Box as ddict\n'), ((5792, 5813), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (5801, 5813), True, 'import numpy as np\n'), ((8624, 8645), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (8633, 8645), True, 'import numpy as np\n'), ((5372, 5398), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'size': '(11)'}), '(size=11)\n', (5389, 5398), True, 'import matplotlib.font_manager as fm\n'), ((6018, 6039), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (6027, 6039), True, 'import numpy as np\n'), ((6460, 6481), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (6469, 6481), True, 'import numpy as np\n')] |
# !/usr/bin/python3
# https://github.com/SintefManufacturing/python-urx/blob/master/urx/urrobot.py
# Must be run with library from the repo
# 10 July 2019 -- this works even while inputting commands from pendant
from tcpUR.pyUR import PyUR
import logging
import time
import numpy as np
import sys
import os
import signal
def keyboardInterruptHandler(signal, frame):
    """SIGINT handler: close the robot connection, then exit the process.

    Note: the ``signal`` parameter shadows the ``signal`` module inside this
    function; it receives the signal number (SIGINT here).

    Bug fixed here: the original body had a second ``print`` and an
    ``os._exit(1)`` *after* ``sys.exit()`` — unreachable, since ``sys.exit``
    raises ``SystemExit`` immediately.
    """
    print("KeyboardInterrupt (ID: {}) has been caught. Cleaning up...".format(signal))
    robot.close()
    print('exiting')
    sys.exit()
# Install the SIGINT handler so Ctrl+C performs a clean robot shutdown
# (close the connection, then exit) instead of a raw KeyboardInterrupt.
signal.signal(signal.SIGINT, keyboardInterruptHandler)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    np.set_printoptions(precision=4)
    # Defined up-front so the except branch can test whether the connection
    # was ever established.
    robot = None
    try:
        robot = PyUR(send_ur5_progs=True)
        # Poll and print the robot state until interrupted (Ctrl+C is routed
        # through the registered SIGINT handler).
        while True:
            pose = robot.get_state('cartesian_info')
            print("robot tcp is at: ", np.array(pose), "\n")
            width = robot.get_state('gripper_width')
            print("robot finger width", width)
            time.sleep(0.05)
    except Exception as e:
        print("Oopsie. Except", e)
        # Fixes from review: close the connection instead of leaking it, and
        # drop the code that sat unreachably after sys.exit().
        if robot is not None:
            robot.close()
        print('exiting')
        sys.exit()
| [
"numpy.set_printoptions",
"logging.basicConfig",
"time.sleep",
"os._exit",
"tcpUR.pyUR.PyUR",
"numpy.array",
"signal.signal",
"sys.exit"
] | [((570, 624), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'keyboardInterruptHandler'], {}), '(signal.SIGINT, keyboardInterruptHandler)\n', (583, 624), False, 'import signal\n'), ((514, 524), 'sys.exit', 'sys.exit', ([], {}), '()\n', (522, 524), False, 'import sys\n'), ((556, 567), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (564, 567), False, 'import os\n'), ((657, 696), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (676, 696), False, 'import logging\n'), ((701, 733), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (720, 733), True, 'import numpy as np\n'), ((760, 785), 'tcpUR.pyUR.PyUR', 'PyUR', ([], {'send_ur5_progs': '(True)'}), '(send_ur5_progs=True)\n', (764, 785), False, 'from tcpUR.pyUR import PyUR\n'), ((1355, 1371), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1365, 1371), False, 'import time\n'), ((1536, 1546), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1544, 1546), False, 'import sys\n'), ((1586, 1597), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1594, 1597), False, 'import os\n'), ((898, 912), 'numpy.array', 'np.array', (['pose'], {}), '(pose)\n', (906, 912), True, 'import numpy as np\n')] |
"dataset: defines some representations for data source from Zarr & Cytomine."
import abc
import pathlib
from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
import numpy as np
import numpy.typing as npt
from sklearn.model_selection import train_test_split
from cytomine.models import (
AnnotationCollection,
SliceInstanceCollection,
TermCollection,
)
from msi_zarr_analysis.cli.utils import load_img_mask
from msi_zarr_analysis.ml.dataset.utils import (
bin_array_dataset,
build_class_masks,
nonbinned_array_dataset,
)
from msi_zarr_analysis.utils.check import open_group_ro
from msi_zarr_analysis.utils.cytomine_utils import iter_annoation_single_term
from msi_zarr_analysis.utils.iter_chunks import clean_slice_tuple
class Dataset(abc.ABC):
    """Abstract interface over a supervised (attributes, class) dataset.

    Concrete subclasses provide row iteration and — when every row has the
    same number of attributes — a tabular view of the whole dataset, plus
    the attribute and class names.
    """

    @abc.abstractmethod
    def iter_rows(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        """Iterate all rows of attribute and class from the dataset

        Yields
        ------
        Iterator[Tuple[npt.NDArray, npt.NDArray]]
            pair of (attributes, class)
        """
        ...

    @abc.abstractmethod
    def is_table_like(self) -> bool:
        "If true, the dataset has a fixed number of attribute for all rows."
        ...

    @abc.abstractmethod
    def as_table(self) -> Tuple[npt.NDArray, npt.NDArray]:
        """Whole dataset as a pair of table of rows.

        May raise an error for datasets where the number of attribute is not
        constant. See Dataset.is_table_like .

        Returns
        -------
        Tuple[npt.NDArray, npt.NDArray]
            pair of (table of attributes, table of classes)
        """
        ...

    def as_train_test_tables(
        self, **kwargs
    ) -> Tuple[npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray]:
        """Whole dataset, split as train and test pairs.

        May raise an error for datasets where the number of attribute is not
        constant. See Dataset.is_table_like .

        Returns
        -------
        Tuple[npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray]
            X_train, X_test, y_train, y_test
        """
        # kwargs are forwarded to sklearn's train_test_split
        # (e.g. test_size, stratify, random_state).
        return train_test_split(*self.as_table(), **kwargs)

    @abc.abstractmethod
    def attribute_names(self) -> List[str]:
        """List of names matching the attributes.

        May raise an error for datasets where the number of attribute is not
        constant. See Dataset.is_table_like .

        Returns
        -------
        List[str]
            a list of name, matching the attributes in order
        """
        ...

    @abc.abstractmethod
    def class_names(self) -> List[str]:
        """List of names matching the classes.

        May return an empty list if no names are found.

        Returns:
            List[str]: list of classes, matching the class index in the targets
        """

    def __raw_check_dataset(self) -> Tuple[np.ndarray, np.ndarray]:
        # Uncached worker behind check_dataset(): returns the (mostly NaN)
        # feature-correlation matrix and the per-class occurrence counts.
        ds_x, ds_y = self.as_table()
        n_features = ds_x.shape[1]
        # check X : look for correlations between the samples
        corr = np.zeros((n_features, n_features))
        # The pairwise correlation computation is disabled (kept here for
        # reference); the matrix is left as NaN off-diagonal.
        # for i in range(1, n_features):
        #     for j in range(i + 1, n_features):
        #         corr[i, j] = np.corrcoef(ds_x[:, i], ds_x[:, j])[0, 1]
        corr[:] = np.nan
        # ensure symmetric matrix with unitary diagonal
        corr = (corr + corr.T) / 2
        np.fill_diagonal(corr, 1.0)
        # check Y : look for class imbalance
        _, occurrences = np.unique(ds_y, return_counts=True)
        return corr, occurrences

    def check_dataset(
        self,
        cache: bool = False,
        print_: bool = False,
        print_once: bool = False,
    ) -> Tuple[np.ndarray, float]:
        """identify danger of feature correlations and class imbalance in the
        tabular dataset.

        May raise an error for datasets where the number of attribute is not
        constant. See Dataset.is_table_like .

        Args:
            cache (bool, optional): cache the computation. Defaults to False.
            print_ (bool, optional): print results to stdout. Defaults to False.
            print_once (bool, optional): subsequent calls to this function don't print more than once. Defaults to False.

        Returns:
            Tuple[np.ndarray, float]: the correlation matrix and the highest relative occurrence
        """
        # NOTE: the cache key is a plain string attribute name, so (unlike a
        # real dunder attribute) it is not name-mangled; it is stored on the
        # instance via setattr/getattr.
        cache_attr_name = "__cached_check_dataset"
        if cache and hasattr(self, cache_attr_name):
            corr, occurrences = getattr(self, cache_attr_name)
        else:
            corr, occurrences = self.__raw_check_dataset()
            if cache:
                setattr(self, cache_attr_name, (corr, occurrences))
        # A marker attribute on the instance records that a report was already
        # printed, implementing the print_once behaviour.
        single_print_attr_name = "__cached_single_print"
        if print_ and not (print_once and hasattr(self, single_print_attr_name)):
            setattr(self, single_print_attr_name, True)
            # print("checking inter-feature correlation:")
            # for i in range(corr.shape[0]):
            #     for j in range(i + 1, corr.shape[1]):
            #         if np.abs(corr[i, j]) > 0.8:
            #             print(f"\t{i=} {j=} {corr[i, j]=}")
            print("checking for class imbalance:")
            n_classes = occurrences.size
            n_items = np.sum(occurrences)
            # number of items in each class
            # print(f"{occurrences=}")
            # NOTE(review): keys here are the occurrence *counts*, so two
            # classes with the same count collapse to one entry — confirm
            # whether a name->count mapping was intended.
            occurrence_per_class = dict(zip(occurrences, self.class_names()))
            print(f"{occurrence_per_class=}")
            # max and min relative occurrence
            print(f"{np.max(occurrences / n_items) = :.4f}")
            print(f"{np.min(occurrences / n_items) = :.4f}")
            # ideal occurrence
            print(f". . . . . . . . {1 / n_classes = :.4f}")
        # largest relative occurrence
        imbalance = np.max(occurrences) / np.sum(occurrences)
        return corr, imbalance
class Tabular(Dataset):
    """A :class:`Dataset` held entirely in memory as two aligned arrays."""

    def __init__(
        self,
        dataset_x: np.ndarray,
        dataset_y: np.ndarray,
        attributes_names: List[str],
        classes_names: List[str],
    ) -> None:
        super().__init__()
        self.dataset_x = dataset_x
        self.dataset_y = dataset_y
        self.attribute_names_ = attributes_names
        self.classes_names_ = classes_names

    def iter_rows(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        # Pair each attribute row with its target, in storage order.
        for pair in zip(self.dataset_x, self.dataset_y):
            yield pair

    def is_table_like(self) -> bool:
        # Tabular by construction.
        return True

    def as_table(self) -> Tuple[npt.NDArray, npt.NDArray]:
        return self.dataset_x, self.dataset_y

    def attribute_names(self) -> List[str]:
        if not self.attribute_names_:
            raise ValueError("no attribute name found")
        return self.attribute_names_

    def class_names(self) -> List[str]:
        if not self.classes_names_:
            raise ValueError("no class name found")
        return self.classes_names_
class ZarrAbstractDataset(Dataset):
    """Base class for datasets backed by an OME-Zarr MSI image.

    Normalises the ``classes`` argument (list of mask paths, or dict of
    name -> mask/path), optionally applies an ROI mask, and restricts the
    usable area with Y/X slices.

    Bug fixed here: the ROI validation used ``if roi_mask:``, which raises
    ``ValueError`` ("truth value of an array ... is ambiguous") whenever a
    numpy boolean mask — one of the documented input types — is passed;
    the check is now ``roi_mask is not None``.
    """

    def __init__(
        self,
        data_zarr_path: str,
        classes: Union[
            Dict[str, npt.NDArray[np.dtype("bool")]],
            Dict[str, str],
            List[Union[str, pathlib.Path]],
        ],
        roi_mask: Union[str, None, npt.NDArray[np.dtype("bool")]] = None,
        background_class: bool = False,
        y_slice: slice = slice(None),
        x_slice: slice = slice(None),
    ) -> None:
        super().__init__()
        if not classes:
            raise ValueError("empty class set")
        # Normalise `classes` to a dict of name -> boolean mask array.
        if isinstance(classes, (list, tuple)):
            for idx, cls in enumerate(classes):
                if not isinstance(cls, (str, pathlib.Path)):
                    raise ValueError(
                        f"classes[{idx}] path has invalid type {type(cls)}"
                    )
            # Use the file stem as the class name.
            classes = {pathlib.Path(cls).stem: load_img_mask(cls) for cls in classes}
        elif isinstance(classes, dict):
            classes = classes.copy()
            for key, value in classes.items():
                if isinstance(value, (pathlib.Path, str)):
                    classes[key] = load_img_mask(value)
                elif isinstance(value, np.ndarray):
                    continue
                else:
                    raise ValueError(f"classes[{key}] has invalid type {type(value)}")
        else:
            raise ValueError(f"classes has invalid type {type(classes)}")
        # `if roi_mask:` would be ambiguous for ndarray masks — test identity.
        if roi_mask is not None:
            if not isinstance(roi_mask, (str, pathlib.Path, np.ndarray)):
                raise ValueError(f"roi_mask has invalid type {type(roi_mask)}")
        self.z = open_group_ro(data_zarr_path)
        if isinstance(roi_mask, (str, pathlib.Path)):
            roi_mask = load_img_mask(roi_mask)
        self.cls_mask, _, self.class_names_ = build_class_masks(
            self.z, classes, roi_mask, background_class
        )
        # Clamp the requested window to the image extent.
        self.y_slice, self.x_slice = clean_slice_tuple(
            self.z["/0"].shape[2:], y_slice, x_slice
        )
        # Lazily-computed (attributes, classes) pair; see subclasses.
        self._cached_ds = None

    def class_names(self) -> List[str]:
        return self.class_names_
class ZarrContinuousNonBinned(ZarrAbstractDataset):
    """Dataset over a 'continuous'-mode OME-Zarr MSI image.

    Each channel (mass, m/Z) is one attribute key and its intensity the
    attribute value; no preprocessing is applied.  The pixels considered
    can be restricted by an ROI mask, per-class masks, and Y/X slices
    (all handled by the base class).
    """

    def __init__(
        self,
        data_zarr_path: str,
        classes: Union[
            Dict[str, npt.NDArray[np.dtype("bool")]],
            Dict[str, str],
            List[Union[str, pathlib.Path]],
        ],
        roi_mask: Union[str, None, npt.NDArray[np.dtype("bool")]] = None,
        background_class: bool = False,
        y_slice: slice = slice(None),
        x_slice: slice = slice(None),
    ) -> None:
        super().__init__(
            data_zarr_path, classes, roi_mask, background_class, y_slice, x_slice
        )
        # This class only supports 'continuous' zarr archives.
        binary_mode = self.z.attrs["pims-msi"]["binary_mode"]
        if binary_mode != "continuous":
            raise ValueError(f"invalid {binary_mode=}: expected 'continuous'")

    def iter_rows(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        attributes, labels = self.as_table()
        yield from zip(attributes, labels)

    def is_table_like(self) -> bool:
        return True

    def __load_ds(self) -> Tuple[npt.NDArray, npt.NDArray]:
        # The heavy lifting is delegated to the utils module.
        return nonbinned_array_dataset(
            self.z, self.cls_mask, self.y_slice, self.x_slice
        )

    def as_table(self) -> Tuple[npt.NDArray, npt.NDArray]:
        # Compute once, then serve the cached (attributes, classes) pair.
        if self._cached_ds is None:
            self._cached_ds = self.__load_ds()
        return self._cached_ds

    def get_dataset_x(self) -> Tuple[npt.NDArray]:
        return self.as_table()[0]

    def get_dataset_y(self) -> Tuple[npt.NDArray]:
        return self.as_table()[1]

    def attribute_names(self) -> List[str]:
        # One name per m/Z channel, read from the zarr labels.
        return [str(mz) for mz in self.z["/labels/mzs/0"][:, 0, 0, 0]]
class ZarrProcessedBinned(ZarrAbstractDataset):
    """Dataset over a 'processed'-mode OME-Zarr MSI image, binned on m/Z.

    Each attribute is one ``(bin_lo, bin_hi)`` mass interval; its value is
    the summed intensity of the spectrum over that interval.  The pixels
    considered can be restricted by an ROI mask, per-class masks, and Y/X
    slices (all handled by the base class).
    """

    def __init__(
        self,
        data_zarr_path: str,
        classes: Union[
            Dict[str, npt.NDArray[np.dtype("bool")]],
            Dict[str, str],
            List[Union[str, pathlib.Path]],
        ],
        bin_lo: npt.NDArray,
        bin_hi: npt.NDArray,
        roi_mask: Union[str, None, npt.NDArray[np.dtype("bool")]] = None,
        background_class: bool = False,
        y_slice: slice = slice(None),
        x_slice: slice = slice(None),
    ) -> None:
        super().__init__(
            data_zarr_path, classes, roi_mask, background_class, y_slice, x_slice
        )
        self.bin_lo = bin_lo
        self.bin_hi = bin_hi
        # This class only supports 'processed' zarr archives.
        binary_mode = self.z.attrs["pims-msi"]["binary_mode"]
        if binary_mode != "processed":
            raise ValueError(f"invalid {binary_mode=}: expected 'processed'")

    def iter_rows(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        attributes, labels = self.as_table()
        yield from zip(attributes, labels)

    def is_table_like(self) -> bool:
        return True

    def __load_ds(self) -> Tuple[npt.NDArray, npt.NDArray]:
        # The heavy lifting is delegated to the utils module.
        return bin_array_dataset(
            self.z,
            self.cls_mask,
            self.y_slice,
            self.x_slice,
            self.bin_lo,
            self.bin_hi,
        )

    def as_table(self) -> Tuple[npt.NDArray, npt.NDArray]:
        # Compute once, then serve the cached (attributes, classes) pair.
        if self._cached_ds is None:
            self._cached_ds = self.__load_ds()
        return self._cached_ds

    def get_dataset_x(self) -> Tuple[npt.NDArray]:
        return self.as_table()[0]

    def get_dataset_y(self) -> Tuple[npt.NDArray]:
        return self.as_table()[1]

    def attribute_names(self) -> List[str]:
        # Describe each attribute as "bin center ± half width".
        return [
            f"{0.5*(lo + hi)} ± {0.5*(hi - lo)}"
            for lo, hi in zip(self.bin_lo, self.bin_hi)
        ]
class CytomineNonBinned(Dataset):
    """Dataset built from the spectral profiles of Cytomine annotations.

    Rows are the per-pixel profiles of the annotations of one image; the
    class index is derived from each annotation's (single) term.

    Bug fixed here: with the default ``term_set=None`` the constructor
    stores an *empty* set, and the old filter ``term_name not in
    self.term_set`` then rejected every annotation — contradicting the
    documented behaviour ("all terms loaded").  An empty whitelist now
    disables filtering.
    """

    def __init__(
        self,
        project_id: int,
        image_id: int,
        term_set: Optional[Set[str]] = None,
        cache_data: bool = True,
    ) -> None:
        """
        Parameters
        ----------
        project_id : int
        image_id : int
        term_set : Optional[Set[str]], optional
            whitelist of term to load, by default None (all terms loaded)
        cache_data : bool, optional
            data must be tabular, by default True
        """
        super().__init__()
        self.project_id = project_id
        self.image_id = image_id
        self.term_set = term_set or set()
        self.cache_data = bool(cache_data)
        self._cached_table = None
        self.cached_term_lst = None

    def iter_rows(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        if self.cache_data:
            # Tabular path: iterate the cached table.
            for row in zip(*self.as_table()):
                yield row
            return
        # Streaming path: fetch from the server on every iteration.
        for profile, class_idx in self.__raw_iter():
            yield np.array(profile), class_idx

    def __raw_iter(self) -> Iterator[Tuple[npt.NDArray, npt.NDArray]]:
        # Fetch terms and annotations from the Cytomine server.
        term_collection = TermCollection().fetch_with_filter("project", self.project_id)
        annotations = AnnotationCollection(
            project=self.project_id, image=self.image_id, showTerm=True, showWKT=True
        ).fetch()
        term_lst = []
        for annotation, term in iter_annoation_single_term(
            annotations,
            term_collection,
        ):
            term_name = term.name
            # An empty whitelist means "keep every term" (see class docstring).
            if self.term_set and term_name not in self.term_set:
                continue
            try:
                term_idx = term_lst.index(term_name)
            except ValueError:
                # First time this term is seen: assign it the next index.
                term_idx = len(term_lst)
                term_lst.append(term_name)
            for profile in annotation.profile():
                yield profile["profile"], term_idx
        self.cached_term_lst = term_lst

    def __load_ds(self) -> Tuple[npt.NDArray, npt.NDArray]:
        attributes, classes = zip(*self.__raw_iter())
        # Preserve the server-provided scalar type for the attribute table.
        dtype = type(attributes[0][0])
        return np.array(attributes, dtype=dtype), np.array(classes)

    def as_table(self) -> Tuple[npt.NDArray, npt.NDArray]:
        if not self._cached_table:
            self._cached_table = self.__load_ds()
            if not self.cache_data:
                # remove cache if it shouldn't be there
                tmp, self._cached_table = self._cached_table, None
                return tmp
        return self._cached_table

    def is_table_like(self) -> bool:
        # Table construction fails when profiles have inconsistent lengths.
        try:
            _ = self.as_table()
            return True
        except (ValueError, IndexError):
            return False

    def attribute_names(self) -> List[str]:
        # One attribute per image slice (channel), named after the slice.
        return [
            xySlice.zName
            for xySlice in SliceInstanceCollection().fetch_with_filter(
                "imageinstance", self.image_id
            )
        ]

    def class_names(self) -> List[str]:
        # The term list is populated as a side effect of a full iteration.
        if self.cached_term_lst is None:
            for _ in self.__raw_iter():
                pass
        return self.cached_term_lst
| [
"msi_zarr_analysis.ml.dataset.utils.build_class_masks",
"numpy.sum",
"msi_zarr_analysis.utils.iter_chunks.clean_slice_tuple",
"pathlib.Path",
"msi_zarr_analysis.utils.cytomine_utils.iter_annoation_single_term",
"numpy.unique",
"cytomine.models.AnnotationCollection",
"numpy.max",
"numpy.fill_diagonal... | [((3057, 3091), 'numpy.zeros', 'np.zeros', (['(n_features, n_features)'], {}), '((n_features, n_features))\n', (3065, 3091), True, 'import numpy as np\n'), ((3380, 3407), 'numpy.fill_diagonal', 'np.fill_diagonal', (['corr', '(1.0)'], {}), '(corr, 1.0)\n', (3396, 3407), True, 'import numpy as np\n'), ((3479, 3514), 'numpy.unique', 'np.unique', (['ds_y'], {'return_counts': '(True)'}), '(ds_y, return_counts=True)\n', (3488, 3514), True, 'import numpy as np\n'), ((8600, 8629), 'msi_zarr_analysis.utils.check.open_group_ro', 'open_group_ro', (['data_zarr_path'], {}), '(data_zarr_path)\n', (8613, 8629), False, 'from msi_zarr_analysis.utils.check import open_group_ro\n'), ((8779, 8841), 'msi_zarr_analysis.ml.dataset.utils.build_class_masks', 'build_class_masks', (['self.z', 'classes', 'roi_mask', 'background_class'], {}), '(self.z, classes, roi_mask, background_class)\n', (8796, 8841), False, 'from msi_zarr_analysis.ml.dataset.utils import bin_array_dataset, build_class_masks, nonbinned_array_dataset\n'), ((8902, 8961), 'msi_zarr_analysis.utils.iter_chunks.clean_slice_tuple', 'clean_slice_tuple', (["self.z['/0'].shape[2:]", 'y_slice', 'x_slice'], {}), "(self.z['/0'].shape[2:], y_slice, x_slice)\n", (8919, 8961), False, 'from msi_zarr_analysis.utils.iter_chunks import clean_slice_tuple\n'), ((10845, 10919), 'msi_zarr_analysis.ml.dataset.utils.nonbinned_array_dataset', 'nonbinned_array_dataset', (['self.z', 'self.cls_mask', 'self.y_slice', 'self.x_slice'], {}), '(self.z, self.cls_mask, self.y_slice, self.x_slice)\n', (10868, 10919), False, 'from msi_zarr_analysis.ml.dataset.utils import bin_array_dataset, build_class_masks, nonbinned_array_dataset\n'), ((13349, 13448), 'msi_zarr_analysis.ml.dataset.utils.bin_array_dataset', 'bin_array_dataset', (['self.z', 'self.cls_mask', 'self.y_slice', 'self.x_slice', 'self.bin_lo', 'self.bin_hi'], {}), '(self.z, self.cls_mask, self.y_slice, self.x_slice, self.\n bin_lo, self.bin_hi)\n', (13366, 13448), False, 
'from msi_zarr_analysis.ml.dataset.utils import bin_array_dataset, build_class_masks, nonbinned_array_dataset\n'), ((15484, 15540), 'msi_zarr_analysis.utils.cytomine_utils.iter_annoation_single_term', 'iter_annoation_single_term', (['annotations', 'term_collection'], {}), '(annotations, term_collection)\n', (15510, 15540), False, 'from msi_zarr_analysis.utils.cytomine_utils import iter_annoation_single_term\n'), ((5284, 5303), 'numpy.sum', 'np.sum', (['occurrences'], {}), '(occurrences)\n', (5290, 5303), True, 'import numpy as np\n'), ((5831, 5850), 'numpy.max', 'np.max', (['occurrences'], {}), '(occurrences)\n', (5837, 5850), True, 'import numpy as np\n'), ((5853, 5872), 'numpy.sum', 'np.sum', (['occurrences'], {}), '(occurrences)\n', (5859, 5872), True, 'import numpy as np\n'), ((8708, 8731), 'msi_zarr_analysis.cli.utils.load_img_mask', 'load_img_mask', (['roi_mask'], {}), '(roi_mask)\n', (8721, 8731), False, 'from msi_zarr_analysis.cli.utils import load_img_mask\n'), ((16181, 16214), 'numpy.array', 'np.array', (['attributes'], {'dtype': 'dtype'}), '(attributes, dtype=dtype)\n', (16189, 16214), True, 'import numpy as np\n'), ((16216, 16233), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (16224, 16233), True, 'import numpy as np\n'), ((7849, 7867), 'msi_zarr_analysis.cli.utils.load_img_mask', 'load_img_mask', (['cls'], {}), '(cls)\n', (7862, 7867), False, 'from msi_zarr_analysis.cli.utils import load_img_mask\n'), ((15217, 15233), 'cytomine.models.TermCollection', 'TermCollection', ([], {}), '()\n', (15231, 15233), False, 'from cytomine.models import AnnotationCollection, SliceInstanceCollection, TermCollection\n'), ((15302, 15402), 'cytomine.models.AnnotationCollection', 'AnnotationCollection', ([], {'project': 'self.project_id', 'image': 'self.image_id', 'showTerm': '(True)', 'showWKT': '(True)'}), '(project=self.project_id, image=self.image_id, showTerm\n =True, showWKT=True)\n', (15322, 15402), False, 'from cytomine.models import 
AnnotationCollection, SliceInstanceCollection, TermCollection\n'), ((7825, 7842), 'pathlib.Path', 'pathlib.Path', (['cls'], {}), '(cls)\n', (7837, 7842), False, 'import pathlib\n'), ((15090, 15107), 'numpy.array', 'np.array', (['profile'], {}), '(profile)\n', (15098, 15107), True, 'import numpy as np\n'), ((5579, 5608), 'numpy.max', 'np.max', (['(occurrences / n_items)'], {}), '(occurrences / n_items)\n', (5585, 5608), True, 'import numpy as np\n'), ((5640, 5669), 'numpy.min', 'np.min', (['(occurrences / n_items)'], {}), '(occurrences / n_items)\n', (5646, 5669), True, 'import numpy as np\n'), ((7251, 7267), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (7259, 7267), True, 'import numpy as np\n'), ((8106, 8126), 'msi_zarr_analysis.cli.utils.load_img_mask', 'load_img_mask', (['value'], {}), '(value)\n', (8119, 8126), False, 'from msi_zarr_analysis.cli.utils import load_img_mask\n'), ((10010, 10026), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (10018, 10026), True, 'import numpy as np\n'), ((12457, 12473), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (12465, 12473), True, 'import numpy as np\n'), ((16873, 16898), 'cytomine.models.SliceInstanceCollection', 'SliceInstanceCollection', ([], {}), '()\n', (16896, 16898), False, 'from cytomine.models import AnnotationCollection, SliceInstanceCollection, TermCollection\n'), ((7101, 7117), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (7109, 7117), True, 'import numpy as np\n'), ((9860, 9876), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (9868, 9876), True, 'import numpy as np\n'), ((12249, 12265), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (12257, 12265), True, 'import numpy as np\n')] |
import numpy as np
import pybullet as p
import igibson.utils.transform_utils as T
from igibson.controllers import ControlType, ManipulationController
from igibson.utils.filters import MovingAverageFilter
# Different modes
IK_MODE_COMMAND_DIMS = {
"pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
"pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose
"position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
"position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
IK_MODES = set(IK_MODE_COMMAND_DIMS.keys())
class InverseKinematicsController(ManipulationController):
    """
    Controller class to convert (delta) EEF commands into joint velocities using Inverse Kinematics (IK).

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2. Run Inverse Kinematics to back out joint velocities for a desired task frame command
        3. Clips the resulting command by the motor (velocity) limits
    """

    def __init__(
        self,
        base_body_id,
        task_link_id,
        task_name,
        control_freq,
        default_joint_pos,
        joint_damping,
        control_limits,
        joint_idx,
        command_input_limits="default",
        command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
        kv=2.0,
        mode="pose_delta_ori",
        smoothing_filter_size=None,
        workspace_pose_limiter=None,
        joint_range_tolerance=0.01,
        ik_joint_idx=None,
    ):
        """
        :param base_body_id: int, unique pybullet ID corresponding to the pybullet body being controlled by IK
        :param task_link_id: int, pybullet link ID corresponding to the link within the body being controlled by IK
        :param task_name: str, name assigned to this task frame for computing IK control. During control calculations,
            the inputted control_dict should include entries named <@task_name>_pos_relative and
            <@task_name>_quat_relative. See self._command_to_control() for what these values should entail.
        :param control_freq: int, controller loop frequency
        :param default_joint_pos: Array[float], default joint positions, used as part of nullspace controller in IK
        :param joint_damping: Array[float], joint damping parameters associated with each joint
            in the body being controlled
        :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
            control signal. Should specify per-actuator type limits, i.e.:
                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "torque": [[min], [max]]
                "has_limit": [...bool...]
            Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
        :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
            controller-relevant values during control computations
        :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
            if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
            If None, no clipping will be used. If "default", range will be set to (-1, 1)
        :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set,
            is the min/max scaled command. If both this value and @command_input_limits is not None,
            then all inputted command values will be scaled from the input range to the output range.
            If either is None, no scaling will be used. If "default", then this range will automatically be set
            to the @control_limits entry corresponding to self.control_type
        :param kv: float, Gain applied to error between IK-commanded joint positions and current joint positions
        :param mode: str, mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz)
            cartesian values, relative to the robot base frame. Valid options are:
                - "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
                    where the orientation is given in absolute axis-angle coordinates
                - "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
                - "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
                    with orientation commands being kept as fixed initial absolute orientation
                - "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
                    with orientation commands automatically being sent as 0s (so can drift over time)
        :param smoothing_filter_size: None or int, if specified, sets the size of a moving average filter to apply
            on all outputted IK joint positions.
        :param workspace_pose_limiter: None or function, if specified, callback method that should clip absolute
            target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
            range (i.e.: this can be unique to each robot, and implemented by each embodiment).
            Function signature should be:
                def limiter(command_pos: Array[float], command_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]]
            where pos_command is (x,y,z) cartesian position values, command_quat is (x,y,z,w) quarternion orientation
            values, and the returned tuple is the processed (pos, quat) command.
        :param joint_range_tolerance: float, amount to add to each side of the inputted joint range, to improve IK
            convergence stability (e.g.: for joint_ranges = 0 for no limits, prevents NaNs from occurring)
        :param ik_joint_idx: None or Array[int], joint indices in the *pybullet* model corresponding to @joint_idx.
            Only needed when the robot has virtual joints (which get indices in iGibson but not in pybullet);
            if None, @joint_idx is used directly.
        """
        # Store arguments
        # If your robot has virtual joints, you should pass ik_joint_idx with the indices in the pybullet model
        # that correspond to the joint indices in iGibson (virtual joints will get ids in iG but not in PB).
        # if your robot doesn't have virtual joints, we use joint_idx
        self.ik_joint_idx = ik_joint_idx if ik_joint_idx is not None else joint_idx
        self.control_filter = (
            None
            if smoothing_filter_size in {None, 0}
            else MovingAverageFilter(obs_dim=len(self.ik_joint_idx), filter_width=smoothing_filter_size)
        )
        # BUGFIX: the assertion message must be an f-string; previously "{IK_MODES}" and
        # "{mode}" were printed literally instead of being interpolated.
        assert mode in IK_MODES, f"Invalid ik mode specified! Valid options are: {IK_MODES}, got: {mode}"
        self.mode = mode
        self.kv = kv
        self.workspace_pose_limiter = workspace_pose_limiter
        self.base_body_id = base_body_id
        self.task_link_id = task_link_id
        self.task_name = task_name
        self.default_joint_pos = np.array(default_joint_pos)
        self.joint_damping = np.array(joint_damping)
        self.joint_range_tolerance = joint_range_tolerance
        # Other variables that will be filled in at runtime
        self._quat_target = None
        # Run super init
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            joint_idx=joint_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
        )

    def reset(self):
        # Reset the smoothing filter (if any) and clear internal control state
        if self.control_filter is not None:
            self.control_filter.reset()
        self._quat_target = None

    def dump_state(self):
        """
        :return Any: the state of the object other than what's not included in pybullet state.
        """
        dump = {"quat_target": self._quat_target if self._quat_target is None else self._quat_target.tolist()}
        if self.control_filter is not None:
            dump["control_filter"] = self.control_filter.dump_state()
        return dump

    def load_state(self, dump):
        """
        Load the state of the object other than what's not included in pybullet state.

        :param dump: Any: the dumped state
        """
        self._quat_target = dump["quat_target"] if dump["quat_target"] is None else np.array(dump["quat_target"])
        if self.control_filter is not None:
            self.control_filter.load_state(dump["control_filter"])

    @staticmethod
    def _pose_in_base_to_pose_in_world(pose_in_base, base_in_world):
        """
        Convert a pose in the base frame to a pose in the world frame.

        :param pose_in_base: Tuple[Array[float], Array[float]], Cartesian xyz position,
            quaternion xyzw orientation tuple corresponding to the desired pose in its local base frame
        :param base_in_world: Tuple[Array[float], Array[float]], Cartesian xyz position,
            quaternion xyzw orientation tuple corresponding to the base pose in the global static frame
        :return Tuple[Array[float], Array[float]]: Cartesian xyz position,
            quaternion xyzw orientation tuple corresponding to the desired pose in the global static frame
        """
        pose_in_base_mat = T.pose2mat(pose_in_base)
        base_pose_in_world_mat = T.pose2mat(base_in_world)
        pose_in_world_mat = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base_mat, pose_A_in_B=base_pose_in_world_mat)
        return T.mat2pose(pose_in_world_mat)

    def _command_to_control(self, command, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
        This processes the command based on self.mode, possibly clips the command based on self.workspace_pose_limiter,

        :param command: Array[float], desired (already preprocessed) command to convert into control signals
            Is one of:
                (dx,dy,dz) - desired delta cartesian position
                (dx,dy,dz,dax,day,daz) - desired delta cartesian position and delta axis-angle orientation
                (dx,dy,dz,ax,ay,az) - desired delta cartesian position and global axis-angle orientation
        :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
            states necessary for controller computation. Must include the following keys:
                joint_position: Array of current joint positions
                base_pos: (x,y,z) cartesian position of the robot's base relative to the static global frame
                base_quat: (x,y,z,w) quaternion orientation of the robot's base relative to the static global frame
                <@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
                    control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
                    frame to control, computed in its local frame (e.g.: robot base frame)

        :return: Array[float], outputted (non-clipped!) velocity control signal to deploy
        """
        # Grab important info from control dict
        pos_relative = np.array(control_dict["{}_pos_relative".format(self.task_name)])
        quat_relative = np.array(control_dict["{}_quat_relative".format(self.task_name)])
        # The first three values of the command are always the (delta) position, convert to absolute values
        dpos = command[:3]
        target_pos = pos_relative + dpos
        # Compute orientation
        if self.mode == "position_fixed_ori":
            # We need to grab the current robot orientation as the commanded orientation if there is none saved
            if self._quat_target is None:
                self._quat_target = quat_relative
            target_quat = self._quat_target
        elif self.mode == "position_compliant_ori":
            # Target quat is simply the current robot orientation
            target_quat = quat_relative
        elif self.mode == "pose_absolute_ori":
            # Received "delta" ori is in fact the desired absolute orientation
            target_quat = T.axisangle2quat(command[3:])
        else:  # pose_delta_ori control
            # Grab dori and compute target ori
            dori = T.quat2mat(T.axisangle2quat(command[3:]))
            target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
        # Possibly limit to workspace if specified
        if self.workspace_pose_limiter is not None:
            target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
        # Convert to world frame
        target_pos, target_quat = self._pose_in_base_to_pose_in_world(
            pose_in_base=(target_pos, target_quat),
            base_in_world=(np.array(control_dict["base_pos"]), np.array(control_dict["base_quat"])),
        )
        # Calculate and return IK-backed out joint angles
        joint_targets = self._calc_joint_angles_from_ik(target_pos=target_pos, target_quat=target_quat)[
            self.ik_joint_idx
        ]
        # Optionally pass through smoothing filter for better stability
        if self.control_filter is not None:
            joint_targets = self.control_filter.estimate(joint_targets)
        # Grab the resulting error and scale it by the velocity gain
        u = -self.kv * (control_dict["joint_position"][self.ik_joint_idx] - joint_targets)
        # Return these commanded velocities.
        return u

    def _calc_joint_angles_from_ik(self, target_pos, target_quat):
        """
        Solves for joint angles given the ik target position and orientation

        Note that this outputs joint angles for the entire pybullet robot body! It is the responsibility of the
        associated Robot class to filter out the redundant / non-impact joints from the computation

        Args:
            target_pos (3-array): absolute (x, y, z) eef position command (in robot base frame)
            target_quat (4-array): absolute (x, y, z, w) eef quaternion command (in robot base frame)

        Returns:
            n-array: corresponding joint positions to match the inputted targets
        """
        # Run IK; joint limits are widened by joint_range_tolerance on each side to
        # stabilize convergence (prevents NaNs when a joint range collapses to 0).
        cmd_joint_pos = np.array(
            p.calculateInverseKinematics(
                bodyIndex=self.base_body_id,
                endEffectorLinkIndex=self.task_link_id,
                targetPosition=target_pos.tolist(),
                targetOrientation=target_quat.tolist(),
                lowerLimits=(self.control_limits[ControlType.POSITION][0] - self.joint_range_tolerance).tolist(),
                upperLimits=(self.control_limits[ControlType.POSITION][1] + self.joint_range_tolerance).tolist(),
                jointRanges=(
                    self.control_limits[ControlType.POSITION][1]
                    - self.control_limits[ControlType.POSITION][0]
                    + 2 * self.joint_range_tolerance
                ).tolist(),
                restPoses=self.default_joint_pos.tolist(),
                jointDamping=self.joint_damping.tolist(),
            )
        )
        return cmd_joint_pos

    @property
    def control_type(self):
        # This controller emits joint *velocity* commands (kv-scaled position error)
        return ControlType.VELOCITY

    @property
    def command_dim(self):
        return IK_MODE_COMMAND_DIMS[self.mode]
| [
"numpy.array",
"igibson.utils.transform_utils.pose2mat",
"igibson.utils.transform_utils.quat2mat",
"igibson.utils.transform_utils.pose_in_A_to_pose_in_B",
"igibson.utils.transform_utils.axisangle2quat",
"igibson.utils.transform_utils.mat2pose"
] | [((7213, 7240), 'numpy.array', 'np.array', (['default_joint_pos'], {}), '(default_joint_pos)\n', (7221, 7240), True, 'import numpy as np\n'), ((7270, 7293), 'numpy.array', 'np.array', (['joint_damping'], {}), '(joint_damping)\n', (7278, 7293), True, 'import numpy as np\n'), ((9518, 9542), 'igibson.utils.transform_utils.pose2mat', 'T.pose2mat', (['pose_in_base'], {}), '(pose_in_base)\n', (9528, 9542), True, 'import igibson.utils.transform_utils as T\n'), ((9576, 9601), 'igibson.utils.transform_utils.pose2mat', 'T.pose2mat', (['base_in_world'], {}), '(base_in_world)\n', (9586, 9601), True, 'import igibson.utils.transform_utils as T\n'), ((9630, 9720), 'igibson.utils.transform_utils.pose_in_A_to_pose_in_B', 'T.pose_in_A_to_pose_in_B', ([], {'pose_A': 'pose_in_base_mat', 'pose_A_in_B': 'base_pose_in_world_mat'}), '(pose_A=pose_in_base_mat, pose_A_in_B=\n base_pose_in_world_mat)\n', (9654, 9720), True, 'import igibson.utils.transform_utils as T\n'), ((9731, 9760), 'igibson.utils.transform_utils.mat2pose', 'T.mat2pose', (['pose_in_world_mat'], {}), '(pose_in_world_mat)\n', (9741, 9760), True, 'import igibson.utils.transform_utils as T\n'), ((8598, 8627), 'numpy.array', 'np.array', (["dump['quat_target']"], {}), "(dump['quat_target'])\n", (8606, 8627), True, 'import numpy as np\n'), ((12496, 12525), 'igibson.utils.transform_utils.axisangle2quat', 'T.axisangle2quat', (['command[3:]'], {}), '(command[3:])\n', (12512, 12525), True, 'import igibson.utils.transform_utils as T\n'), ((13138, 13172), 'numpy.array', 'np.array', (["control_dict['base_pos']"], {}), "(control_dict['base_pos'])\n", (13146, 13172), True, 'import numpy as np\n'), ((13174, 13209), 'numpy.array', 'np.array', (["control_dict['base_quat']"], {}), "(control_dict['base_quat'])\n", (13182, 13209), True, 'import numpy as np\n'), ((12643, 12672), 'igibson.utils.transform_utils.axisangle2quat', 'T.axisangle2quat', (['command[3:]'], {}), '(command[3:])\n', (12659, 12672), True, 'import 
igibson.utils.transform_utils as T\n'), ((12718, 12743), 'igibson.utils.transform_utils.quat2mat', 'T.quat2mat', (['quat_relative'], {}), '(quat_relative)\n', (12728, 12743), True, 'import igibson.utils.transform_utils as T\n')] |
# Usage:
# python train_net.py -cfg ../configs/example.yaml -- learning_rate 1.0
import sys
import os
import argparse
import time
from tqdm import tqdm
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import imageio
# Make the project root importable so the `lib` package below resolves.
sys.path.insert(0, '../')
from lib.common.trainer import Trainer
from lib.common.config import get_cfg_defaults
from lib.dataset.AMASSdataset import AMASSdataset
from lib.net.DeepSDF import Net
from lib.net.test_net import TestEngine
# Command-line interface: argparse only parses what comes before '--';
# everything after '--' is reserved for config overrides (see commented code below).
parser = argparse.ArgumentParser()
parser.add_argument(
    '-cfg', '--config_file', type=str, help='path of the yaml config file')
# NOTE(review): sys.argv.index('--') raises ValueError when '--' is absent from
# the command line — the usage string above implies '--' is mandatory; confirm.
argv = sys.argv[1:sys.argv.index('--')]
args = parser.parse_args(argv)
# opts = sys.argv[sys.argv.index('--') + 1:]
# default cfg: defined in 'lib.common.config.py'
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
# Now override from a list (opts could come from the command line)
# opts = ['dataset.root', '../data/XXXX', 'learning_rate', '1e-2']
# cfg.merge_from_list(opts)
# Freeze the config so it is read-only for the rest of the run.
cfg.freeze()
def test(net, data_loader, trainer, global_step):
    """Run one evaluation pass over ``data_loader``.

    Accumulates the combined loss (sample MSE + cfg.dataset.sk_ratio * skinning-weight
    MSE) and a thresholded-prediction accuracy, averages them over the number of
    batches, then reports both through the trainer's logger and TensorBoard writer.
    """
    net.eval()
    total_loss = 0
    total_acc = 0
    n_batches = len(data_loader)
    with torch.no_grad():
        for batch in tqdm(data_loader):
            bx = batch['data_BX'].cuda()
            bt = batch['data_BT'].cuda()
            gt = batch['targets'].cuda()
            out = net(bx, bt)
            # Max over dim 2 collapses per-point scores into one score per sample
            out_max = torch.max(out, dim=2)[0]
            loss_sample = F.mse_loss(out_max, gt).item()
            # The last num_verts entries hold per-vertex skinning-weight predictions
            verts = out[:, -(cfg.dataset.num_verts):, :]
            skin_w = batch['weights'].cuda()
            loss_skw = F.mse_loss(verts, skin_w).item()
            total_loss += loss_sample + cfg.dataset.sk_ratio * loss_skw
            # Threshold the raw scores at 0.5 to obtain hard 0/1 predictions
            hard = out_max.data
            hard = hard.masked_fill(hard < 0.5, 0.)
            hard = hard.masked_fill(hard > 0.5, 1.)
            total_acc += hard.eq(gt.data.view_as(hard)).float().mean()
    total_loss /= n_batches
    total_acc /= n_batches
    trainer.logger.info('\nTest set: Avg. loss: {:.4f}, Accuracy: {:.2f}\n'.format(
        total_loss, total_acc))
    trainer.tb_writer.add_scalar('test/loss_total', total_loss, global_step)
    trainer.tb_writer.add_scalar('test/acc', total_acc, global_step)
def train(device: str = 'cuda'):
    """Full training entry point.

    Builds the AMASS train/test datasets and loaders, constructs the network and
    trainer, optionally resumes from cfg.ckpt_path, then runs the epoch loop with
    periodic logging, preview rendering, checkpointing and evaluation (all gated by
    the cfg.freq_* settings).

    :param device: torch device string that the model and all batches are moved to.
    """
    # set dataset
    train_dataset = AMASSdataset(cfg, split="train")
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.batch_size, shuffle=True,
        num_workers=cfg.num_threads, pin_memory=True, drop_last=True)
    # set dataset (test split; no shuffling, partial last batch kept)
    test_dataset = AMASSdataset(cfg, split="test")
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.batch_size, shuffle=False,
        num_workers=cfg.num_threads, pin_memory=True)
    # setup net
    net = Net(train_dataset.num_poses, 4, 40, 4).to(device)
    # setup trainer
    trainer = Trainer(net, cfg, use_tb=True)
    # load ckpt (resume if a checkpoint exists; otherwise start from scratch)
    if os.path.exists(cfg.ckpt_path):
        trainer.load_ckpt(cfg.ckpt_path)
    else:
        trainer.logger.info(f'ckpt {cfg.ckpt_path} not found.')
    trainer.logger.info(
        f'train data size: {len(train_dataset)}; '+
        f'loader size: {len(train_data_loader)};')
    # update network graph in TensorBoard using dummy inputs of the expected shape
    dummy_data_bx = torch.randn(12, 17, 21, 3).to(device)
    dummy_data_bt = torch.randn(12, 17, 21, 3).to(device)
    trainer.tb_writer.add_graph(net, (dummy_data_bx, dummy_data_bt), False)
    # Resume bookkeeping: trainer carries the epoch/iteration of a loaded checkpoint
    start_iter = trainer.iteration
    start_epoch = trainer.epoch
    images = []
    # start training
    for epoch in range(start_epoch, cfg.num_epoch):
        trainer.net.train()
        # Rebuild the loader each epoch so workers/shuffling restart cleanly
        train_data_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=cfg.batch_size, shuffle=True,
            num_workers=cfg.num_threads, pin_memory=True, drop_last=True)
        loader = iter(train_data_loader)
        niter = len(train_data_loader)
        epoch_start_time = iter_start_time = time.time()
        for iteration in range(start_iter, niter):
            # data_BX [B, N, 21, 3]
            # data_BT [B, N, 21, 3]
            data_dict = next(loader)
            data_BX, data_BT, target = \
                data_dict['data_BX'], data_dict['data_BT'], data_dict['targets']
            iter_data_time = time.time() - iter_start_time
            global_step = epoch * niter + iteration
            data_BX = data_BX.to(device)
            data_BT = data_BT.to(device)
            target = target.to(device)
            output = trainer.net(data_BX, data_BT)
            # Collapse per-point scores (dim 2) into one score per sample
            output_max = torch.max(output, dim=2)[0]
            loss_sample = F.mse_loss(output_max, target)
            # weights [B, 6890, 21]
            # output_verts [B, 6890, 21] — last num_verts rows are skinning-weight predictions
            output_verts = output[:, -(cfg.dataset.num_verts):, :]
            weights = data_dict['weights'].to(device)
            loss_skw = F.mse_loss(output_verts, weights)
            loss = loss_sample + cfg.dataset.sk_ratio * loss_skw
            # Threshold at 0.5 to compute a hard-accuracy metric (logging only)
            output_max = output_max.masked_fill(output_max<0.5, 0.)
            output_max = output_max.masked_fill(output_max>0.5, 1.)
            correct = output_max.eq(target).float().mean()
            trainer.optimizer.zero_grad()
            loss.backward()
            trainer.optimizer.step()
            iter_time = time.time() - iter_start_time
            # ETA: average time per finished iteration times iterations remaining in the epoch
            eta = (niter-start_iter) * (time.time()-epoch_start_time) / (iteration-start_iter+1)
            # print progress + scalars every freq_plot iterations
            if iteration % cfg.freq_plot == 0 and iteration > 0:
                trainer.logger.info(
                    f'Name: {cfg.name}|Epoch: {epoch:02d}({iteration:05d}/{niter})|' \
                    +f'dataT: {(iter_data_time):.3f}|' \
                    +f'totalT: {(iter_time):.3f}|'
                    +f'ETA: {int(eta // 60):02d}:{int(eta - 60 * (eta // 60)):02d}|' \
                    +f'Err:{loss.item():.4f}|' \
                    +f'Prop:{correct.item():.5f}|'
                )
                trainer.tb_writer.add_scalar('train/loss_total', loss.item(), global_step)
                trainer.tb_writer.add_scalar('train/loss_sample', loss_sample.item(), global_step)
                trainer.tb_writer.add_scalar('train/loss_weight', loss_skw.item(), global_step)
                trainer.tb_writer.add_scalar('train/acc', correct.item(), global_step)
            # update image: render a preview, append to the GIF, and log to TensorBoard
            if iteration % cfg.freq_show == 0 and iteration > 0:
                test_engine = TestEngine(trainer.query_func, device)
                render = test_engine(priors=data_dict)
                images.append(np.flip(render[:, :, ::-1],axis=0))
                imageio.mimsave(os.path.join(cfg.results_path,cfg.name, "results.gif"), images)
                trainer.tb_writer.add_image('Image', np.flip(render[:, :, ::-1],axis=0).transpose(2,0,1), global_step)
            # save a checkpoint (skipped in overfit mode)
            if iteration % cfg.freq_save == 0 and iteration > 0 and not cfg.overfit:
                trainer.update_ckpt(
                    f'ckpt_{epoch}.pth', epoch, iteration)
            # evaluation on the test split (skipped in overfit mode)
            if iteration % cfg.freq_eval == 0 and iteration > 0 and not cfg.overfit:
                trainer.net.eval()
                test(trainer.net.module, test_loader, trainer, global_step)
                trainer.net.train()
            # end of iteration: restart the per-iteration timer
            iter_start_time = time.time()
        trainer.scheduler.step()
        # After a resumed epoch completes, subsequent epochs start at iteration 0
        start_iter = 0
if __name__ == '__main__':
train() | [
"lib.dataset.AMASSdataset.AMASSdataset",
"tqdm.tqdm",
"lib.common.trainer.Trainer",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"numpy.flip",
"lib.net.DeepSDF.Net",
"torch.nn.functional.mse_loss",
"os.path.exists",
"sys.path.insert",
"torch.randn",
"time.time",
"lib.net.test_ne... | [((275, 300), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (290, 300), False, 'import sys\n'), ((519, 544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (542, 544), False, 'import argparse\n'), ((815, 833), 'lib.common.config.get_cfg_defaults', 'get_cfg_defaults', ([], {}), '()\n', (831, 833), False, 'from lib.common.config import get_cfg_defaults\n'), ((2571, 2603), 'lib.dataset.AMASSdataset.AMASSdataset', 'AMASSdataset', (['cfg'], {'split': '"""train"""'}), "(cfg, split='train')\n", (2583, 2603), False, 'from lib.dataset.AMASSdataset import AMASSdataset\n'), ((2629, 2778), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': '(True)', 'num_workers': 'cfg.num_threads', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=cfg.batch_size,\n shuffle=True, num_workers=cfg.num_threads, pin_memory=True, drop_last=True)\n', (2656, 2778), False, 'import torch\n'), ((2838, 2869), 'lib.dataset.AMASSdataset.AMASSdataset', 'AMASSdataset', (['cfg'], {'split': '"""test"""'}), "(cfg, split='test')\n", (2850, 2869), False, 'from lib.dataset.AMASSdataset import AMASSdataset\n'), ((2888, 3021), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': '(False)', 'num_workers': 'cfg.num_threads', 'pin_memory': '(True)'}), '(test_dataset, batch_size=cfg.batch_size,\n shuffle=False, num_workers=cfg.num_threads, pin_memory=True)\n', (2915, 3021), False, 'import torch\n'), ((3156, 3186), 'lib.common.trainer.Trainer', 'Trainer', (['net', 'cfg'], {'use_tb': '(True)'}), '(net, cfg, use_tb=True)\n', (3163, 3186), False, 'from lib.common.trainer import Trainer\n'), ((3210, 3239), 'os.path.exists', 'os.path.exists', (['cfg.ckpt_path'], {}), '(cfg.ckpt_path)\n', (3224, 3239), False, 'import os\n'), ((660, 680), 'sys.argv.index', 'sys.argv.index', 
(['"""--"""'], {}), "('--')\n", (674, 680), False, 'import sys\n'), ((1195, 1210), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1208, 1210), False, 'import torch\n'), ((1227, 1244), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1231, 1244), False, 'from tqdm import tqdm\n'), ((3920, 4069), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': '(True)', 'num_workers': 'cfg.num_threads', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=cfg.batch_size,\n shuffle=True, num_workers=cfg.num_threads, pin_memory=True, drop_last=True)\n', (3947, 4069), False, 'import torch\n'), ((4243, 4254), 'time.time', 'time.time', ([], {}), '()\n', (4252, 4254), False, 'import time\n'), ((3071, 3109), 'lib.net.DeepSDF.Net', 'Net', (['train_dataset.num_poses', '(4)', '(40)', '(4)'], {}), '(train_dataset.num_poses, 4, 40, 4)\n', (3074, 3109), False, 'from lib.net.DeepSDF import Net\n'), ((3533, 3559), 'torch.randn', 'torch.randn', (['(12)', '(17)', '(21)', '(3)'], {}), '(12, 17, 21, 3)\n', (3544, 3559), False, 'import torch\n'), ((3591, 3617), 'torch.randn', 'torch.randn', (['(12)', '(17)', '(21)', '(3)'], {}), '(12, 17, 21, 3)\n', (3602, 3617), False, 'import torch\n'), ((4933, 4963), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output_max', 'target'], {}), '(output_max, target)\n', (4943, 4963), True, 'import torch.nn.functional as F\n'), ((5199, 5232), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output_verts', 'weights'], {}), '(output_verts, weights)\n', (5209, 5232), True, 'import torch.nn.functional as F\n'), ((7687, 7698), 'time.time', 'time.time', ([], {}), '()\n', (7696, 7698), False, 'import time\n'), ((1575, 1599), 'torch.max', 'torch.max', (['output'], {'dim': '(2)'}), '(output, dim=2)\n', (1584, 1599), False, 'import torch\n'), ((4586, 4597), 'time.time', 'time.time', ([], {}), '()\n', (4595, 4597), False, 'import time\n'), ((4878, 4902), 
'torch.max', 'torch.max', (['output'], {'dim': '(2)'}), '(output, dim=2)\n', (4887, 4902), False, 'import torch\n'), ((5629, 5640), 'time.time', 'time.time', ([], {}), '()\n', (5638, 5640), False, 'import time\n'), ((6788, 6826), 'lib.net.test_net.TestEngine', 'TestEngine', (['trainer.query_func', 'device'], {}), '(trainer.query_func, device)\n', (6798, 6826), False, 'from lib.net.test_net import TestEngine\n'), ((1634, 1664), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output_max', 'target'], {}), '(output_max, target)\n', (1644, 1664), True, 'import torch.nn.functional as F\n'), ((1831, 1864), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output_verts', 'weights'], {}), '(output_verts, weights)\n', (1841, 1864), True, 'import torch.nn.functional as F\n'), ((6912, 6947), 'numpy.flip', 'np.flip', (['render[:, :, ::-1]'], {'axis': '(0)'}), '(render[:, :, ::-1], axis=0)\n', (6919, 6947), True, 'import numpy as np\n'), ((6980, 7035), 'os.path.join', 'os.path.join', (['cfg.results_path', 'cfg.name', '"""results.gif"""'], {}), "(cfg.results_path, cfg.name, 'results.gif')\n", (6992, 7035), False, 'import os\n'), ((5699, 5710), 'time.time', 'time.time', ([], {}), '()\n', (5708, 5710), False, 'import time\n'), ((7097, 7132), 'numpy.flip', 'np.flip', (['render[:, :, ::-1]'], {'axis': '(0)'}), '(render[:, :, ::-1], axis=0)\n', (7104, 7132), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 19:05:47 2019
==================================
The Refrigeration Problem
==================================
A fuzzy controller for a refrigeration compressor, built with scikit-fuzzy's
control API.

Input variables
---------------
* ``ErroTev`` / ``DeltaErroTev`` : the controller error signal and its change
  (the names suggest an evaporator-temperature error — confirm with the plant model)

Output variable
---------------
* ``Compressor`` : the compressor drive command

Each input gets triangular 'NL' (negative large), 'ZR' (zero) and 'PL'
(positive large) membership functions; the output gets 'low'/'medium'/'high'.
Three OR-combined rules map the inputs onto the output.
"""
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

# automf(3) names map onto our labels as: 'poor' == 'NL', 'average' == 'ZR', 'good' == 'PL'
ErroTev = ctrl.Antecedent(np.arange(0, 11, 1), 'ErroTev')
DeltaErroTev = ctrl.Antecedent(np.arange(0, 11, 1), 'DeltaErroTev')
Compressor = ctrl.Consequent(np.arange(0, 100, 1), 'Compressor')
ErroTev.automf(3)
DeltaErroTev.automf(3)
# Custom triangular membership functions (these labels are the ones the rules use)
ErroTev['NL'] = fuzz.trimf(ErroTev.universe, [0, 1, 2])
ErroTev['ZR'] = fuzz.trimf(ErroTev.universe, [1, 2, 3])
ErroTev['PL'] = fuzz.trimf(ErroTev.universe, [2, 3, 4])
DeltaErroTev['NL'] = fuzz.trimf(DeltaErroTev.universe, [0, 0.2, 0.4])
DeltaErroTev['ZR'] = fuzz.trimf(DeltaErroTev.universe, [0.2, 0.4, 0.6])
DeltaErroTev['PL'] = fuzz.trimf(DeltaErroTev.universe, [0.4, 0.6, 1])
Compressor['low'] = fuzz.trimf(Compressor.universe, [63, 64, 65])
Compressor['medium'] = fuzz.trimf(Compressor.universe, [64, 65, 66])
Compressor['high'] = fuzz.trimf(Compressor.universe, [65, 75, 93])
ErroTev['ZR'].view()
DeltaErroTev.view()
Compressor.view()
# Rule base: large error OR large error-change drives the matching output level
rule1 = ctrl.Rule(ErroTev['NL'] | DeltaErroTev['NL'], Compressor['low'])
rule2 = ctrl.Rule(ErroTev['ZR'] | DeltaErroTev['ZR'], Compressor['medium'])
rule3 = ctrl.Rule(ErroTev['PL'] | DeltaErroTev['PL'], Compressor['high'])
rule1.view()
Compressor_ctrl = ctrl.ControlSystem([rule1, rule2, rule3])
Compressorout = ctrl.ControlSystemSimulation(Compressor_ctrl)
# BUGFIX: inputs must be set on the ControlSystemSimulation object, not on the
# ControlSystem itself — ControlSystem has no `.input` interface, so the original
# `Compressor_ctrl.input[...] = ...` raised instead of feeding the simulation.
Compressorout.input['ErroTev'] = 1
Compressorout.input['DeltaErroTev'] = 1.5
Compressorout.compute()
"""
Once computed, we can view the result as well as visualize it.
"""
print(Compressorout.output['Compressor'])
Compressor.view(sim=Compressorout) | [
"skfuzzy.control.ControlSystemSimulation",
"skfuzzy.trimf",
"skfuzzy.control.Rule",
"numpy.arange",
"skfuzzy.control.ControlSystem"
] | [((1371, 1410), 'skfuzzy.trimf', 'fuzz.trimf', (['ErroTev.universe', '[0, 1, 2]'], {}), '(ErroTev.universe, [0, 1, 2])\n', (1381, 1410), True, 'import skfuzzy as fuzz\n'), ((1558, 1597), 'skfuzzy.trimf', 'fuzz.trimf', (['ErroTev.universe', '[1, 2, 3]'], {}), '(ErroTev.universe, [1, 2, 3])\n', (1568, 1597), True, 'import skfuzzy as fuzz\n'), ((1740, 1779), 'skfuzzy.trimf', 'fuzz.trimf', (['ErroTev.universe', '[2, 3, 4]'], {}), '(ErroTev.universe, [2, 3, 4])\n', (1750, 1779), True, 'import skfuzzy as fuzz\n'), ((1804, 1852), 'skfuzzy.trimf', 'fuzz.trimf', (['DeltaErroTev.universe', '[0, 0.2, 0.4]'], {}), '(DeltaErroTev.universe, [0, 0.2, 0.4])\n', (1814, 1852), True, 'import skfuzzy as fuzz\n'), ((2024, 2074), 'skfuzzy.trimf', 'fuzz.trimf', (['DeltaErroTev.universe', '[0.2, 0.4, 0.6]'], {}), '(DeltaErroTev.universe, [0.2, 0.4, 0.6])\n', (2034, 2074), True, 'import skfuzzy as fuzz\n'), ((2244, 2292), 'skfuzzy.trimf', 'fuzz.trimf', (['DeltaErroTev.universe', '[0.4, 0.6, 1]'], {}), '(DeltaErroTev.universe, [0.4, 0.6, 1])\n', (2254, 2292), True, 'import skfuzzy as fuzz\n'), ((2316, 2361), 'skfuzzy.trimf', 'fuzz.trimf', (['Compressor.universe', '[63, 64, 65]'], {}), '(Compressor.universe, [63, 64, 65])\n', (2326, 2361), True, 'import skfuzzy as fuzz\n'), ((2533, 2578), 'skfuzzy.trimf', 'fuzz.trimf', (['Compressor.universe', '[64, 65, 66]'], {}), '(Compressor.universe, [64, 65, 66])\n', (2543, 2578), True, 'import skfuzzy as fuzz\n'), ((2745, 2790), 'skfuzzy.trimf', 'fuzz.trimf', (['Compressor.universe', '[65, 75, 93]'], {}), '(Compressor.universe, [65, 75, 93])\n', (2755, 2790), True, 'import skfuzzy as fuzz\n'), ((2865, 2929), 'skfuzzy.control.Rule', 'ctrl.Rule', (["(ErroTev['NL'] | DeltaErroTev['NL'])", "Compressor['low']"], {}), "(ErroTev['NL'] | DeltaErroTev['NL'], Compressor['low'])\n", (2874, 2929), True, 'from skfuzzy import control as ctrl\n'), ((3084, 3151), 'skfuzzy.control.Rule', 'ctrl.Rule', (["(ErroTev['ZR'] | DeltaErroTev['ZR'])", 
"Compressor['medium']"], {}), "(ErroTev['ZR'] | DeltaErroTev['ZR'], Compressor['medium'])\n", (3093, 3151), True, 'from skfuzzy import control as ctrl\n'), ((3306, 3371), 'skfuzzy.control.Rule', 'ctrl.Rule', (["(ErroTev['PL'] | DeltaErroTev['PL'])", "Compressor['high']"], {}), "(ErroTev['PL'] | DeltaErroTev['PL'], Compressor['high'])\n", (3315, 3371), True, 'from skfuzzy import control as ctrl\n'), ((3477, 3518), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', (['[rule1, rule2, rule3]'], {}), '([rule1, rule2, rule3])\n', (3495, 3518), True, 'from skfuzzy import control as ctrl\n'), ((3536, 3581), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['Compressor_ctrl'], {}), '(Compressor_ctrl)\n', (3564, 3581), True, 'from skfuzzy import control as ctrl\n'), ((1141, 1160), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(1)'], {}), '(0, 11, 1)\n', (1150, 1160), True, 'import numpy as np\n'), ((1205, 1224), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(1)'], {}), '(0, 11, 1)\n', (1214, 1224), True, 'import numpy as np\n'), ((1274, 1294), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (1283, 1294), True, 'import numpy as np\n')] |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core
from hypothesis import given
class TestResize(hu.HypothesisTestCase):
    """Hypothesis-driven tests for Caffe2's ResizeNearest operator.

    Both tests draw random scales, spatial sizes, channel counts, batch
    sizes and a RNG seed, then compare the operator against a NumPy
    reference implementation of nearest-neighbour resizing.
    """

    @given(height_scale=st.floats(0.25, 4.0) | st.just(2.0),
           width_scale=st.floats(0.25, 4.0) | st.just(2.0),
           height=st.integers(4, 32),
           width=st.integers(4, 32),
           num_channels=st.integers(1, 4),
           batch_size=st.integers(1, 4),
           seed=st.integers(0, 65535),
           **hu.gcs)
    def test_nearest(self, height_scale, width_scale, height, width,
                     num_channels, batch_size, seed,
                     gc, dc):
        # Forward pass: reference check, device check, and a numeric
        # gradient check against the operator's own gradient.
        np.random.seed(seed)
        op = core.CreateOperator(
            "ResizeNearest",
            ["X"],
            ["Y"],
            width_scale=width_scale,
            height_scale=height_scale,
        )
        X = np.random.rand(
            batch_size, num_channels, height, width).astype(np.float32)
        def ref(X):
            # Vectorized NumPy reference: each output pixel copies the input
            # pixel at the down-scaled coordinate, clamped to the input size.
            # True division (``from __future__ import division``) followed by
            # astype(int32) truncates, matching nearest-neighbour indexing.
            output_height = np.int32(height * height_scale)
            output_width = np.int32(width * width_scale)
            output_h_idxs, output_w_idxs = np.meshgrid(np.arange(output_height),
                                                        np.arange(output_width),
                                                        indexing='ij')
            input_h_idxs = np.minimum(
                output_h_idxs / height_scale, height - 1).astype(np.int32)
            input_w_idxs = np.minimum(
                output_w_idxs / width_scale, width - 1).astype(np.int32)
            Y = X[:, :, input_h_idxs, input_w_idxs]
            return Y,
        self.assertReferenceChecks(gc, op, [X], ref)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=0.1, threshold=1e-2)

    @given(height_scale=st.floats(0.25, 4.0) | st.just(2.0),
           width_scale=st.floats(0.25, 4.0) | st.just(2.0),
           height=st.integers(4, 32),
           width=st.integers(4, 32),
           num_channels=st.integers(1, 4),
           batch_size=st.integers(1, 4),
           seed=st.integers(0, 65535),
           **hu.gcs)
    def test_nearest_grad(self, height_scale, width_scale, height, width,
                          num_channels, batch_size, seed, gc, dc):
        # Explicit gradient operator checked against a scatter-add reference.
        np.random.seed(seed)
        output_height = np.int32(height * height_scale)
        output_width = np.int32(width * width_scale)
        X = np.random.rand(batch_size,
                           num_channels,
                           height,
                           width).astype(np.float32)
        dY = np.random.rand(batch_size,
                            num_channels,
                            output_height,
                            output_width).astype(np.float32)
        op = core.CreateOperator(
            "ResizeNearestGradient",
            ["dY", "X"],
            ["dX"],
            width_scale=width_scale,
            height_scale=height_scale,
        )
        def ref(dY, X):
            # Reference gradient: accumulate each output-pixel gradient back
            # into its source input pixel (inverse of the nearest-neighbour
            # copy in the forward pass).
            dX = np.zeros_like(X)
            for i in range(output_height):
                for j in range(output_width):
                    input_i = np.minimum(i / height_scale, height - 1).astype(np.int32)
                    input_j = np.minimum(j / width_scale, width - 1).astype(np.int32)
                    dX[:, :, input_i, input_j] += dY[:, :, i, j]
            return dX,
        self.assertDeviceChecks(dc, op, [dY, X], [0])
        self.assertReferenceChecks(gc, op, [dY, X], ref)
if __name__ == "__main__":
    # Run the TestResize suite via the standard unittest runner.
    unittest.main()
| [
"unittest.main",
"numpy.zeros_like",
"numpy.random.seed",
"numpy.minimum",
"hypothesis.strategies.just",
"caffe2.python.core.CreateOperator",
"numpy.arange",
"numpy.int32",
"numpy.random.rand",
"hypothesis.strategies.integers",
"hypothesis.strategies.floats"
] | [((4410, 4425), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4423, 4425), False, 'import unittest\n'), ((1503, 1523), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1517, 1523), True, 'import numpy as np\n'), ((1537, 1643), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""ResizeNearest"""', "['X']", "['Y']"], {'width_scale': 'width_scale', 'height_scale': 'height_scale'}), "('ResizeNearest', ['X'], ['Y'], width_scale=width_scale,\n height_scale=height_scale)\n", (1556, 1643), False, 'from caffe2.python import core\n'), ((3165, 3185), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3179, 3185), True, 'import numpy as np\n'), ((3211, 3242), 'numpy.int32', 'np.int32', (['(height * height_scale)'], {}), '(height * height_scale)\n', (3219, 3242), True, 'import numpy as np\n'), ((3266, 3295), 'numpy.int32', 'np.int32', (['(width * width_scale)'], {}), '(width * width_scale)\n', (3274, 3295), True, 'import numpy as np\n'), ((3664, 3785), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""ResizeNearestGradient"""', "['dY', 'X']", "['dX']"], {'width_scale': 'width_scale', 'height_scale': 'height_scale'}), "('ResizeNearestGradient', ['dY', 'X'], ['dX'],\n width_scale=width_scale, height_scale=height_scale)\n", (3683, 3785), False, 'from caffe2.python import core\n'), ((1861, 1892), 'numpy.int32', 'np.int32', (['(height * height_scale)'], {}), '(height * height_scale)\n', (1869, 1892), True, 'import numpy as np\n'), ((1920, 1949), 'numpy.int32', 'np.int32', (['(width * width_scale)'], {}), '(width * width_scale)\n', (1928, 1949), True, 'import numpy as np\n'), ((1141, 1159), 'hypothesis.strategies.integers', 'st.integers', (['(4)', '(32)'], {}), '(4, 32)\n', (1152, 1159), True, 'import hypothesis.strategies as st\n'), ((1178, 1196), 'hypothesis.strategies.integers', 'st.integers', (['(4)', '(32)'], {}), '(4, 32)\n', (1189, 1196), True, 'import hypothesis.strategies as st\n'), ((1222, 1239), 
'hypothesis.strategies.integers', 'st.integers', (['(1)', '(4)'], {}), '(1, 4)\n', (1233, 1239), True, 'import hypothesis.strategies as st\n'), ((1263, 1280), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(4)'], {}), '(1, 4)\n', (1274, 1280), True, 'import hypothesis.strategies as st\n'), ((1298, 1319), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(65535)'], {}), '(0, 65535)\n', (1309, 1319), True, 'import hypothesis.strategies as st\n'), ((3895, 3911), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (3908, 3911), True, 'import numpy as np\n'), ((2814, 2832), 'hypothesis.strategies.integers', 'st.integers', (['(4)', '(32)'], {}), '(4, 32)\n', (2825, 2832), True, 'import hypothesis.strategies as st\n'), ((2851, 2869), 'hypothesis.strategies.integers', 'st.integers', (['(4)', '(32)'], {}), '(4, 32)\n', (2862, 2869), True, 'import hypothesis.strategies as st\n'), ((2895, 2912), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(4)'], {}), '(1, 4)\n', (2906, 2912), True, 'import hypothesis.strategies as st\n'), ((2936, 2953), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(4)'], {}), '(1, 4)\n', (2947, 2953), True, 'import hypothesis.strategies as st\n'), ((2971, 2992), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(65535)'], {}), '(0, 65535)\n', (2982, 2992), True, 'import hypothesis.strategies as st\n'), ((1724, 1779), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'num_channels', 'height', 'width'], {}), '(batch_size, num_channels, height, width)\n', (1738, 1779), True, 'import numpy as np\n'), ((2006, 2030), 'numpy.arange', 'np.arange', (['output_height'], {}), '(output_height)\n', (2015, 2030), True, 'import numpy as np\n'), ((2087, 2110), 'numpy.arange', 'np.arange', (['output_width'], {}), '(output_width)\n', (2096, 2110), True, 'import numpy as np\n'), ((1026, 1046), 'hypothesis.strategies.floats', 'st.floats', (['(0.25)', '(4.0)'], {}), '(0.25, 4.0)\n', (1035, 1046), True, 'import 
hypothesis.strategies as st\n'), ((1049, 1061), 'hypothesis.strategies.just', 'st.just', (['(2.0)'], {}), '(2.0)\n', (1056, 1061), True, 'import hypothesis.strategies as st\n'), ((1086, 1106), 'hypothesis.strategies.floats', 'st.floats', (['(0.25)', '(4.0)'], {}), '(0.25, 4.0)\n', (1095, 1106), True, 'import hypothesis.strategies as st\n'), ((1109, 1121), 'hypothesis.strategies.just', 'st.just', (['(2.0)'], {}), '(2.0)\n', (1116, 1121), True, 'import hypothesis.strategies as st\n'), ((3308, 3363), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'num_channels', 'height', 'width'], {}), '(batch_size, num_channels, height, width)\n', (3322, 3363), True, 'import numpy as np\n'), ((3477, 3546), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'num_channels', 'output_height', 'output_width'], {}), '(batch_size, num_channels, output_height, output_width)\n', (3491, 3546), True, 'import numpy as np\n'), ((2699, 2719), 'hypothesis.strategies.floats', 'st.floats', (['(0.25)', '(4.0)'], {}), '(0.25, 4.0)\n', (2708, 2719), True, 'import hypothesis.strategies as st\n'), ((2722, 2734), 'hypothesis.strategies.just', 'st.just', (['(2.0)'], {}), '(2.0)\n', (2729, 2734), True, 'import hypothesis.strategies as st\n'), ((2759, 2779), 'hypothesis.strategies.floats', 'st.floats', (['(0.25)', '(4.0)'], {}), '(0.25, 4.0)\n', (2768, 2779), True, 'import hypothesis.strategies as st\n'), ((2782, 2794), 'hypothesis.strategies.just', 'st.just', (['(2.0)'], {}), '(2.0)\n', (2789, 2794), True, 'import hypothesis.strategies as st\n'), ((2210, 2262), 'numpy.minimum', 'np.minimum', (['(output_h_idxs / height_scale)', '(height - 1)'], {}), '(output_h_idxs / height_scale, height - 1)\n', (2220, 2262), True, 'import numpy as np\n'), ((2324, 2374), 'numpy.minimum', 'np.minimum', (['(output_w_idxs / width_scale)', '(width - 1)'], {}), '(output_w_idxs / width_scale, width - 1)\n', (2334, 2374), True, 'import numpy as np\n'), ((4032, 4072), 'numpy.minimum', 'np.minimum', (['(i / 
height_scale)', '(height - 1)'], {}), '(i / height_scale, height - 1)\n', (4042, 4072), True, 'import numpy as np\n'), ((4120, 4158), 'numpy.minimum', 'np.minimum', (['(j / width_scale)', '(width - 1)'], {}), '(j / width_scale, width - 1)\n', (4130, 4158), True, 'import numpy as np\n')] |
"""Utils for the inclusion of exogenous processes."""
import functools
import numpy as np
import pandas as pd
from scipy import special
from respy.parallelization import parallelize_across_dense_dimensions
from respy.shared import get_exogenous_from_dense_covariates
from respy.shared import load_objects
from respy.shared import pandas_dot
def compute_transition_probabilities(
    states, transit_keys, optim_paras, dense_key_to_dense_covariates
):
    """Get transition probabilities by dense_key.

    We calculate transition probabilities for one dense key in this
    function: per-individual probabilities are retrieved for every
    exogenous process and combined into a full transition matrix.

    Returns
    -------
    df : pandas.core.DataFrame
        Rows represent states within a dense key and columns potential
        dense states in the next period.
    """
    exogenous_processes = optim_paras["exogenous_processes"]

    # Map every reachable dense key to its tuple of exogenous-process levels.
    dense_key_to_exogenous = {}
    for key in transit_keys:
        dense_key_to_exogenous[key] = get_exogenous_from_dense_covariates(
            dense_key_to_dense_covariates[key], optim_paras
        )

    # One (n_states, n_levels) probability matrix per exogenous process,
    # obtained by a softmax over the linear indices of each level.
    probabilities = []
    for process in exogenous_processes:
        x_betas = []
        for params in exogenous_processes[process].values():
            x_betas.append(pandas_dot(states[params.index], params))
        probabilities.append(special.softmax(np.column_stack(x_betas), axis=1))

    # The joint probability of reaching a dense key is the product over all
    # of its processes' level probabilities.
    # Prepare full Dataframe. If issues arise we might want to switch typed dicts.
    df = pd.DataFrame(index=states.index)
    for dense, exog_levels in dense_key_to_exogenous.items():
        level_columns = [
            probabilities[proc][:, level] for proc, level in enumerate(exog_levels)
        ]
        df[str(dense)] = functools.reduce(np.multiply, level_columns)
    return df
@parallelize_across_dense_dimensions
def weight_continuation_values(
    complex_, options, continuation_values, transit_key_to_choice_set
):
    """Weight continuation values by their transition probability.

    For one dense key, the continuation value of each state is the
    probability-weighted sum of the continuation values of all dense keys
    the individual can transition into. Exogenous processes depend only on
    the state in this period and not on the choice, so continuation values
    can be weighted symmetrically across choices.

    Caution has to be exercised when choice sets are restricted. Another
    important point are states that can only be reached with a change of an
    exogenous process.

    Returns
    -------
    continuation_values : np.array
        (n_states, n_choices) with the weighted continuation values.
    """
    # Transition probabilities for this dense key; columns hold the
    # reachable dense keys (stored as strings, hence the int() casts below).
    transition_df = load_objects("transition", complex_, options)
    choice_set = complex_[1]
    # Reconsider this setup
    # Masks aligning this dense key's choice set with each transit key's
    # choice set; used below to slice continuation-value columns.
    choice_positons = {
        key: _get_representation_cols(value, choice_set)
        for key, value in transit_key_to_choice_set.items()
    }
    # Restrict every future key's continuation values to the shared choices.
    continuation_values_adjusted = {
        future_key: continuation_values[int(future_key)][
            :, list(choice_positons[int(future_key)])
        ]
        for future_key in transition_df.columns
    }
    # Probability-weighted contribution of each reachable dense key.
    weighted_columns = [
        transition_df[future_key].values.reshape((transition_df.shape[0], 1))
        * continuation_values_adjusted[future_key]
        for future_key in transition_df.columns
    ]
    continuation_values = functools.reduce(np.add, weighted_columns)
    return continuation_values
def create_transit_choice_set(
    dense_key_to_transit_representation, dense_key_to_choice_set
):
    """Return max representation choice set of each dense choice core."""
    out = {}
    for dense_key in dense_key_to_transit_representation:
        # Gather the choice sets of every representation that can reach
        # this dense key, then take their element-wise union.
        reachable_choice_sets = [
            dense_key_to_choice_set[representation]
            for representation in dense_key_to_transit_representation
            if dense_key in dense_key_to_transit_representation[representation]
        ]
        out[dense_key] = _get_maximal_choice_set(reachable_choice_sets)
    return out
def _get_maximal_choice_set(list_of_choice_sets):
array = np.concatenate(
[np.expand_dims(x, axis=0) for x in list_of_choice_sets], axis=0
)
out = array.any(axis=0)
return out
def _get_representation_cols(rep_choice_set, choice_set):
"""Return index of array cols."""
# Subset choice sets.
return np.array(choice_set)[np.array(rep_choice_set)]
@parallelize_across_dense_dimensions
def create_transition_objects(
    dense_covariates,
    core_key,
    exogenous_grid,
    n_exog,
    dense_covariates_to_dense_index,
    core_key_and_dense_index_to_dense_key,
):
    """Create objects necessary for transition probabilities.

    Combine the fixed (non-exogenous) part of the dense covariates with
    every point on the exogenous grid and translate each combination into
    its dense key.
    """
    static_dense = dense_covariates[:-n_exog]
    reachable_dense_keys = []
    for exog in exogenous_grid:
        dense_index = dense_covariates_to_dense_index[(*static_dense, *exog)]
        reachable_dense_keys.append(
            core_key_and_dense_index_to_dense_key[(core_key, dense_index)]
        )
    return reachable_dense_keys
| [
"pandas.DataFrame",
"respy.shared.pandas_dot",
"numpy.expand_dims",
"respy.shared.get_exogenous_from_dense_covariates",
"numpy.array",
"numpy.column_stack",
"functools.reduce",
"respy.shared.load_objects"
] | [((1658, 1690), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'states.index'}), '(index=states.index)\n', (1670, 1690), True, 'import pandas as pd\n'), ((2798, 2843), 'respy.shared.load_objects', 'load_objects', (['"""transition"""', 'complex_', 'options'], {}), "('transition', complex_, options)\n", (2810, 2843), False, 'from respy.shared import load_objects\n'), ((3498, 3540), 'functools.reduce', 'functools.reduce', (['np.add', 'weighted_columns'], {}), '(np.add, weighted_columns)\n', (3514, 3540), False, 'import functools\n'), ((977, 1065), 'respy.shared.get_exogenous_from_dense_covariates', 'get_exogenous_from_dense_covariates', (['dense_key_to_dense_covariates[key]', 'optim_paras'], {}), '(dense_key_to_dense_covariates[key],\n optim_paras)\n', (1012, 1065), False, 'from respy.shared import get_exogenous_from_dense_covariates\n'), ((4696, 4716), 'numpy.array', 'np.array', (['choice_set'], {}), '(choice_set)\n', (4704, 4716), True, 'import numpy as np\n'), ((4717, 4741), 'numpy.array', 'np.array', (['rep_choice_set'], {}), '(rep_choice_set)\n', (4725, 4741), True, 'import numpy as np\n'), ((1345, 1385), 'respy.shared.pandas_dot', 'pandas_dot', (['states[params.index]', 'params'], {}), '(states[params.index], params)\n', (1355, 1385), False, 'from respy.shared import pandas_dot\n'), ((1495, 1519), 'numpy.column_stack', 'np.column_stack', (['x_betas'], {}), '(x_betas)\n', (1510, 1519), True, 'import numpy as np\n'), ((4448, 4473), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4462, 4473), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import RegularGridInterpolator
class PowerSpectrumGridInterpolator():
    """Interpolates a precomputed log10 P(k) grid over (z, k).

    Parameters
    ----------
    ps_type : str
        Grid name used to build the default grid file path when
        ``file_name`` is not given.
    file_name : str or None
        Path to an ``.npz`` file containing ``log_pk_grid``, ``z_grid`` and
        ``k_grid``. If None, a default path derived from ``ps_type`` is used.
    k_max : float
        Maximum wavenumber; stored on the instance for callers.
    """
    def __init__(self, ps_type='nonlin_matter', file_name=None, k_max=1e7):
        # Bug fix: the original unconditionally overwrote ``file_name`` with
        # the default path, silently ignoring a user-supplied file. Only fall
        # back to the default when no explicit path is given.
        if file_name is None:
            file_name = "../data/log_pk_grids/log_pk_" + ps_type + "_grid_ary.npz"
        file = np.load(file_name)
        log_pk_grid = file['log_pk_grid']
        z_grid = file['z_grid']
        k_grid = file['k_grid']
        self.z_min = np.min(z_grid)
        self.k_max = k_max
        # Interpolate in log10-space for both axes; extrapolate outside the
        # grid (bounds_error=False, fill_value=None).
        self.interpolator = RegularGridInterpolator(points=[np.log10(z_grid), np.log10(k_grid)], values=log_pk_grid,
                                                    bounds_error=False,
                                                    fill_value=None)
    def __call__(self, z_ary, k_ary):
        """Evaluate log10 P(k) on the mesh of ``z_ary`` x ``k_ary``.

        Redshifts below the grid minimum are clamped to ``z_min`` instead of
        being extrapolated.
        """
        log_z_mesh_ary, log_k_mesh_ary = np.log10(np.meshgrid(z_ary, k_ary))
        return np.where(np.transpose(log_z_mesh_ary) < np.log10(self.z_min),
                        self.interpolator(np.transpose(
                            [np.ones_like(log_z_mesh_ary) * np.log10(self.z_min), log_k_mesh_ary])),
                        self.interpolator(np.transpose([log_z_mesh_ary, log_k_mesh_ary])))
| [
"numpy.load",
"numpy.meshgrid",
"numpy.ones_like",
"numpy.transpose",
"numpy.min",
"numpy.log10"
] | [((472, 486), 'numpy.min', 'np.min', (['z_grid'], {}), '(z_grid)\n', (478, 486), True, 'import numpy as np\n'), ((324, 342), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (331, 342), True, 'import numpy as np\n'), ((863, 888), 'numpy.meshgrid', 'np.meshgrid', (['z_ary', 'k_ary'], {}), '(z_ary, k_ary)\n', (874, 888), True, 'import numpy as np\n'), ((915, 943), 'numpy.transpose', 'np.transpose', (['log_z_mesh_ary'], {}), '(log_z_mesh_ary)\n', (927, 943), True, 'import numpy as np\n'), ((946, 966), 'numpy.log10', 'np.log10', (['self.z_min'], {}), '(self.z_min)\n', (954, 966), True, 'import numpy as np\n'), ((1167, 1213), 'numpy.transpose', 'np.transpose', (['[log_z_mesh_ary, log_k_mesh_ary]'], {}), '([log_z_mesh_ary, log_k_mesh_ary])\n', (1179, 1213), True, 'import numpy as np\n'), ((575, 591), 'numpy.log10', 'np.log10', (['z_grid'], {}), '(z_grid)\n', (583, 591), True, 'import numpy as np\n'), ((593, 609), 'numpy.log10', 'np.log10', (['k_grid'], {}), '(k_grid)\n', (601, 609), True, 'import numpy as np\n'), ((1053, 1081), 'numpy.ones_like', 'np.ones_like', (['log_z_mesh_ary'], {}), '(log_z_mesh_ary)\n', (1065, 1081), True, 'import numpy as np\n'), ((1084, 1104), 'numpy.log10', 'np.log10', (['self.z_min'], {}), '(self.z_min)\n', (1092, 1104), True, 'import numpy as np\n')] |
# Aggregate `progress.csv` logs from many experiment runs into one CSV:
# metrics are resampled onto a fixed-interval step grid via 1-D
# interpolation, then annotated with per-trial metadata parsed from the
# directory structure.
# NOTE: y_tags, x_tag, diagnosis_tags, trial_tag, method_tag,
# baseline_names, specialized_tags, variant_tag_names and get_variant_tags
# come from scripts.constants (star import).
import pandas as pd
import numpy as np
import os, sys
import glob
from scripts.constants import *
from absl import flags
FLAGS = flags.FLAGS
## Hparams
flags.DEFINE_string("base_path", None, "Base path")
flags.DEFINE_integer("start_x", 0, "start step of record")
flags.DEFINE_integer("interval_x", 1, "interval of record")
flags.DEFINE_integer("max_episode_len", 1000, "max episode length")
flags.DEFINE_string("output_csv", "final.csv", "final csv name")
flags.FLAGS(sys.argv)  # parse command-line flags eagerly (no app.run here)
print(FLAGS.flags_into_string())
# standard fixed-interval env steps
start_x = FLAGS.start_x
interval_x = FLAGS.interval_x
## Find all csvs
file_paths = glob.glob(os.path.join(FLAGS.base_path, "**/progress.csv"), recursive=True)
print(file_paths)
results = []
for path in file_paths:
    # Skip oracle sub-runs unless the base path itself is an oracle experiment.
    if "oracle" not in FLAGS.base_path and "oracle/" in path:
        continue
    df = pd.read_csv(path)
    result = pd.DataFrame() # to be filled
    # 1. record metrics data
    # (after interpolation to standard fixed-interval steps)
    ## 1D interp on y data
    for y_tag in y_tags:
        if y_tag[0] not in df.columns:
            continue
        # use y_tag to get valid entries, not x_tag
        # assume all y_tags aligned
        valid_entries = df[y_tag[0]].notna() # remove NaN
        y_raw = df[y_tag[0]][valid_entries]
        x_raw = df[x_tag[0]][valid_entries]
        ## interpolate
        end_x = x_raw.max() # fix bug!
        x_interp = np.arange(start_x, end_x + 1, interval_x)
        y_interp = np.interp(x_interp, x_raw, y_raw)
        result[x_tag[1]] = x_interp
        result[y_tag[1]] = np.around(y_interp, decimals=2)
    # NOTE(review): valid_entries / x_raw / x_interp below are leftovers
    # from the LAST y_tag iteration — this assumes all y_tags share the
    # same logged rows; confirm before adding unaligned tags.
    diagnosis_indices = valid_entries[valid_entries == True].index - 1
    for diagnosis_tag in diagnosis_tags:
        if diagnosis_tag[0] not in df.columns:
            continue
        valid_entries_attempt = df[x_tag[0]].notna() & df[diagnosis_tag[0]].notna()
        # Diagnosis values must never share a row with x_tag values — they
        # are read from the row before (`.index - 1` above).
        assert valid_entries_attempt.any() == False # logging issue
        diagnosis_y_raw = df.iloc[diagnosis_indices][diagnosis_tag[0]]
        assert len(diagnosis_y_raw) == len(x_raw)
        ## interpolate using x_raw as proxy
        diagnosis_y_interp = np.interp(x_interp, x_raw, diagnosis_y_raw)
        result[diagnosis_tag[1]] = np.around(diagnosis_y_interp, decimals=2)
    # 2. record meta-data
    # simply using df[tag] = str will broadcast
    ## parse trial_str to get variant tags
    trial_str_list = list(filter(None, path.replace(FLAGS.base_path, "").split("/")))
    trial_str = "/".join(trial_str_list)
    print(trial_str)
    trial_time_str = trial_str_list[-2] # -1 is "progress.csv"
    result[trial_tag] = trial_time_str
    ## hparams
    if any(name in trial_str for name in baseline_names):
        # belong to our baselines
        result[method_tag] = "ours"
        variant_tags = get_variant_tags(trial_str, FLAGS.max_episode_len)
        for k, v in variant_tags.items():
            result[k] = v
    else:
        # specialized or other methods
        specialized_name = trial_str_list[0]
        assert specialized_name in specialized_tags
        result[method_tag] = specialized_name
        for k, v in specialized_tags[specialized_name].items():
            if k in variant_tag_names:
                result[k] = v
    results.append(result)
results = pd.concat(results)
# Mirror the logs directory layout under a sibling "data" directory.
os.makedirs(FLAGS.base_path.replace("logs", "data"), exist_ok=True)
results.to_csv(
    os.path.join(FLAGS.base_path.replace("logs", "data"), FLAGS.output_csv), index=False
)
| [
"pandas.DataFrame",
"pandas.read_csv",
"absl.flags.DEFINE_string",
"numpy.around",
"absl.flags.DEFINE_integer",
"numpy.arange",
"numpy.interp",
"absl.flags.FLAGS",
"os.path.join",
"pandas.concat"
] | [((154, 205), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""base_path"""', 'None', '"""Base path"""'], {}), "('base_path', None, 'Base path')\n", (173, 205), False, 'from absl import flags\n'), ((206, 264), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""start_x"""', '(0)', '"""start step of record"""'], {}), "('start_x', 0, 'start step of record')\n", (226, 264), False, 'from absl import flags\n'), ((265, 324), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""interval_x"""', '(1)', '"""interval of record"""'], {}), "('interval_x', 1, 'interval of record')\n", (285, 324), False, 'from absl import flags\n'), ((325, 392), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_episode_len"""', '(1000)', '"""max episode length"""'], {}), "('max_episode_len', 1000, 'max episode length')\n", (345, 392), False, 'from absl import flags\n'), ((393, 457), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_csv"""', '"""final.csv"""', '"""final csv name"""'], {}), "('output_csv', 'final.csv', 'final csv name')\n", (412, 457), False, 'from absl import flags\n'), ((458, 479), 'absl.flags.FLAGS', 'flags.FLAGS', (['sys.argv'], {}), '(sys.argv)\n', (469, 479), False, 'from absl import flags\n'), ((3307, 3325), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (3316, 3325), True, 'import pandas as pd\n'), ((646, 694), 'os.path.join', 'os.path.join', (['FLAGS.base_path', '"""**/progress.csv"""'], {}), "(FLAGS.base_path, '**/progress.csv')\n", (658, 694), False, 'import os, sys\n'), ((857, 874), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (868, 874), True, 'import pandas as pd\n'), ((888, 902), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (900, 902), True, 'import pandas as pd\n'), ((1444, 1485), 'numpy.arange', 'np.arange', (['start_x', '(end_x + 1)', 'interval_x'], {}), '(start_x, end_x + 1, interval_x)\n', (1453, 1485), True, 'import numpy as np\n'), ((1505, 1538), 'numpy.interp', 
'np.interp', (['x_interp', 'x_raw', 'y_raw'], {}), '(x_interp, x_raw, y_raw)\n', (1514, 1538), True, 'import numpy as np\n'), ((1602, 1633), 'numpy.around', 'np.around', (['y_interp'], {'decimals': '(2)'}), '(y_interp, decimals=2)\n', (1611, 1633), True, 'import numpy as np\n'), ((2163, 2206), 'numpy.interp', 'np.interp', (['x_interp', 'x_raw', 'diagnosis_y_raw'], {}), '(x_interp, x_raw, diagnosis_y_raw)\n', (2172, 2206), True, 'import numpy as np\n'), ((2242, 2283), 'numpy.around', 'np.around', (['diagnosis_y_interp'], {'decimals': '(2)'}), '(diagnosis_y_interp, decimals=2)\n', (2251, 2283), True, 'import numpy as np\n')] |
# -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: format_conv
Author: czh
Create Date: 2021/9/29
--------------------------------------
Change Activity:
======================================
"""
import copy
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
def jaccard_similarity(s1, s2):
    """Character-level (multiset) Jaccard similarity between two strings.

    Counts characters with multiplicity: the sum of per-character minimum
    counts divided by the sum of per-character maximum counts. Matching the
    previous CountVectorizer-based implementation, input is lowercased and
    whitespace characters are ignored — but without spinning up a sklearn
    vectorizer per call.

    Args:
        s1: First string.
        s2: Second string.

    Returns:
        Similarity in [0.0, 1.0].

    Raises:
        ZeroDivisionError: If both strings contain no non-whitespace
            characters (the sklearn version raised ValueError here).
    """
    from collections import Counter

    # CountVectorizer lowercased input and its split()-based tokenizer
    # dropped whitespace tokens; reproduce both behaviors.
    counts1 = Counter(ch for ch in s1.lower() if not ch.isspace())
    counts2 = Counter(ch for ch in s2.lower() if not ch.isspace())
    # Multiset intersection and union sizes.
    numerator = sum((counts1 & counts2).values())
    denominator = sum((counts1 | counts2).values())
    return 1.0 * numerator / denominator
def drop_duplicate_event(event_so_list, sim_entity):
    """Recursively merge duplicate events in ``event_so_list`` (in place).

    Each element of ``event_so_list`` is a dict mapping a role name to a
    list of argument strings. Two events count as duplicates when every
    role they share has at least one overlapping argument pair (substring
    containment, character-set subset, or Jaccard similarity above
    ``sim_entity``); events sharing no roles are always merged. On the
    first merge found, event ``i`` is folded into event ``j``, removed, and
    the scan restarts via recursion.

    Args:
        event_so_list: List of role -> argument-list dicts; modified in place.
        sim_entity: Jaccard-similarity threshold above which two argument
            strings count as the same entity.
    """
    event_so_list_copy = copy.deepcopy(event_so_list)
    for i in range(len(event_so_list_copy)):
        for j in range(i + 1, len(event_so_list_copy)):
            is_duplicate = True
            s_i_role_set = set()
            for event_s_i, event_o_i in event_so_list_copy[i].items():
                s_i_role_set.add(event_s_i)
            s_j_role_set = set()
            # If the two events share no roles, merge them; if they do share
            # roles, check whether each shared role has overlapping values
            # and merge only when every shared role overlaps.
            # TODO: entity mentions could also be incorporated here.
            for event_s_j, event_o_j in event_so_list_copy[j].items():
                s_j_role_set.add(event_s_j)
                event_so_i_value = event_so_list_copy[i].get(event_s_j, [])
                if event_so_i_value:  # check whether this shared role has an overlapping value
                    has_duplicate_value = False
                    for entity_i in event_so_i_value:
                        for entity_j in event_o_j:
                            if (entity_i in entity_j) or \
                                    (entity_j in entity_i) or \
                                    jaccard_similarity(entity_i, entity_j) > sim_entity or \
                                    set(entity_i).issubset(entity_j) or \
                                    set(entity_j).issubset(entity_i):
                                has_duplicate_value = True
                                break
                        if has_duplicate_value:
                            break
                    if not has_duplicate_value:
                        is_duplicate = False
                        break
            if is_duplicate:  # merge the two events
                for event_s_i, event_o_i in event_so_list_copy[i].items():
                    if event_s_i not in event_so_list[j]:
                        event_so_list[j].update({event_s_i: event_o_i})
                    else:
                        for event_o in event_o_i:
                            if event_o not in event_so_list[j][event_s_i]:
                                event_so_list[j][event_s_i].append(event_o)
                # Remove the absorbed event and rescan from scratch, since
                # indices into the (copied) list are now stale.
                event_so_list.remove(event_so_list_copy[i])
                drop_duplicate_event(event_so_list, sim_entity)
                return
            else:
                continue
    return
def merge_events4doc_ee(event_list, sim_entity=0.6):
    """Merge duplicate events of a document-level event-extraction result.

    Groups events by event type, merges duplicate events per type via
    ``drop_duplicate_event``, and within each role keeps only the longest
    argument string among mutually subset-related candidates.

    Args:
        event_list: List of dicts with keys "event_type" and
            "argument_list" (each argument is a dict with "type" and
            "text").
        sim_entity: Similarity threshold forwarded to drop_duplicate_event.

    Returns:
        A new event list in the same schema with duplicates merged.
    """
    new_event_list = []
    # event_type -> list of {role: [argument texts]} dicts.
    event_type_dict = {}
    for event_dict in event_list:
        new_argument_dict = {}
        for argument_dict in event_dict["argument_list"]:
            new_argument_dict.setdefault(argument_dict["type"], []).append(argument_dict["text"])
        event_type_dict.setdefault(event_dict["event_type"], []).append(new_argument_dict)
    for event_type, event_so_list in event_type_dict.items():
        # event_so_list = sorted(event_so_list, key=lambda x: len(x), reverse=False)
        drop_duplicate_event(event_so_list, sim_entity)
        for event_so_dict in event_so_list:
            event_dict = {"event_type": event_type, "argument_list": []}
            for role, arg_list in event_so_dict.items():
                # new_arg_dict maps a kept argument to the shorter variants
                # it absorbed. Longest arguments are processed first.
                new_arg_dict = {}
                arg_list_sort = sorted(arg_list, key=lambda x: len(x), reverse=True)
                for index, argument in enumerate(arg_list_sort):
                    add_new_arg = True
                    new_arg_dict_copy = copy.deepcopy(new_arg_dict)
                    for new_arg, new_arg_value in new_arg_dict_copy.items():
                        new_arg_value_copy = copy.deepcopy(new_arg_value)
                        new_arg_value_copy.append(new_arg)
                        break_circle = False
                        # Compare against the kept argument and everything it
                        # already absorbed; subset relation in either
                        # direction means the two are the same entity.
                        for temp_arg in new_arg_value_copy:
                            condition = set(argument).issubset(set(temp_arg)) or set(temp_arg).issubset(
                                set(argument))
                            if condition:
                                break_circle = True
                                if len(argument) > len(new_arg):
                                    # The new argument is longer: it becomes
                                    # the kept representative.
                                    new_arg_dict[argument] = new_arg_value_copy
                                    new_arg_dict.pop(new_arg)
                                    add_new_arg = False
                                    break
                                else:
                                    new_arg_dict.setdefault(new_arg, []).append(argument)
                                    add_new_arg = False
                                    break
                        if break_circle:
                            break
                    if add_new_arg:
                        new_arg_dict.setdefault(argument, [])
                for new_arg, new_arg_value in new_arg_dict.items():
                    argument_dict = {"type": role, "text": new_arg}
                    if argument_dict not in event_dict["argument_list"]:
                        event_dict["argument_list"].append(argument_dict)
            new_event_list.append(event_dict)
    return new_event_list
| [
"copy.deepcopy",
"numpy.max",
"numpy.min"
] | [((960, 988), 'copy.deepcopy', 'copy.deepcopy', (['event_so_list'], {}), '(event_so_list)\n', (973, 988), False, 'import copy\n'), ((740, 763), 'numpy.min', 'np.min', (['vectors'], {'axis': '(0)'}), '(vectors, axis=0)\n', (746, 763), True, 'import numpy as np\n'), ((800, 823), 'numpy.max', 'np.max', (['vectors'], {'axis': '(0)'}), '(vectors, axis=0)\n', (806, 823), True, 'import numpy as np\n'), ((4211, 4238), 'copy.deepcopy', 'copy.deepcopy', (['new_arg_dict'], {}), '(new_arg_dict)\n', (4224, 4238), False, 'import copy\n'), ((4361, 4389), 'copy.deepcopy', 'copy.deepcopy', (['new_arg_value'], {}), '(new_arg_value)\n', (4374, 4389), False, 'import copy\n')] |
# Train small MLP probes that regress handcrafted ECG features from the
# gap (pooled) output of a pre-trained HSICClassifier, to check what
# feature information the learned representation carries.
import os
import numpy as np
import torch
import torch.utils.data
import pickle as pkl
from torch import nn, optim
from tqdm import tqdm
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error as mse
from networks import HSICClassifier, MLP2Layer
from ECG.train.datasets import create_dataloaders
from ECG.train.train_utils import get_device, update_subset_list
# experiment parameters
exp_name = 'main_task_lambda500_rr'
feature_subset = 'rr'
included_feature_set = 'rr'
excluded_feature_set = 'p_wave'
run_rep2label = False
# training parameters
torch.manual_seed(42)
cuda_id = 0
batch_size = 32
lr = 1e-3
num_epochs = 40
rep_size = 512  # input size of the MLP probe (matches the gap output)
file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'saved_models', exp_name)
is_baseline = 'baseline' in exp_name.lower()
# NOTE: feature_opt is the *string* 'None' (not the None object) for baselines.
feature_opt = 'None' if is_baseline else 'HSIC+Concat'
device = get_device(cuda_id)
if not os.path.exists(file_dir):
    os.mkdir(file_dir)
feature_subset_dataloader = feature_subset # if feature_subset == 'all_fcnet' else 'all'
train_loader, val_loader, test_loader = create_dataloaders(batch_size=batch_size, feature_subset=feature_subset_dataloader,
                                                           feature_opt='HSIC+Concat', naf=True)
# Load the pre-trained main classifier and switch it to eval mode; only the
# small feature predictor is optimized later in this script.
main_model = HSICClassifier(num_classes=2, feature_len=train_loader.dataset.feature_len,
                            feature_opt=feature_opt, gap_norm_opt='batch_norm', in_channels=1).to(device)
main_model.load_state_dict(torch.load(os.path.join(file_dir, f'{exp_name}_params.pkl'), map_location='cpu'))
main_model.eval()
criterion = nn.MSELoss()
def train_predict_features(epoch, subset):
    """Train the global `feature_predictor` for one epoch.

    The main model serves as a feature extractor: its gap output is fed to
    `feature_predictor`, which is regressed (MSE) onto the real-feature
    columns selected by `subset`.

    Args:
        epoch: Epoch number, used only for the progress printout.
        subset: Column indices selecting which real features to predict.
    """
    feature_predictor.train()
    train_loss = 0
    for batch_idx, (signal, _, features, real_features, signal_names, _) in enumerate(tqdm(train_loader)):
        signal, features, real_features = signal.to(device), features.to(device), real_features[:, subset].to(device)
        optimizer.zero_grad()
        # Only the gap (pooled) representation of the main model is used.
        _, _, gap = main_model(x=signal, features=features)
        features_pred = feature_predictor(gap)
        loss = criterion(real_features, features_pred)
        loss.backward()
        train_loss += loss.item()
        # The optimizer holds only feature_predictor parameters, so the
        # main model is not updated by this step.
        optimizer.step()
    print(f'====> Epoch: {epoch} Average loss: {train_loss / len(train_loader.dataset):.4f}')
def val_predict_features(subset, best_r2):
    """Evaluate the feature predictor on the validation set.

    Saves the predictor's weights whenever the mean R^2 improves on
    `best_r2`.

    Args:
        subset: Column indices selecting which real features to predict.
        best_r2: Best mean validation R^2 seen so far.

    Returns:
        The (possibly updated) best mean validation R^2.
    """
    feature_predictor.eval()
    test_loss = 0
    r2_list = []
    with torch.no_grad():
        for batch_idx, (signal, _, features, real_features, signal_names, _) in enumerate(val_loader):
            signal, features, real_features = signal.to(device), features.to(device), real_features[:, subset].to(device)
            _, _, gap = main_model(x=signal, features=features)
            features_pred = feature_predictor(gap)
            pred_errors = mse(real_features.detach().cpu(), features_pred.detach().cpu(), multioutput='raw_values')
            test_loss += pred_errors
            curr_r2 = r2_score(real_features.detach().cpu(), features_pred.detach().cpu())
            r2_list.append(curr_r2)
    # NOTE(review): the loop runs over val_loader but this normalizes by the
    # number of *test* batches — looks like a bug; affects the printout only.
    test_loss /= len(test_loader)
    r2 = np.mean(r2_list)
    if r2 > best_r2:
        best_r2 = r2
        print(f'Saved @ {r2}')
        torch.save(feature_predictor.state_dict(), os.path.join(file_dir, f'{exp_name}_rep2features_params.pkl'))
    print(f'====> Val set MSE loss: {test_loss.mean():.5f}')
    print(f'====> Val set R^2: {r2:.5f}')
    return best_r2
def test_predict_features(subset, result_type):
    """Evaluate the best checkpointed feature predictor on the test set and
    persist its mean R^2 score to disk.

    Args:
        subset: column indices selecting which real features to predict.
        result_type: tag appended to the output pickle filename.
    """
    feature_predictor.eval()
    # Reload the weights that the validation loop checkpointed.
    checkpoint = os.path.join(file_dir, f'{exp_name}_rep2features_params.pkl')
    feature_predictor.load_state_dict(torch.load(checkpoint, map_location='cpu'))
    r2_list = []
    with torch.no_grad():
        for signal, _, features, real_features, signal_names, _ in test_loader:
            signal = signal.to(device)
            features = features.to(device)
            real_features = real_features[:, subset].to(device)
            _, _, gap = main_model(x=signal, features=features)
            features_pred = feature_predictor(gap)
            r2_list.append(r2_score(real_features.detach().cpu(), features_pred.detach().cpu()))
    r2 = np.mean(r2_list)
    print('====> Test set R^2: {:.5f}'.format(r2))
    with open(os.path.join(file_dir, f'{exp_name}_test_r2{result_type}_fixed.pkl'), 'wb') as handle:
        pkl.dump(r2, handle, protocol=pkl.HIGHEST_PROTOCOL)
    return
r2_list = []
subset_list = []
result_type_list = []
result_type_list.append(update_subset_list(subset_type=included_feature_set, subset_list=subset_list, included=True,
real_features=list(train_loader.dataset.real_features)))
result_type_list.append(update_subset_list(subset_type=excluded_feature_set, subset_list=subset_list, included=False,
real_features=list(train_loader.dataset.real_features)))
def col_names_to_idx(subset, df):
    """Map column names in `subset` to their positional indices in `df`."""
    return list(map(df.columns.get_loc, subset))
curr_df = train_loader.dataset.real_features
for index, subset in enumerate(subset_list):
num_features2predict = len(subset)
feature_predictor = MLP2Layer(in_size=rep_size, hidden_size1=128, hidden_size2=128, out_size=num_features2predict).to(device)
optimizer = optim.Adam(feature_predictor.parameters(), lr=lr, weight_decay=1e-6)
best_r2 = -1e8
for epoch in range(num_epochs):
train_predict_features(epoch, col_names_to_idx(subset, curr_df))
best_r2 = val_predict_features(best_r2=best_r2, subset=col_names_to_idx(subset, curr_df))
test_predict_features(col_names_to_idx(subset, curr_df), result_type_list[index])
| [
"networks.MLP2Layer",
"os.mkdir",
"torch.nn.MSELoss",
"tqdm.tqdm",
"pickle.dump",
"os.path.join",
"torch.manual_seed",
"os.path.realpath",
"os.path.exists",
"numpy.mean",
"networks.HSICClassifier",
"ECG.train.datasets.create_dataloaders",
"torch.no_grad",
"ECG.train.train_utils.get_device"... | [((579, 600), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (596, 600), False, 'import torch\n'), ((878, 897), 'ECG.train.train_utils.get_device', 'get_device', (['cuda_id'], {}), '(cuda_id)\n', (888, 897), False, 'from ECG.train.train_utils import get_device, update_subset_list\n'), ((1087, 1212), 'ECG.train.datasets.create_dataloaders', 'create_dataloaders', ([], {'batch_size': 'batch_size', 'feature_subset': 'feature_subset_dataloader', 'feature_opt': '"""HSIC+Concat"""', 'naf': '(True)'}), "(batch_size=batch_size, feature_subset=\n feature_subset_dataloader, feature_opt='HSIC+Concat', naf=True)\n", (1105, 1212), False, 'from ECG.train.datasets import create_dataloaders\n'), ((1603, 1615), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1613, 1615), False, 'from torch import nn, optim\n'), ((906, 930), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (920, 930), False, 'import os\n'), ((936, 954), 'os.mkdir', 'os.mkdir', (['file_dir'], {}), '(file_dir)\n', (944, 954), False, 'import os\n'), ((3104, 3120), 'numpy.mean', 'np.mean', (['r2_list'], {}), '(r2_list)\n', (3111, 3120), True, 'import numpy as np\n'), ((4176, 4192), 'numpy.mean', 'np.mean', (['r2_list'], {}), '(r2_list)\n', (4183, 4192), True, 'import numpy as np\n'), ((711, 737), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (727, 737), False, 'import os\n'), ((1281, 1427), 'networks.HSICClassifier', 'HSICClassifier', ([], {'num_classes': '(2)', 'feature_len': 'train_loader.dataset.feature_len', 'feature_opt': 'feature_opt', 'gap_norm_opt': '"""batch_norm"""', 'in_channels': '(1)'}), "(num_classes=2, feature_len=train_loader.dataset.feature_len,\n feature_opt=feature_opt, gap_norm_opt='batch_norm', in_channels=1)\n", (1295, 1427), False, 'from networks import HSICClassifier, MLP2Layer\n'), ((1502, 1550), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{exp_name}_params.pkl"""'], {}), 
"(file_dir, f'{exp_name}_params.pkl')\n", (1514, 1550), False, 'import os\n'), ((1796, 1814), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (1800, 1814), False, 'from tqdm import tqdm\n'), ((2422, 2437), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2435, 2437), False, 'import torch\n'), ((3680, 3695), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3693, 3695), False, 'import torch\n'), ((4355, 4406), 'pickle.dump', 'pkl.dump', (['r2', 'handle'], {'protocol': 'pkl.HIGHEST_PROTOCOL'}), '(r2, handle, protocol=pkl.HIGHEST_PROTOCOL)\n', (4363, 4406), True, 'import pickle as pkl\n'), ((3246, 3307), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{exp_name}_rep2features_params.pkl"""'], {}), "(file_dir, f'{exp_name}_rep2features_params.pkl')\n", (3258, 3307), False, 'import os\n'), ((3570, 3631), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{exp_name}_rep2features_params.pkl"""'], {}), "(file_dir, f'{exp_name}_rep2features_params.pkl')\n", (3582, 3631), False, 'import os\n'), ((4260, 4328), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{exp_name}_test_r2{result_type}_fixed.pkl"""'], {}), "(file_dir, f'{exp_name}_test_r2{result_type}_fixed.pkl')\n", (4272, 4328), False, 'import os\n'), ((5155, 5254), 'networks.MLP2Layer', 'MLP2Layer', ([], {'in_size': 'rep_size', 'hidden_size1': '(128)', 'hidden_size2': '(128)', 'out_size': 'num_features2predict'}), '(in_size=rep_size, hidden_size1=128, hidden_size2=128, out_size=\n num_features2predict)\n', (5164, 5254), False, 'from networks import HSICClassifier, MLP2Layer\n')] |
# Test for testing the test tools ... uhhh boy!
import numpy as np
import copy
from pwtools.test import tools
def test_tools():
    """Exercise the pwtools comparison helpers on nested dicts, arrays and scalars."""
    ref = {'a': 3,
           type(1): 'foo',
           'b': {'aa': 1,
                 'bb': np.array([1, 2]),
                 'cc': {'aaa': np.array([1, 2.0]),
                        'bbb': np.array([2, 4.0])}}}
    probe = copy.deepcopy(ref)
    tools.assert_dict_with_all_types_equal(ref, probe)
    tools.assert_all_types_equal(ref, probe)
    # A mismatching nested array must be detected.
    probe['b']['cc']['bbb'] *= 2.0
    assert not tools.all_types_equal(ref, probe)
    # Small perturbations of an array and a float pass the "almost equal" check.
    probe = copy.deepcopy(ref)
    probe['b']['cc']['bbb'] += 1e-5
    probe['b']['aa'] += 1e-5
    tools.assert_all_types_almost_equal(ref, probe)
    # An extra key in a sub-dict makes the dicts unequal.
    probe = copy.deepcopy(ref)
    probe['b']['cc']['fff'] = 'extra'
    assert not tools.all_types_equal(ref, probe)
    # Restrict the comparison to a subset of keys.
    tools.assert_dict_with_all_types_equal({'a': 1, 'b': 1}, {'a': 1, 'b': 3}, keys=['a'])
    # Scalars, sequences and strings.
    tools.assert_all_types_equal(1, 1)
    tools.assert_all_types_equal(1.0, 1.0)
    tools.assert_all_types_equal(1.0, 1)
    tools.assert_all_types_equal(1, 1.0)
    tools.assert_all_types_equal([1], [1])
    tools.assert_all_types_equal([1], [1.0])
    tools.assert_all_types_equal('a', 'a')
    tools.assert_all_types_almost_equal(1.0, 1.0 + 1e-5)
    tools.assert_all_types_almost_equal(np.array([1.0]), np.array([1.0 + 1e-5]))
    assert not tools.all_types_equal(1, 2)
    assert not tools.all_types_equal(1.0, 1.1)
    assert not tools.all_types_equal([1], [1, 2])
    assert not tools.all_types_equal('a', 'b')
    # Differing types are rejected when strict=True.
    assert not tools.all_types_equal(1.0, 1, strict=True)
    # keys=[...] also lets extra entries be ignored in both dicts.
    probe = copy.deepcopy(ref)
    probe['c'] = 1.0
    assert tools.dict_with_all_types_equal(ref, probe, keys=['a', 'b', type(1)])
    # Same array content but different number of dimensions: (x == y).all()
    # is True despite the shape mismatch, so array_equal must catch it.
    arr = np.random.rand(1, 2, 3)
    expanded = arr[None, ...]
    assert (arr == expanded).all()
    assert not tools.array_equal(arr, expanded)
    arr = np.random.rand(1, 2, 3)
    expanded = (arr + 1e-8)[None, ...]
    assert not (arr == expanded).all()  # make sure they are numerically different
    assert np.allclose(arr, expanded)  # True but should be False
    assert not tools.array_almost_equal(arr, expanded)  # ok
    plain = 1.0
    boxed = np.array([1.0 + 1e-9])[0]
    assert not tools.all_types_equal(plain, boxed)
    assert not tools.all_types_equal(boxed, plain)
    assert tools.all_types_almost_equal(plain, boxed)
    assert tools.all_types_almost_equal(boxed, plain)
| [
"copy.deepcopy",
"pwtools.test.tools.assert_dict_with_all_types_equal",
"pwtools.test.tools.array_equal",
"numpy.random.rand",
"numpy.allclose",
"pwtools.test.tools.assert_all_types_equal",
"pwtools.test.tools.array_almost_equal",
"numpy.array",
"pwtools.test.tools.assert_all_types_almost_equal",
... | [((347, 364), 'copy.deepcopy', 'copy.deepcopy', (['x1'], {}), '(x1)\n', (360, 364), False, 'import copy\n'), ((369, 415), 'pwtools.test.tools.assert_dict_with_all_types_equal', 'tools.assert_dict_with_all_types_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (407, 415), False, 'from pwtools.test import tools\n'), ((420, 456), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (448, 456), False, 'from pwtools.test import tools\n'), ((602, 619), 'copy.deepcopy', 'copy.deepcopy', (['x1'], {}), '(x1)\n', (615, 619), False, 'import copy\n'), ((683, 726), 'pwtools.test.tools.assert_all_types_almost_equal', 'tools.assert_all_types_almost_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (718, 726), False, 'from pwtools.test import tools\n'), ((781, 798), 'copy.deepcopy', 'copy.deepcopy', (['x1'], {}), '(x1)\n', (794, 798), False, 'import copy\n'), ((920, 1010), 'pwtools.test.tools.assert_dict_with_all_types_equal', 'tools.assert_dict_with_all_types_equal', (["{'a': 1, 'b': 1}", "{'a': 1, 'b': 3}"], {'keys': "['a']"}), "({'a': 1, 'b': 1}, {'a': 1, 'b': 3},\n keys=['a'])\n", (958, 1010), False, 'from pwtools.test import tools\n'), ((1034, 1068), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['(1)', '(1)'], {}), '(1, 1)\n', (1062, 1068), False, 'from pwtools.test import tools\n'), ((1073, 1111), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1101, 1111), False, 'from pwtools.test import tools\n'), ((1116, 1152), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['(1.0)', '(1)'], {}), '(1.0, 1)\n', (1144, 1152), False, 'from pwtools.test import tools\n'), ((1157, 1193), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['(1)', '(1.0)'], {}), '(1, 1.0)\n', (1185, 1193), False, 'from pwtools.test import tools\n'), ((1198, 1236), 
'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['[1]', '[1]'], {}), '([1], [1])\n', (1226, 1236), False, 'from pwtools.test import tools\n'), ((1241, 1281), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['[1]', '[1.0]'], {}), '([1], [1.0])\n', (1269, 1281), False, 'from pwtools.test import tools\n'), ((1286, 1324), 'pwtools.test.tools.assert_all_types_equal', 'tools.assert_all_types_equal', (['"""a"""', '"""a"""'], {}), "('a', 'a')\n", (1314, 1324), False, 'from pwtools.test import tools\n'), ((1329, 1382), 'pwtools.test.tools.assert_all_types_almost_equal', 'tools.assert_all_types_almost_equal', (['(1.0)', '(1.0 + 1e-05)'], {}), '(1.0, 1.0 + 1e-05)\n', (1364, 1382), False, 'from pwtools.test import tools\n'), ((1822, 1839), 'copy.deepcopy', 'copy.deepcopy', (['x1'], {}), '(x1)\n', (1835, 1839), False, 'import copy\n'), ((2000, 2023), 'numpy.random.rand', 'np.random.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (2014, 2023), True, 'import numpy as np\n'), ((2219, 2242), 'numpy.random.rand', 'np.random.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (2233, 2242), True, 'import numpy as np\n'), ((2353, 2370), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b)\n', (2364, 2370), True, 'import numpy as np\n'), ((2610, 2651), 'pwtools.test.tools.all_types_almost_equal', 'tools.all_types_almost_equal', (['flt', 'np_flt'], {}), '(flt, np_flt)\n', (2638, 2651), False, 'from pwtools.test import tools\n'), ((2663, 2704), 'pwtools.test.tools.all_types_almost_equal', 'tools.all_types_almost_equal', (['np_flt', 'flt'], {}), '(np_flt, flt)\n', (2691, 2704), False, 'from pwtools.test import tools\n'), ((527, 556), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (548, 556), False, 'from pwtools.test import tools\n'), ((849, 878), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (870, 878), False, 'from 
pwtools.test import tools\n'), ((1420, 1435), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1428, 1435), True, 'import numpy as np\n'), ((1437, 1460), 'numpy.array', 'np.array', (['[1.0 + 1e-05]'], {}), '([1.0 + 1e-05])\n', (1445, 1460), True, 'import numpy as np\n'), ((1474, 1501), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['(1)', '(2)'], {}), '(1, 2)\n', (1495, 1501), False, 'from pwtools.test import tools\n'), ((1517, 1548), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['(1.0)', '(1.1)'], {}), '(1.0, 1.1)\n', (1538, 1548), False, 'from pwtools.test import tools\n'), ((1564, 1598), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['[1]', '[1, 2]'], {}), '([1], [1, 2])\n', (1585, 1598), False, 'from pwtools.test import tools\n'), ((1613, 1644), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['"""a"""', '"""b"""'], {}), "('a', 'b')\n", (1634, 1644), False, 'from pwtools.test import tools\n'), ((1710, 1752), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['(1.0)', '(1)'], {'strict': '(True)'}), '(1.0, 1, strict=True)\n', (1731, 1752), False, 'from pwtools.test import tools\n'), ((2187, 2210), 'pwtools.test.tools.array_equal', 'tools.array_equal', (['a', 'b'], {}), '(a, b)\n', (2204, 2210), False, 'from pwtools.test import tools\n'), ((2412, 2442), 'pwtools.test.tools.array_almost_equal', 'tools.array_almost_equal', (['a', 'b'], {}), '(a, b)\n', (2436, 2442), False, 'from pwtools.test import tools\n'), ((2475, 2498), 'numpy.array', 'np.array', (['[1.0 + 1e-09]'], {}), '([1.0 + 1e-09])\n', (2483, 2498), True, 'import numpy as np\n'), ((2514, 2548), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['flt', 'np_flt'], {}), '(flt, np_flt)\n', (2535, 2548), False, 'from pwtools.test import tools\n'), ((2564, 2598), 'pwtools.test.tools.all_types_equal', 'tools.all_types_equal', (['np_flt', 'flt'], {}), '(np_flt, flt)\n', (2585, 2598), False, 'from 
pwtools.test import tools\n'), ((221, 237), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (229, 237), True, 'import numpy as np\n'), ((268, 286), 'numpy.array', 'np.array', (['[1, 2.0]'], {}), '([1, 2.0])\n', (276, 286), True, 'import numpy as np\n'), ((317, 335), 'numpy.array', 'np.array', (['[2, 4.0]'], {}), '([2, 4.0])\n', (325, 335), True, 'import numpy as np\n')] |
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2020
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import matplotlib.pyplot as plt
from multiprocessing import Queue, Pool
import json
import os
import logging
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score
import pandas as pd
import matplotlib
# Use a non-interactive backend so figures can be rendered headless;
# all plots below are written to files under metrics/<model_name>/.
matplotlib.use('Agg')
fig_size = (9, 6)  # default figure size for all metric plots
fig_font = 15      # base font size the rc params below are derived from
# Shared matplotlib rc settings applied module-wide (and re-applied in plots).
plot_params = {'legend.fontsize': 'large',
               'figure.figsize': fig_size,
               'axes.labelsize': fig_font,
               'axes.titlesize': fig_font,
               'xtick.labelsize': fig_font*0.75,
               'ytick.labelsize': fig_font*0.75,
               'axes.titlepad': fig_font}
plt.rcParams.update(plot_params)
def plot_anomaly_histogram(inlier_score, outlier_score, title="Anomaly Score Histogram", threshold=0.5, model_name="_", show_plot=True):
    """Plot overlaid histograms of inlier vs outlier anomaly scores.

    Draws a dashed vertical line at `threshold` and saves the figure to
    metrics/<model_name>/histogram.png.

    Args:
        inlier_score: anomaly scores for normal samples.
        outlier_score: anomaly scores for anomalous samples.
        title: chart title suffix.
        threshold: decision threshold marked on the plot.
        model_name: subdirectory under metrics/ to save into.
        show_plot: display the figure instead of closing it.
    """
    plt.figure()
    inlier_df = pd.DataFrame(data=inlier_score, columns=["score"])
    outlier_df = pd.DataFrame(data=outlier_score, columns=["score"])
    plt.hist(inlier_df["score"])
    plt.hist(outlier_df["score"])
    plt.legend(["Inlier Data", "Outlier Data"])
    plt.title(model_name.upper() + " | " + title + " | Threshold: " + str(threshold))
    plt.axvline(threshold, color="r", linestyle="dashed")
    plt.savefig("metrics/" + model_name + "/histogram.png")
    plt.rcParams.update(plot_params)
    if show_plot:
        plt.show()
    else:
        plt.close()
def compute_accuracy(threshold, loss, y, dataset_name, show_plot=False, model_name="_"):
    """Score binary predictions obtained by thresholding anomaly scores.

    Computes accuracy, precision, recall, F1, F2 and ROC-AUC, plots the ROC
    curve to metrics/<model_name>/roc.png, and returns the metrics.

    Args:
        threshold: scores strictly above this are labeled 1 (anomalous).
        loss: per-sample anomaly scores.
        y: ground-truth binary labels.
        dataset_name: kept for API compatibility (unused here).
        show_plot: display the ROC figure instead of closing it.
        model_name: subdirectory under metrics/ to save into.

    Returns:
        dict with keys acc/precision/recall/f1/f2/roc/threshold.
    """
    predicted = np.array([1 if score > threshold else 0 for score in loss]).astype(int)
    metrics = {"acc": accuracy_score(y, predicted),
               "precision": precision_score(y, predicted),
               "recall": recall_score(y, predicted),
               "f1": f1_score(y, predicted),
               "f2": fbeta_score(y, predicted, beta=2),
               "roc": roc_auc_score(y, loss),
               "threshold": round(threshold, 3)}
    fpr, tpr, _ = roc_curve(y, loss)
    plt.figure()
    # Plot ROC curve
    plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % metrics["roc"])
    plt.plot([0, 1], [0, 1], 'k--')  # random predictions curve
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate or (1 - Specifity)')
    plt.ylabel('True Positive Rate or (Sensitivity)')
    plt.title(model_name.upper() + " | " + 'Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.rcParams.update(plot_params)
    plt.savefig("metrics/" + model_name + "/roc.png")
    if show_plot:
        plt.show()
    else:
        plt.close()
    return metrics
def test_threshold(threshold, loss, y):
    """Return {'acc', 'threshold'} for predictions thresholded at `threshold`.

    Scores strictly above the threshold are labeled 1 (anomalous).
    """
    labels_pred = np.array([int(score > threshold) for score in loss])
    return {"acc": accuracy_score(y, labels_pred),
            "threshold": round(threshold, 3)}
def get_scores_and_labels(outlier_score, inlier_score):
    """Concatenate inlier then outlier scores with matching 0/1 labels.

    Returns:
        (all_scores, all_labels): inliers first (label 0.0), then outliers
        (label 1.0), both as plain lists.
    """
    all_scores = list(inlier_score) + list(outlier_score)
    all_labels = list(np.zeros(len(inlier_score))) + list(np.ones(len(outlier_score)))
    return all_scores, all_labels
def plot_metrics(best_metrics, model_name="_", show_plot=False):
    """Render model metrics as a horizontal bar chart and persist them.

    Saves the chart to metrics/<model_name>/metrics.png and the raw metrics
    (including the threshold) to metrics/<model_name>/metrics.json.

    Args:
        best_metrics: dict of metric name -> value, must contain 'threshold'.
        model_name: subdirectory under metrics/ to save into.
        show_plot: display the figure instead of closing it.
    """
    fig, ax = plt.subplots()
    bar_values = best_metrics.copy()
    del bar_values["threshold"]  # the threshold is a setting, not a score
    ax.barh(list(bar_values.keys()), list(bar_values.values()), color="blue")
    plt.title(model_name.upper() + " | " + ' Model Performance Metrics')
    plt.xlabel('', fontsize=14)
    plt.ylabel('Model Metrics', fontsize=16)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.box(False)
    # Annotate each bar with its rounded value.
    for pos, val in enumerate(bar_values.values()):
        ax.text(val + 0.01, pos, str(round(val, 3)), color='blue', fontsize=15)
    plt.savefig("metrics/" + model_name + "/metrics.png")
    if show_plot:
        plt.show()
    else:
        plt.close()
    # save metrics to json file
    with open("metrics/" + model_name + "/metrics.json", 'w') as outfile:
        json.dump(best_metrics, outfile)
def evaluate_model(inlier_score, outlier_score, model_name="_", show_plot=True):
    """Find the most accurate decision threshold and produce all metric plots.

    Sweeps every distinct score as a candidate threshold, keeps the one with
    the best accuracy, then computes full metrics, the score histogram and the
    metric bar chart for it (all saved under metrics/<model_name>/).

    Args:
        inlier_score: anomaly scores for normal samples.
        outlier_score: anomaly scores for anomalous samples.
        model_name: subdirectory under metrics/ to save into.
        show_plot: display figures instead of closing them.

    Returns:
        The metrics dict for the best threshold.
    """
    image_directory = "metrics/" + model_name
    if not os.path.exists(image_directory):
        os.makedirs(image_directory)
    all_scores, all_labels = get_scores_and_labels(outlier_score, inlier_score)
    # Every distinct score is a candidate threshold.
    all_thresholds = sorted(set(all_scores))
    logging.info(str(len(all_thresholds)) + "unique thresholds")
    logging.info("Testing all thresholds to find best accuracy ...")
    metric_holder = [test_threshold(candidate, all_scores, all_labels)
                     for candidate in all_thresholds]
    logging.info("Threshold testing complete ...")
    metric_df = pd.DataFrame(metric_holder)
    max_acc = metric_df.sort_values(
        by='acc', ascending=False, na_position='first').iloc[0]
    logging.info("Best accuracy is .. " + str(dict(max_acc)))
    # Recompute the full metric set (incl. ROC plot) at the winning threshold.
    best_metrics = compute_accuracy(
        dict(max_acc)["threshold"], all_scores, all_labels, "test data", model_name=model_name, show_plot=show_plot)
    plot_anomaly_histogram(inlier_score, outlier_score,
                           threshold=best_metrics["threshold"], model_name=model_name, show_plot=show_plot)
    plot_metrics(best_metrics, show_plot=show_plot, model_name=model_name)
    return best_metrics
def save_metrics(loss, threshold, save_path):
    """Write per-sample scores and thresholded classes to a JSON-lines file.

    Each output line has the form
    {"scores": <score>, "class": 0|1, "threshold": <threshold>}, where class
    is 1 when the score is strictly above the threshold.

    Args:
        loss: per-sample anomaly scores.
        threshold: decision threshold applied to every score.
        save_path: destination path for the JSON-lines file.
    """
    y_pred = [1 if e > threshold else 0 for e in loss]
    result = pd.DataFrame(
        {"scores": loss, "class": y_pred, "threshold": threshold})
    # Bug fix: DataFrame.to_json returns None when given a path, so the old
    # `result = result.to_json(...)` rebinding was dead code.
    result.to_json(save_path, orient='records', lines=True)
def load_metrics(metric_path):
    """Load previously saved metrics from a JSON file and return them."""
    with open(metric_path) as json_file:
        return json.load(json_file)
| [
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.box",
"matplotlib.pyplot.figure",
"sklearn.metrics.f1_score",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"sklearn.metrics.fbeta_score",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"os.path.exists",
"matplotlib.pyplot.rcParams.... | [((2439, 2460), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (2453, 2460), False, 'import matplotlib\n'), ((2807, 2839), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plot_params'], {}), '(plot_params)\n', (2826, 2839), True, 'import matplotlib.pyplot as plt\n'), ((2983, 2995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2993, 2995), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3056), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'inlier_score', 'columns': "['score']"}), "(data=inlier_score, columns=['score'])\n", (3018, 3056), True, 'import pandas as pd\n'), ((3067, 3118), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'outlier_score', 'columns': "['score']"}), "(data=outlier_score, columns=['score'])\n", (3079, 3118), True, 'import pandas as pd\n'), ((3124, 3146), 'matplotlib.pyplot.hist', 'plt.hist', (["ndf['score']"], {}), "(ndf['score'])\n", (3132, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3151, 3173), 'matplotlib.pyplot.hist', 'plt.hist', (["adf['score']"], {}), "(adf['score'])\n", (3159, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3178, 3221), 'matplotlib.pyplot.legend', 'plt.legend', (["['Inlier Data', 'Outlier Data']"], {}), "(['Inlier Data', 'Outlier Data'])\n", (3188, 3221), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3379), 'matplotlib.pyplot.axvline', 'plt.axvline', (['threshold'], {'color': '"""r"""', 'linestyle': '"""dashed"""'}), "(threshold, color='r', linestyle='dashed')\n", (3337, 3379), True, 'import matplotlib.pyplot as plt\n'), ((3384, 3439), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('metrics/' + model_name + '/histogram.png')"], {}), "('metrics/' + model_name + '/histogram.png')\n", (3395, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3445, 3477), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plot_params'], {}), '(plot_params)\n', (3464, 3477), True, 'import matplotlib.pyplot as 
plt\n'), ((3729, 3754), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3743, 3754), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3770, 3796), 'sklearn.metrics.precision_score', 'precision_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3785, 3796), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3811, 3834), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3823, 3834), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3848, 3867), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3856, 3867), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3881, 3911), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y', 'y_pred'], {'beta': '(2)'}), '(y, y_pred, beta=2)\n', (3892, 3911), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3940, 3958), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'loss'], {}), '(y, loss)\n', (3949, 3958), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((3973, 3995), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'loss'], {}), '(y, loss)\n', (3986, 3995), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((4001, 4013), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {}), '()\n', (4011, 4013), True, 'import matplotlib.pyplot as plt\n'), ((4040, 4102), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'label': "('ROC curve (area = %0.3f)' % roc_auc)"}), "(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc)\n", (4048, 4102), True, 'import matplotlib.pyplot as plt\n'), ((4107, 4138), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (4115, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4191), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4179, 4191), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4216), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4204, 4216), True, 'import matplotlib.pyplot as plt\n'), ((4221, 4273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate or (1 - Specifity)"""'], {}), "('False Positive Rate or (1 - Specifity)')\n", (4231, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4278, 4327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate or (Sensitivity)"""'], {}), "('True Positive Rate or (Sensitivity)')\n", (4288, 4327), True, 'import matplotlib.pyplot as plt\n'), ((4426, 4455), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4436, 4455), True, 'import matplotlib.pyplot as plt\n'), ((4461, 4493), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plot_params'], {}), '(plot_params)\n', (4480, 4493), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4547), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('metrics/' + model_name + '/roc.png')"], {}), "('metrics/' + model_name + '/roc.png')\n", (4509, 4547), True, 'import matplotlib.pyplot as plt\n'), ((5027, 5052), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (5041, 5052), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, 
f1_score, precision_score, recall_score, fbeta_score, roc_curve, auc, roc_auc_score\n'), ((5518, 5532), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5530, 5532), True, 'import matplotlib.pyplot as plt\n'), ((5745, 5772), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {'fontsize': '(14)'}), "('', fontsize=14)\n", (5755, 5772), True, 'import matplotlib.pyplot as plt\n'), ((5777, 5817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Model Metrics"""'], {'fontsize': '(16)'}), "('Model Metrics', fontsize=16)\n", (5787, 5817), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5845), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (5832, 5845), True, 'import matplotlib.pyplot as plt\n'), ((5850, 5873), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (5860, 5873), True, 'import matplotlib.pyplot as plt\n'), ((5878, 5892), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (5885, 5892), True, 'import matplotlib.pyplot as plt\n'), ((6023, 6076), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('metrics/' + model_name + '/metrics.png')"], {}), "('metrics/' + model_name + '/metrics.png')\n", (6034, 6076), True, 'import matplotlib.pyplot as plt\n'), ((6733, 6797), 'logging.info', 'logging.info', (['"""Testing all thresholds to find best accuracy ..."""'], {}), "('Testing all thresholds to find best accuracy ...')\n", (6745, 6797), False, 'import logging\n'), ((6970, 7016), 'logging.info', 'logging.info', (['"""Threshold testing complete ..."""'], {}), "('Threshold testing complete ...')\n", (6982, 7016), False, 'import logging\n'), ((7034, 7061), 'pandas.DataFrame', 'pd.DataFrame', (['metric_holder'], {}), '(metric_holder)\n', (7046, 7061), True, 'import pandas as pd\n'), ((7892, 7969), 'pandas.DataFrame', 'pd.DataFrame', (["{'scores': scores, 'class': class_vals, 'threshold': threshold}"], {}), "({'scores': scores, 'class': class_vals, 'threshold': 
threshold})\n", (7904, 7969), True, 'import pandas as pd\n'), ((3506, 3516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3535, 3546), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3544, 3546), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4586), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4584, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4605, 4616), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4614, 4616), True, 'import matplotlib.pyplot as plt\n'), ((6105, 6115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6113, 6115), True, 'import matplotlib.pyplot as plt\n'), ((6134, 6145), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6143, 6145), True, 'import matplotlib.pyplot as plt\n'), ((6261, 6293), 'json.dump', 'json.dump', (['best_metrics', 'outfile'], {}), '(best_metrics, outfile)\n', (6270, 6293), False, 'import json\n'), ((6434, 6465), 'os.path.exists', 'os.path.exists', (['image_directory'], {}), '(image_directory)\n', (6448, 6465), False, 'import os\n'), ((6475, 6503), 'os.makedirs', 'os.makedirs', (['image_directory'], {}), '(image_directory)\n', (6486, 6503), False, 'import os\n'), ((8137, 8157), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (8146, 8157), False, 'import json\n'), ((3651, 3704), 'numpy.array', 'np.array', (['[(1 if e > threshold else 0) for e in loss]'], {}), '([(1 if e > threshold else 0) for e in loss])\n', (3659, 3704), True, 'import numpy as np\n'), ((4949, 5002), 'numpy.array', 'np.array', (['[(1 if e > threshold else 0) for e in loss]'], {}), '([(1 if e > threshold else 0) for e in loss])\n', (4957, 5002), True, 'import numpy as np\n')] |
# SPDX-License-Identifier: MIT
# Copyright © 2022 <NAME>
"""This script validates TFLite compatibility.
The script is used to replicate Github Issue #36
(https://github.com/patlevin/tfjs-to-tf/issues/36)
It expects the path to a converted blazeface model
(https://tfhub.dev/tensorflow/tfjs-model/blazeface/1/default/1?tfjs-format=compressed)
that was converted to a saved model, e.g. `tfjs_graph_converter
./models/blazeface ./models/blazeface_savedmodel --output_format tf_saved_model
`.
The script tries to load and convert the TF savedmodel to TFLite and reports
whether the conversion succeeded, e.g.:
`python issue-36.py ./models/blazeface_savedmodel`
Possible output formats are `FP32` (the default) and `INT8`
1) `python issue-36.py ./models/blazeface_savedmodel FP32`
2) `python issue-36.py ./models/blazeface_savedmodel INT8`
A model that was converted without the `--compat_mode=tflite` option will fail
to convert given INT8 (2)
"""
from enum import Enum
import os
import sys
import numpy as np
import tensorflow as tf
class Mode(Enum):
FLOAT32 = 1
QUANTISED = 2
def convert_to_tflite(savedmodel_path: str, mode: Mode):
def representative_dummy_dataset():
for _ in range(100):
yield [
np.zeros(128*128*3).reshape(1, 128, 128, 3).astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_saved_model(savedmodel_path)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dummy_dataset
if mode == Mode.FLOAT32:
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
elif mode == Mode.QUANTISED:
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
else:
raise Exception('Invalid conversion mode')
tflite_model = converter.convert()
return tflite_model
def main(args: list[str]):
if len(args) < 2:
print('Test conversion from TF saved_model to TFLite')
print()
print(f'Usage: {args[0]} <path_to_saved_model> [FP32|INT8]')
exit(1)
savedmodel_path = args[1]
requested_mode = args[2].lower() if len(args) > 2 else 'fp32'
if requested_mode == 'fp32':
mode = Mode.FLOAT32
elif requested_mode == 'int8':
mode = Mode.QUANTISED
else:
print(f'Usage: {args[0]} <path_to_saved_model> [FP32|INT8]')
exit(1)
try:
_ = convert_to_tflite(savedmodel_path, mode=mode)
except Exception:
print(f'CONVERSION FAILED: target mode={requested_mode}')
exit(2)
print(f'Conversion successful: target mode={requested_mode}')
if __name__ == '__main__':
# reduce TF logging spam
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
# disable CUDA, since conversion may fail if a CUDA-capable GPU is
# installed that doesn't have the required capabilities or lacks VRAM
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
main(sys.argv)
| [
"tensorflow.lite.TFLiteConverter.from_saved_model",
"numpy.zeros"
] | [((1381, 1438), 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['savedmodel_path'], {}), '(savedmodel_path)\n', (1421, 1438), True, 'import tensorflow as tf\n'), ((1298, 1321), 'numpy.zeros', 'np.zeros', (['(128 * 128 * 3)'], {}), '(128 * 128 * 3)\n', (1306, 1321), True, 'import numpy as np\n')] |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import ENTITY_TYPE, get_output_types, recursive_tile
from ...serialization.serializables import AnyField, Int8Field, KeyField
from ...utils import has_unknown_shape
from ..operands import DataFrameOperandMixin, DataFrameOperand
from ..utils import parse_index, validate_axis
class DataFrameSetAxis(DataFrameOperand, DataFrameOperandMixin):
_op_code_ = opcodes.DATAFRAME_SET_AXIS
_input = KeyField('input')
_axis = Int8Field('axis')
_value = AnyField('value')
def __init__(self, value=None, axis=None, **kw):
super().__init__(_value=value, _axis=axis, **kw)
@property
def input(self):
return self._input
@property
def value(self):
return self._value
@property
def axis(self):
return self._axis
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = inputs[0]
if isinstance(self.value, ENTITY_TYPE):
self._value = inputs[-1]
def __call__(self, df_or_series):
new_size = self.value.shape[0]
expect_size = df_or_series.axes[self.axis].shape[0]
if not np.isnan(new_size) and not np.isnan(expect_size) \
and new_size != expect_size:
raise ValueError(
f'Length mismatch: Expected axis has {expect_size} elements, '
f'new values have {new_size} elements'
)
params = df_or_series.params
if self.axis == 0:
params['index_value'] = parse_index(self.value) \
if isinstance(self.value, pd.Index) else self.value.index_value
else:
params['columns_value'] = parse_index(self.value, store_data=True) \
if isinstance(self.value, pd.Index) else self.value.index_value
pd_columns = self.value.index_value.to_pandas() \
if isinstance(self.value, ENTITY_TYPE) else self.value
params['dtypes'] = params['dtypes'].set_axis(pd_columns)
self._output_types = get_output_types(df_or_series)
inputs = [df_or_series]
if isinstance(self.value, ENTITY_TYPE):
inputs += [self.value]
return self.new_tileable(inputs, **params)
@classmethod
def tile(cls, op: 'DataFrameSetAxis'):
output = op.outputs[0]
input_tileables = [op.input]
value = op.value
if isinstance(value, ENTITY_TYPE):
input_tileables.append(value)
if has_unknown_shape(value):
yield
if any(np.isnan(s) for s in op.input.nsplits[op.axis]):
yield
if op.input.shape[op.axis] != value.shape[0]:
raise ValueError(
f'Length mismatch: Expected axis has {value.shape[0]} elements, '
f'new values have {op.input.shape[op.axis]} elements'
)
if isinstance(value, ENTITY_TYPE):
value = yield from recursive_tile(
value.rechunk({0: op.input.nsplits[op.axis]}))
input_tileables[-1] = value
slices = np.array((0,) + op.input.nsplits[op.axis]).cumsum()
slice_left = slices[:-1]
slice_right = slices[1:]
chunks = []
param_cache = [None] * len(op.input.nsplits[op.axis])
for inp_chunk in op.input.chunks:
input_chunks = [inp_chunk]
value_index = inp_chunk.index[op.axis]
params = inp_chunk.params
if isinstance(value, ENTITY_TYPE):
value_data = value.chunks[value_index]
input_chunks.append(value_data)
else:
value_data = value[slice_left[value_index]:slice_right[value_index]]
if param_cache[value_index] is None:
cached_params = param_cache[value_index] = dict()
if isinstance(value, ENTITY_TYPE):
if op.axis == 0:
cached_params['index_value'] = value_data.index_value
else:
cached_params['columns_value'] = value_data.index_value
cached_params['dtypes'] = output.dtypes.iloc[
slice_left[value_index]:slice_right[value_index]
]
else:
if op.axis == 0:
cached_params['index_value'] = parse_index(value_data)
else:
cached_params['columns_value'] = parse_index(value_data, store_data=True)
cached_params['dtypes'] = params['dtypes'].set_axis(value_data)
params.update(param_cache[value_index])
new_op = op.copy().reset_key()
new_op._value = value_data
chunks.append(new_op.new_chunk(input_chunks, **params))
params = op.outputs[0].params
params['chunks'] = chunks
params['nsplits'] = op.input.nsplits
new_op = op.copy().reset_key()
return new_op.new_tileables(input_tileables, **params)
@classmethod
def execute(cls, ctx, op: 'DataFrameSetAxis'):
in_data = ctx[op.input.key]
value = op.value
if isinstance(value, ENTITY_TYPE):
value = ctx[value.key]
ctx[op.outputs[0].key] = in_data.set_axis(value, axis=op.axis)
def _set_axis(df_or_axis, labels, axis=0, inplace=False):
axis = validate_axis(axis, df_or_axis)
if not isinstance(labels, ENTITY_TYPE) and not isinstance(labels, pd.Index):
labels = pd.Index(labels)
op = DataFrameSetAxis(value=labels, axis=axis)
result = op(df_or_axis)
if inplace:
df_or_axis.data = result.data
else:
return result
def df_set_axis(df, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1 identifies the columns.
inplace : bool, default False
Whether to return a new DataFrame instance.
Returns
-------
renamed : DataFrame or None
An object of type DataFrame or None if ``inplace=True``.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
>>> import mars.dataframe as md
>>> df = md.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index').execute()
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns').execute()
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df.execute()
i ii
0 1 4
1 2 5
2 3 6
"""
return _set_axis(df, labels, axis=axis, inplace=inplace)
def series_set_axis(series, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index'}, default 0
The axis to update. The value 0 identifies the rows.
inplace : bool, default False
Whether to return a new Series instance.
Returns
-------
renamed : Series or None
An object of type Series or None if ``inplace=True``.
See Also
--------
Series.rename_axis : Alter the name of the index.
Examples
--------
>>> import mars.dataframe as md
>>> s = md.Series([1, 2, 3])
>>> s.execute()
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0).execute()
a 1
b 2
c 3
dtype: int64
"""
return _set_axis(series, labels, axis=axis, inplace=inplace)
| [
"numpy.array",
"numpy.isnan",
"pandas.Index"
] | [((6166, 6182), 'pandas.Index', 'pd.Index', (['labels'], {}), '(labels)\n', (6174, 6182), True, 'import pandas as pd\n'), ((1792, 1810), 'numpy.isnan', 'np.isnan', (['new_size'], {}), '(new_size)\n', (1800, 1810), True, 'import numpy as np\n'), ((1819, 1840), 'numpy.isnan', 'np.isnan', (['expect_size'], {}), '(expect_size)\n', (1827, 1840), True, 'import numpy as np\n'), ((3196, 3207), 'numpy.isnan', 'np.isnan', (['s'], {}), '(s)\n', (3204, 3207), True, 'import numpy as np\n'), ((3726, 3768), 'numpy.array', 'np.array', (['((0,) + op.input.nsplits[op.axis])'], {}), '((0,) + op.input.nsplits[op.axis])\n', (3734, 3768), True, 'import numpy as np\n')] |
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError
from PuzzleLib.Modules.ConvND import ConvND
class Conv2D(ConvND):
def __init__(self, inmaps, outmaps, size, stride=1, pad=0, dilation=1, wscale=1.0, useBias=True,
name=None, initscheme=None, empty=False, groups=1):
super().__init__(
2, inmaps, outmaps, size, stride, pad, dilation, wscale, useBias, name, initscheme, empty, groups
)
self.registerBlueprint(locals())
def checkDataShape(self, shape):
if len(shape) != 4:
raise ModuleError("Data must be 4d tensor")
_, inmaps, inh, inw = shape
_, _, fh, fw = self.W.shape
hpad, wpad = self.pad
hdilation, wdilation = self.dilation
if inmaps != self.W.shape[1] * self.groups:
raise ModuleError("Data has %d maps (expected: %d)" % (inmaps, self.W.shape[1] * self.groups))
exth, extw = inh + 2 * hpad, inw + 2 * wpad
extfh, extfw = hdilation * (fh - 1) + 1, wdilation * (fw - 1) + 1
if exth < extfh:
raise ModuleError("Data maps height is too small (got %d, expected at least %d)" % (exth, extfh))
if extw < extfw:
raise ModuleError("Data maps width is too small (got %d, expected at least %d)" % (extw, extfw))
def dataShapeFrom(self, shape):
batchsize, inmaps, inh, inw = shape
outmaps, _, fh, fw = self.W.shape
hpad, wpad = self.pad
hdilation, wdilation = self.dilation
hstride, wstride = self.stride
outh = (inh + 2 * hpad - hdilation * (fh - 1) - 1) // hstride + 1
outw = (inw + 2 * wpad - wdilation * (fw - 1) - 1) // wstride + 1
return batchsize, outmaps, outh, outw
def checkGradShape(self, shape):
if len(shape) != 4:
raise ModuleError("Grad must be 4d tensor")
_, outmaps, _, _ = shape
if outmaps != self.W.shape[0]:
raise ModuleError("Grad has %d maps (expected: %d)" % (outmaps, self.W.shape[0]))
def gradShapeFrom(self, shape):
batchsize, outmaps, outh, outw = shape
_, inmaps, fh, fw = self.W.shape
hpad, wpad = self.pad
hdilation, wdilation = self.dilation
hstride, wstride = self.stride
inmaps *= self.groups
inh = (outh - 1) * hstride + hdilation * (fh - 1) - 2 * hpad + 1
inw = (outw - 1) * wstride + wdilation * (fw - 1) - 2 * wpad + 1
return batchsize, inmaps, inh, inw
def unittest():
oneMapTest()
multiOutMapsTest()
multiInMapsTest()
if Config.backend in {Config.Backend.cuda, Config.Backend.hip}:
multiMapsWithPadsTest()
groupTest()
trainTest()
def oneMapTest():
batchsize, inmaps, h, w = 1, 1, 5, 5
outmaps, size, postpad = 1, 2, 1
hostData = np.random.randn(batchsize, inmaps, h, w).astype(np.float32)
data = gpuarray.to_gpu(hostData)
conv = Conv2D(inmaps, outmaps, size)
conv(data)
hostW, hostBias = conv.W.get(), conv.b.get()
hostOutData = np.empty(conv.data.shape, dtype=np.float32)
hostOutData[:, 0, :, :] = hostBias[0, 0, 0, 0]
for y in range(hostOutData.shape[2]):
for x in range(hostOutData.shape[3]):
for dy in range(size):
for dx in range(size):
hostOutData[0, 0, y, x] += hostData[0, 0, y + dy, x + dx] * hostW[0, 0, dy, dx]
assert np.allclose(hostOutData, conv.data.get())
hostGrad = np.random.randn(*conv.data.shape).astype(np.float32)
grad = gpuarray.to_gpu(hostGrad)
conv.backward(grad)
hostInGrad = np.zeros(data.shape).astype(np.float32)
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
for dy in range(size):
for dx in range(size):
hostInGrad[0, 0, y + dy, x + dx] += hostW[0, 0, dy, dx] * hostGrad[0, 0, y, x]
assert np.allclose(hostInGrad, conv.grad.get())
def multiOutMapsTest():
batchsize, inmaps, h, w = 1, 1, 8, 8
outmaps, size = 2, 4
hostData = np.random.randn(batchsize, inmaps, h, w).astype(np.float32)
data = gpuarray.to_gpu(hostData)
conv = Conv2D(inmaps, outmaps, size)
conv(data)
hostW, hostBias = conv.W.get(), conv.b.get()
hostOutData = np.empty(conv.data.shape, dtype=np.float32)
for c in range(outmaps):
hostOutData[:, c, :, :] = hostBias[0, c, 0, 0]
for oc in range(outmaps):
for y in range(conv.data.shape[2]):
for x in range(conv.data.shape[3]):
for dy in range(size):
for dx in range(size):
hostOutData[0, oc, y, x] += hostData[0, 0, y + dy, x + dx] * hostW[oc, 0, dy, dx]
assert np.allclose(hostOutData, conv.data.get())
def multiInMapsTest():
batchsize, inmaps, h, w = 1, 2, 10, 10
outmaps, size = 1, 4
hostData = np.random.randn(batchsize, inmaps, h, w).astype(np.float32)
data = gpuarray.to_gpu(hostData)
conv = Conv2D(inmaps, outmaps, size)
conv(data)
hostW, hostBias = conv.W.get(), conv.b.get()
hostOutData = np.empty(conv.data.shape, dtype=np.float32)
for c in range(outmaps):
hostOutData[:, c, :, :] = hostBias[0, c, 0, 0]
for ic in range(inmaps):
for y in range(conv.data.shape[2]):
for x in range(conv.data.shape[3]):
for dy in range(size):
for dx in range(size):
hostOutData[0, 0, y, x] += hostData[0, ic, y + dy, x + dx] * hostW[0, ic, dy, dx]
assert np.allclose(hostOutData, conv.data.get())
def multiMapsWithPadsTest():
batchsize, inmaps, h, w = 3, 4, 3, 3
outmaps, size, stride, pad, dilation = 4, 3, 2, 2, 2
hostData = np.random.randn(batchsize, inmaps, h, w).astype(np.float32)
data = gpuarray.to_gpu(hostData)
conv = Conv2D(inmaps, outmaps, size=size, stride=stride, pad=pad, dilation=dilation, initscheme="gaussian")
conv(data)
hostW, hostBias = conv.W.get(), conv.b.get()
dl = dilation
hostExtData = np.zeros(shape=(batchsize, inmaps, h + 2 * pad, w + 2 * pad))
hostExtData[:, :, pad:-pad, pad:-pad] = hostData
hostData = hostExtData
hostOutData = np.empty(conv.data.shape, dtype=np.float32)
for c in range(outmaps):
hostOutData[:, c, :, :] = hostBias[0, c, 0, 0]
for b in range(batchsize):
for oc in range(outmaps):
for ic in range(inmaps):
for y in range(conv.data.shape[2]):
for x in range(conv.data.shape[3]):
for dy in range(size):
for dx in range(size):
hostOutData[b,oc,y,x] += hostData[b,ic,y*stride+dy*dl,x*stride+dx*dl]*hostW[oc,ic,dy,dx]
assert np.allclose(hostOutData, conv.data.get())
hostGrad = np.random.randn(*conv.data.shape).astype(np.float32)
grad = gpuarray.to_gpu(hostGrad)
conv.backward(grad)
hostInGrad = np.zeros(hostData.shape, dtype=np.float32)
for b in range(batchsize):
for ic in range(inmaps):
for oc in range(outmaps):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
for dy in range(size):
for dx in range(size):
hostInGrad[b,ic,y*stride+dy*dl,x*stride+dx*dl] += hostW[oc,ic,dy,dx]*hostGrad[b,oc,y,x]
assert np.allclose(hostInGrad[:, :, pad:-pad, pad:-pad], conv.grad.get())
hostWGrad = np.zeros(conv.getVar("W").grad.shape, dtype=np.float32)
for b in range(batchsize):
for oc in range(outmaps):
for ic in range(inmaps):
for dy in range(size):
for dx in range(size):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
hostWGrad[oc,ic,dy,dx]+=hostData[b,ic,y*stride+dy*dl,x*stride+dx*dl]*hostGrad[b,oc,y,x]
assert np.allclose(hostWGrad, conv.getVar("W").grad.get())
hostBGrad = np.empty(hostBias.shape, dtype=np.float32)
for oc in range(outmaps):
hostBGrad[0, oc, 0, 0] = np.sum(hostGrad[:, oc, :, :])
assert np.allclose(hostBGrad, conv.getVar("b").grad.get())
def groupTest():
batchsize, inmaps, inh, inw = 3, 4, 4, 5
size, outmaps = 3, 4
groups = 2
hostData = np.random.randn(batchsize, inmaps, inh, inw).astype(np.float32)
data = gpuarray.to_gpu(hostData)
conv = Conv2D(inmaps, outmaps, size=size, initscheme="gaussian", groups=groups)
conv(data)
hostOutData = np.empty(conv.data.shape, dtype=np.float32)
hostW, hostBias = conv.W.get(), conv.b.get()
for c in range(outmaps):
hostOutData[:, c, :, :] = hostBias[0, c, 0, 0]
ingrpsize = inmaps // groups
outgrpsize = outmaps // groups
for g in range(groups):
hostOutGroup = hostOutData[:, g * outgrpsize:(g + 1) * outgrpsize, :, :]
hostGroup = hostData[:, g * ingrpsize:(g + 1) * ingrpsize, :, :]
for b in range(batchsize):
for oc in range(outgrpsize):
for ic in range(ingrpsize):
for y in range(conv.data.shape[2]):
for x in range(conv.data.shape[3]):
for dy in range(size):
for dx in range(size):
hostOutGroup[b, oc, y, x] += hostGroup[b, ic, y + dy, x + dx] * \
hostW[g * outgrpsize + oc, ic, dy, dx]
assert np.allclose(hostOutData, conv.data.get())
hostGrad = np.random.randn(*conv.data.shape).astype(np.float32)
grad = gpuarray.to_gpu(hostGrad)
conv.backward(grad)
hostInGrad = np.zeros(hostData.shape, dtype=np.float32)
for g in range(groups):
hostGroup = hostGrad[:, g * outgrpsize:(g + 1) * outgrpsize, :, :]
hostInGroup = hostInGrad[:, g * ingrpsize:(g + 1) * ingrpsize, :, :]
for b in range(batchsize):
for ic in range(ingrpsize):
for oc in range(outgrpsize):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
for dy in range(size):
for dx in range(size):
hostInGroup[b, ic, y + dy, x + dx] += hostW[g * outgrpsize + oc, ic, dy, dx] * \
hostGroup[b, oc, y, x]
assert np.allclose(hostInGrad, conv.grad.get())
hostWGrad = np.zeros(conv.getVar("W").grad.shape, dtype=np.float32)
for g in range(groups):
hostGradGroup = hostGrad[:, g * outgrpsize:(g + 1) * outgrpsize, :, :]
hostDataGroup = hostData[:, g * ingrpsize:(g + 1) * ingrpsize, :, :]
for b in range(batchsize):
for oc in range(outgrpsize):
for ic in range(ingrpsize):
for dy in range(size):
for dx in range(size):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
hostWGrad[g * outgrpsize + oc, ic, dy, dx] += hostDataGroup[b, ic, y+dy, x+dx] * \
hostGradGroup[b, oc, y, x]
assert np.allclose(hostWGrad, conv.getVar("W").grad.get())
hostBGrad = np.empty(hostBias.shape, dtype=np.float32)
for oc in range(outmaps):
hostBGrad[0, oc, 0, 0] = np.sum(hostGrad[:, oc, :, :])
assert np.allclose(hostBGrad, conv.getVar("b").grad.get())
def trainTest():
batchsize, inmaps, h, w = 5, 1, 8, 8
outmaps = 1
size = 8
data = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, inmaps, h, w)).astype(np.float32))
conv = Conv2D(inmaps, outmaps, size)
from PuzzleLib.Cost.MSE import MSE
mse = MSE()
target = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, outmaps, 1, 1)).astype(np.float32))
for i in range(100):
learnRate = 1e-2
conv(data)
error, grad = mse(conv.data, target)
conv.backward(grad)
conv.updateParams(learnRate)
if (i + 1) % 5 == 0:
print("Iteration #%d error: %s" % (i + 1, error))
if __name__ == "__main__":
unittest()
| [
"numpy.sum",
"PuzzleLib.Backend.gpuarray.to_gpu",
"numpy.random.randn",
"numpy.empty",
"numpy.zeros",
"numpy.random.normal",
"PuzzleLib.Modules.Module.ModuleError",
"PuzzleLib.Cost.MSE.MSE"
] | [((2656, 2681), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (2671, 2681), False, 'from PuzzleLib.Backend import gpuarray\n'), ((2796, 2839), 'numpy.empty', 'np.empty', (['conv.data.shape'], {'dtype': 'np.float32'}), '(conv.data.shape, dtype=np.float32)\n', (2804, 2839), True, 'import numpy as np\n'), ((3231, 3256), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostGrad'], {}), '(hostGrad)\n', (3246, 3256), False, 'from PuzzleLib.Backend import gpuarray\n'), ((3761, 3786), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (3776, 3786), False, 'from PuzzleLib.Backend import gpuarray\n'), ((3900, 3943), 'numpy.empty', 'np.empty', (['conv.data.shape'], {'dtype': 'np.float32'}), '(conv.data.shape, dtype=np.float32)\n', (3908, 3943), True, 'import numpy as np\n'), ((4487, 4512), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (4502, 4512), False, 'from PuzzleLib.Backend import gpuarray\n'), ((4626, 4669), 'numpy.empty', 'np.empty', (['conv.data.shape'], {'dtype': 'np.float32'}), '(conv.data.shape, dtype=np.float32)\n', (4634, 4669), True, 'import numpy as np\n'), ((5248, 5273), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (5263, 5273), False, 'from PuzzleLib.Backend import gpuarray\n'), ((5474, 5535), 'numpy.zeros', 'np.zeros', ([], {'shape': '(batchsize, inmaps, h + 2 * pad, w + 2 * pad)'}), '(shape=(batchsize, inmaps, h + 2 * pad, w + 2 * pad))\n', (5482, 5535), True, 'import numpy as np\n'), ((5627, 5670), 'numpy.empty', 'np.empty', (['conv.data.shape'], {'dtype': 'np.float32'}), '(conv.data.shape, dtype=np.float32)\n', (5635, 5670), True, 'import numpy as np\n'), ((6193, 6218), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostGrad'], {}), '(hostGrad)\n', (6208, 6218), False, 'from PuzzleLib.Backend import gpuarray\n'), ((6255, 6297), 'numpy.zeros', 
'np.zeros', (['hostData.shape'], {'dtype': 'np.float32'}), '(hostData.shape, dtype=np.float32)\n', (6263, 6297), True, 'import numpy as np\n'), ((7156, 7198), 'numpy.empty', 'np.empty', (['hostBias.shape'], {'dtype': 'np.float32'}), '(hostBias.shape, dtype=np.float32)\n', (7164, 7198), True, 'import numpy as np\n'), ((7524, 7549), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (7539, 7549), False, 'from PuzzleLib.Backend import gpuarray\n'), ((7660, 7703), 'numpy.empty', 'np.empty', (['conv.data.shape'], {'dtype': 'np.float32'}), '(conv.data.shape, dtype=np.float32)\n', (7668, 7703), True, 'import numpy as np\n'), ((8550, 8575), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostGrad'], {}), '(hostGrad)\n', (8565, 8575), False, 'from PuzzleLib.Backend import gpuarray\n'), ((8612, 8654), 'numpy.zeros', 'np.zeros', (['hostData.shape'], {'dtype': 'np.float32'}), '(hostData.shape, dtype=np.float32)\n', (8620, 8654), True, 'import numpy as np\n'), ((9929, 9971), 'numpy.empty', 'np.empty', (['hostBias.shape'], {'dtype': 'np.float32'}), '(hostBias.shape, dtype=np.float32)\n', (9937, 9971), True, 'import numpy as np\n'), ((10378, 10383), 'PuzzleLib.Cost.MSE.MSE', 'MSE', ([], {}), '()\n', (10381, 10383), False, 'from PuzzleLib.Cost.MSE import MSE\n'), ((7253, 7282), 'numpy.sum', 'np.sum', (['hostGrad[:, oc, :, :]'], {}), '(hostGrad[:, oc, :, :])\n', (7259, 7282), True, 'import numpy as np\n'), ((10026, 10055), 'numpy.sum', 'np.sum', (['hostGrad[:, oc, :, :]'], {}), '(hostGrad[:, oc, :, :])\n', (10032, 10055), True, 'import numpy as np\n'), ((588, 625), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Data must be 4d tensor"""'], {}), "('Data must be 4d tensor')\n", (599, 625), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((807, 899), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Data has %d maps (expected: %d)' % (inmaps, self.W.shape[1] * self.groups))"], {}), "('Data has %d 
maps (expected: %d)' % (inmaps, self.W.shape[1] *\n self.groups))\n", (818, 899), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((1040, 1135), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Data maps height is too small (got %d, expected at least %d)' % (exth, extfh)\n )"], {}), "('Data maps height is too small (got %d, expected at least %d)' %\n (exth, extfh))\n", (1051, 1135), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((1161, 1255), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Data maps width is too small (got %d, expected at least %d)' % (extw, extfw))"], {}), "('Data maps width is too small (got %d, expected at least %d)' %\n (extw, extfw))\n", (1172, 1255), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((1703, 1740), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Grad must be 4d tensor"""'], {}), "('Grad must be 4d tensor')\n", (1714, 1740), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((1811, 1886), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Grad has %d maps (expected: %d)' % (outmaps, self.W.shape[0]))"], {}), "('Grad has %d maps (expected: %d)' % (outmaps, self.W.shape[0]))\n", (1822, 1886), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((2588, 2628), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'inmaps', 'h', 'w'], {}), '(batchsize, inmaps, h, w)\n', (2603, 2628), True, 'import numpy as np\n'), ((3170, 3203), 'numpy.random.randn', 'np.random.randn', (['*conv.data.shape'], {}), '(*conv.data.shape)\n', (3185, 3203), True, 'import numpy as np\n'), ((3293, 3313), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (3301, 3313), True, 'import numpy as np\n'), ((3693, 3733), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'inmaps', 'h', 'w'], {}), '(batchsize, inmaps, h, w)\n', (3708, 3733), True, 'import numpy as np\n'), ((4419, 4459), 'numpy.random.randn', 'np.random.randn', 
(['batchsize', 'inmaps', 'h', 'w'], {}), '(batchsize, inmaps, h, w)\n', (4434, 4459), True, 'import numpy as np\n'), ((5180, 5220), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'inmaps', 'h', 'w'], {}), '(batchsize, inmaps, h, w)\n', (5195, 5220), True, 'import numpy as np\n'), ((6132, 6165), 'numpy.random.randn', 'np.random.randn', (['*conv.data.shape'], {}), '(*conv.data.shape)\n', (6147, 6165), True, 'import numpy as np\n'), ((7452, 7496), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'inmaps', 'inh', 'inw'], {}), '(batchsize, inmaps, inh, inw)\n', (7467, 7496), True, 'import numpy as np\n'), ((8489, 8522), 'numpy.random.randn', 'np.random.randn', (['*conv.data.shape'], {}), '(*conv.data.shape)\n', (8504, 8522), True, 'import numpy as np\n'), ((10222, 10275), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(batchsize, inmaps, h, w)'], {}), '(0.0, 1.0, (batchsize, inmaps, h, w))\n', (10238, 10275), True, 'import numpy as np\n'), ((10411, 10465), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(batchsize, outmaps, 1, 1)'], {}), '(0.0, 1.0, (batchsize, outmaps, 1, 1))\n', (10427, 10465), True, 'import numpy as np\n')] |
import sys
import numpy
import scipy
from jadapy import Target
from jadapy.schur import schur, schur_sort
from jadapy.orthogonalization import normalize, orthogonalize, orthonormalize
from jadapy.correction_equation import solve_correction_equation, solve_generalized_correction_equation
from jadapy.utils import dot, norm
from jadapy.NumPyInterface import NumPyInterface
def _prec(x, *args):
return x
def jdqr(A, num=5, target=Target.SmallestMagnitude, tol=1e-8, lock_tol=None, M=None, prec=None,
maxit=1000, subspace_dimensions=(20, 40), initial_subspace=None, arithmetic='real',
return_eigenvectors=False, return_subspace=False,
interface=None):
if arithmetic not in ['real', 'complex', 'r', 'c']:
raise ValueError("argument must be 'real', or 'complex'")
if not prec:
prec = _prec
if not lock_tol:
lock_tol = tol * 1e2
solver_tolerance = 1.0
n = A.shape[0]
subspace_dimensions = (min(subspace_dimensions[0], n // 2), min(subspace_dimensions[1], n))
it = 1
k = 0 # Number of eigenvalues found
m = 0 # Size of the search subspace
nev = 1 # Amount of eigenvalues currently converging
alpha = None
evs = None
sort_target = target
dtype = A.dtype
if interface:
dtype = interface.dtype
ctype = numpy.dtype(dtype.char.upper())
if arithmetic in ['complex', 'c']:
dtype = ctype
if not interface:
interface = NumPyInterface(n, dtype)
extra = 0
if dtype != ctype:
# Allocate extra space in case a complex eigenpair may exist for a real matrix
extra = 1
# Eigenvalues
aconv = numpy.zeros(num + extra, ctype)
# Schur matrices
R = numpy.zeros((num + extra, num + extra), dtype)
# Schur vectors
Q = interface.vector(num + extra)
# Preconditioned Q
Y = interface.vector(num + extra)
H = numpy.zeros((num + extra, num + extra), dtype)
MQ = Q
if M is not None:
MQ = interface.vector(num + extra)
# Orthonormal search subspace
V = interface.vector(subspace_dimensions[1])
# AV = A*V without orthogonalization
AV = interface.vector(subspace_dimensions[1])
# MV = M*V without orthogonalization
MV = None
if M is not None:
MV = interface.vector(subspace_dimensions[1])
# Residual vector
r = interface.vector(1 + extra)
# Low-dimensional projection: VAV = V'*A*V
VAV = numpy.zeros((subspace_dimensions[1], subspace_dimensions[1]), dtype)
while k < num and it <= maxit:
if it == 1:
if initial_subspace is not None:
nev = min(initial_subspace.shape[1], subspace_dimensions[1])
V[:, 0:nev] = initial_subspace[:, 0:nev]
else:
V[:, 0] = interface.random()
normalize(V[:, 0], M=M)
else:
solver_maxit = 100
sigma = evs[0]
# Build an initial search subspace in an inexpensive way
# and as close to the target as possible
if m < subspace_dimensions[0]:
solver_tolerance = 0.5
solver_maxit = 1
if target != 0.0:
sigma = target
if M is not None:
V[:, m:m+nev] = solve_generalized_correction_equation(
A, M, prec, MQ[:, 0:k+nev], Q[:, 0:k+nev], Y[:, 0:k+nev], H[0:k+nev, 0:k+nev],
sigma, 1.0, r[:, 0:nev], solver_tolerance, solver_maxit, interface)
else:
V[:, m:m+nev] = solve_correction_equation(
A, prec, Q[:, 0:k+nev], Y[:, 0:k+nev], H[0:k+nev, 0:k+nev],
sigma, r[:, 0:nev], solver_tolerance, solver_maxit, interface)
orthonormalize(V[:, 0:m], V[:, m:m+nev], M=M, MV=None if MV is None else MV[:, 0:m])
AV[:, m:m+nev] = A @ V[:, m:m+nev]
if M is not None:
MV[:, m:m+nev] = M @ V[:, m:m+nev]
# Update VAV = V' * A * V
for i in range(m):
VAV[i, m:m+nev] = dot(V[:, i], AV[:, m:m+nev])
VAV[m:m+nev, i] = dot(V[:, m:m+nev], AV[:, i])
VAV[m:m+nev, m:m+nev] = dot(V[:, m:m+nev], AV[:, m:m+nev])
m += nev
[S, U] = schur(VAV[0:m, 0:m])
found = True
while found:
[S, U] = schur_sort(S, U, sort_target)
nev = 1
if dtype != ctype and S.shape[0] > 1 and abs(S[1, 0]) > 0.0:
# Complex eigenvalue in real arithmetic
nev = 2
alpha = S[0:nev, 0:nev]
evcond = norm(alpha)
Q[:, k:k+nev] = V[:, 0:m] @ U[:, 0:nev]
Y[:, k:k+nev] = prec(Q[:, k:k+nev], alpha)
if M is not None:
MQ[:, k:k+nev] = MV[:, 0:m] @ U[:, 0:nev]
for i in range(k):
H[i, k:k+nev] = dot(MQ[:, i], Y[:, k:k+nev])
H[k:k+nev, i] = dot(MQ[:, k:k+nev], Y[:, i])
H[k:k+nev, k:k+nev] = dot(MQ[:, k:k+nev], Y[:, k:k+nev])
r[:, 0:nev] = A @ Q[:, k:k+nev] - MQ[:, k:k+nev] @ alpha
orthogonalize(MQ[:, 0:k+nev], r[:, 0:nev], M=None, MV=Q[:, 0:k+nev])
rnorm = norm(r[:, 0:nev]) / evcond
evs = scipy.linalg.eigvals(alpha)
ev_est = evs[0]
print("Step: %4d, subspace dimension: %3d, eigenvalue estimate: %13.6e + %13.6ei, residual norm: %e" %
(it, m, ev_est.real, ev_est.imag, rnorm))
sys.stdout.flush()
if rnorm <= lock_tol:
sort_target = ev_est
# Store converged Ritz pairs
if rnorm <= tol and m > nev:
# Compute R so we can compute the eigenvectors
if return_eigenvectors:
if k > 0:
AQ = AV[:, 0:m] @ U[:, 0:nev]
for i in range(k):
R[i, k:k+nev] = dot(Q[:, i], AQ)
R[k:k+nev, k:k+nev] = alpha
# Store the converged eigenvalues
for i in range(nev):
print("Found an eigenvalue:", evs[i])
sys.stdout.flush()
aconv[k] = evs[i]
k += 1
if k >= num:
break
# Reset the iterative solver tolerance
solver_tolerance = 1.0
# Unlock the target
sort_target = target
# Remove the eigenvalue from the search space
V[:, 0:m-nev] = V[:, 0:m] @ U[:, nev:m]
AV[:, 0:m-nev] = AV[:, 0:m] @ U[:, nev:m]
if M is not None:
MV[:, 0:m-nev] = MV[:, 0:m] @ U[:, nev:m]
VAV[0:m-nev, 0:m-nev] = S[nev:m, nev:m]
S = VAV[0:m-nev, 0:m-nev]
U = numpy.identity(m-nev, dtype)
m -= nev
else:
found = False
solver_tolerance = max(solver_tolerance / 2, tol / 100)
if m >= min(subspace_dimensions[1], n - k) and k < num:
# Maximum search space dimension has been reached.
new_m = min(subspace_dimensions[0], n - k)
print("Shrinking the search space from %d to %d" % (m, new_m))
sys.stdout.flush()
V[:, 0:new_m] = V[:, 0:m] @ U[:, 0:new_m]
AV[:, 0:new_m] = AV[:, 0:m] @ U[:, 0:new_m]
if M is not None:
MV[:, 0:new_m] = MV[:, 0:m] @ U[:, 0:new_m]
VAV[0:new_m, 0:new_m] = S[0:new_m, 0:new_m]
m = new_m
elif m + nev - 1 >= min(subspace_dimensions[1], n - k):
# Only one extra vector fits in the search space.
nev = 1
it += 1
if return_eigenvectors:
evs, v = scipy.linalg.eig(R[0:k, 0:k], left=False, right=True)
if ctype == dtype:
if return_subspace:
return evs, Q[:, 0:k] @ v, Q[:, 0:k]
return evs, Q[:, 0:k] @ v
i = 0
while i < k:
Y[:, i] = Q[:, 0:k] @ v[:, i].real
if evs[i].imag:
Y[:, i+1] = Q[:, 0:k] @ v[:, i].imag
i += 1
i += 1
if return_subspace:
return evs, Y[:, 0:k], Q[:, 0:k]
return evs, Y[:, 0:k]
if return_subspace:
return aconv[0:num], Q[:, 0:k]
return aconv[0:num]
| [
"jadapy.utils.norm",
"jadapy.utils.dot",
"scipy.linalg.eigvals",
"jadapy.schur.schur_sort",
"jadapy.orthogonalization.normalize",
"numpy.zeros",
"jadapy.orthogonalization.orthogonalize",
"scipy.linalg.eig",
"jadapy.correction_equation.solve_correction_equation",
"numpy.identity",
"jadapy.schur.s... | [((1667, 1698), 'numpy.zeros', 'numpy.zeros', (['(num + extra)', 'ctype'], {}), '(num + extra, ctype)\n', (1678, 1698), False, 'import numpy\n'), ((1729, 1775), 'numpy.zeros', 'numpy.zeros', (['(num + extra, num + extra)', 'dtype'], {}), '((num + extra, num + extra), dtype)\n', (1740, 1775), False, 'import numpy\n'), ((1904, 1950), 'numpy.zeros', 'numpy.zeros', (['(num + extra, num + extra)', 'dtype'], {}), '((num + extra, num + extra), dtype)\n', (1915, 1950), False, 'import numpy\n'), ((2452, 2520), 'numpy.zeros', 'numpy.zeros', (['(subspace_dimensions[1], subspace_dimensions[1])', 'dtype'], {}), '((subspace_dimensions[1], subspace_dimensions[1]), dtype)\n', (2463, 2520), False, 'import numpy\n'), ((1468, 1492), 'jadapy.NumPyInterface.NumPyInterface', 'NumPyInterface', (['n', 'dtype'], {}), '(n, dtype)\n', (1482, 1492), False, 'from jadapy.NumPyInterface import NumPyInterface\n'), ((4194, 4232), 'jadapy.utils.dot', 'dot', (['V[:, m:m + nev]', 'AV[:, m:m + nev]'], {}), '(V[:, m:m + nev], AV[:, m:m + nev])\n', (4197, 4232), False, 'from jadapy.utils import dot, norm\n'), ((4265, 4285), 'jadapy.schur.schur', 'schur', (['VAV[0:m, 0:m]'], {}), '(VAV[0:m, 0:m])\n', (4270, 4285), False, 'from jadapy.schur import schur, schur_sort\n'), ((7839, 7892), 'scipy.linalg.eig', 'scipy.linalg.eig', (['R[0:k, 0:k]'], {'left': '(False)', 'right': '(True)'}), '(R[0:k, 0:k], left=False, right=True)\n', (7855, 7892), False, 'import scipy\n'), ((3780, 3870), 'jadapy.orthogonalization.orthonormalize', 'orthonormalize', (['V[:, 0:m]', 'V[:, m:m + nev]'], {'M': 'M', 'MV': '(None if MV is None else MV[:, 0:m])'}), '(V[:, 0:m], V[:, m:m + nev], M=M, MV=None if MV is None else\n MV[:, 0:m])\n', (3794, 3870), False, 'from jadapy.orthogonalization import normalize, orthogonalize, orthonormalize\n'), ((4074, 4104), 'jadapy.utils.dot', 'dot', (['V[:, i]', 'AV[:, m:m + nev]'], {}), '(V[:, i], AV[:, m:m + nev])\n', (4077, 4104), False, 'from jadapy.utils import dot, norm\n'), 
((4133, 4163), 'jadapy.utils.dot', 'dot', (['V[:, m:m + nev]', 'AV[:, i]'], {}), '(V[:, m:m + nev], AV[:, i])\n', (4136, 4163), False, 'from jadapy.utils import dot, norm\n'), ((4350, 4379), 'jadapy.schur.schur_sort', 'schur_sort', (['S', 'U', 'sort_target'], {}), '(S, U, sort_target)\n', (4360, 4379), False, 'from jadapy.schur import schur, schur_sort\n'), ((4613, 4624), 'jadapy.utils.norm', 'norm', (['alpha'], {}), '(alpha)\n', (4617, 4624), False, 'from jadapy.utils import dot, norm\n'), ((5010, 5048), 'jadapy.utils.dot', 'dot', (['MQ[:, k:k + nev]', 'Y[:, k:k + nev]'], {}), '(MQ[:, k:k + nev], Y[:, k:k + nev])\n', (5013, 5048), False, 'from jadapy.utils import dot, norm\n'), ((5127, 5199), 'jadapy.orthogonalization.orthogonalize', 'orthogonalize', (['MQ[:, 0:k + nev]', 'r[:, 0:nev]'], {'M': 'None', 'MV': 'Q[:, 0:k + nev]'}), '(MQ[:, 0:k + nev], r[:, 0:nev], M=None, MV=Q[:, 0:k + nev])\n', (5140, 5199), False, 'from jadapy.orthogonalization import normalize, orthogonalize, orthonormalize\n'), ((5263, 5290), 'scipy.linalg.eigvals', 'scipy.linalg.eigvals', (['alpha'], {}), '(alpha)\n', (5283, 5290), False, 'import scipy\n'), ((5506, 5524), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5522, 5524), False, 'import sys\n'), ((7329, 7347), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7345, 7347), False, 'import sys\n'), ((2835, 2858), 'jadapy.orthogonalization.normalize', 'normalize', (['V[:, 0]'], {'M': 'M'}), '(V[:, 0], M=M)\n', (2844, 2858), False, 'from jadapy.orthogonalization import normalize, orthogonalize, orthonormalize\n'), ((3301, 3505), 'jadapy.correction_equation.solve_generalized_correction_equation', 'solve_generalized_correction_equation', (['A', 'M', 'prec', 'MQ[:, 0:k + nev]', 'Q[:, 0:k + nev]', 'Y[:, 0:k + nev]', 'H[0:k + nev, 0:k + nev]', 'sigma', '(1.0)', 'r[:, 0:nev]', 'solver_tolerance', 'solver_maxit', 'interface'], {}), '(A, M, prec, MQ[:, 0:k + nev], Q[:, 0:\n k + nev], Y[:, 0:k + nev], H[0:k + nev, 0:k + nev], 
sigma, 1.0, r[:, 0:\n nev], solver_tolerance, solver_maxit, interface)\n', (3338, 3505), False, 'from jadapy.correction_equation import solve_correction_equation, solve_generalized_correction_equation\n'), ((3577, 3741), 'jadapy.correction_equation.solve_correction_equation', 'solve_correction_equation', (['A', 'prec', 'Q[:, 0:k + nev]', 'Y[:, 0:k + nev]', 'H[0:k + nev, 0:k + nev]', 'sigma', 'r[:, 0:nev]', 'solver_tolerance', 'solver_maxit', 'interface'], {}), '(A, prec, Q[:, 0:k + nev], Y[:, 0:k + nev], H[0:k +\n nev, 0:k + nev], sigma, r[:, 0:nev], solver_tolerance, solver_maxit,\n interface)\n', (3602, 3741), False, 'from jadapy.correction_equation import solve_correction_equation, solve_generalized_correction_equation\n'), ((4886, 4916), 'jadapy.utils.dot', 'dot', (['MQ[:, i]', 'Y[:, k:k + nev]'], {}), '(MQ[:, i], Y[:, k:k + nev])\n', (4889, 4916), False, 'from jadapy.utils import dot, norm\n'), ((4947, 4977), 'jadapy.utils.dot', 'dot', (['MQ[:, k:k + nev]', 'Y[:, i]'], {}), '(MQ[:, k:k + nev], Y[:, i])\n', (4950, 4977), False, 'from jadapy.utils import dot, norm\n'), ((5217, 5234), 'jadapy.utils.norm', 'norm', (['r[:, 0:nev]'], {}), '(r[:, 0:nev])\n', (5221, 5234), False, 'from jadapy.utils import dot, norm\n'), ((6890, 6920), 'numpy.identity', 'numpy.identity', (['(m - nev)', 'dtype'], {}), '(m - nev, dtype)\n', (6904, 6920), False, 'import numpy\n'), ((6185, 6203), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6201, 6203), False, 'import sys\n'), ((5954, 5970), 'jadapy.utils.dot', 'dot', (['Q[:, i]', 'AQ'], {}), '(Q[:, i], AQ)\n', (5957, 5970), False, 'from jadapy.utils import dot, norm\n')] |
"""
This code runs basic analysis on simulations that were computed using the 'one at a time analysis'.
You must provide the path to the csv database with the parameters of each simulation.
Functionality:
1. Plot the last concentration profile over the layer stack.
2. Plot the Rsh(t) estimated with the series resistor model.
3. Estimate the integrated sodium concentration in SiNx and Si at the end of the simulation.
"""
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
# import pidsim.rsh as prsh
import pidsim.ml_simulator as pmpp_rf
import h5py
import os
import platform
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as mticker
import matplotlib.gridspec as gridspec
from scipy import integrate
import pnptransport.utils as utils
from tqdm import tqdm
# --- User configuration -------------------------------------------------
# Location of the one-factor-at-a-time (OFAT) simulation database and of
# the folder holding the corresponding .h5 transport results.
path_to_csv = r"G:\My Drive\Research\PVRD1\Manuscripts\Device_Simulations_draft\simulations\inputs_20201028\ofat_db.csv"
path_to_results = r"G:\My Drive\Research\PVRD1\Manuscripts\Device_Simulations_draft\simulations\inputs_20201028\results"
# Total simulated time to display, in hours.
t_max_h = 96.0
# Optional CSV with experimental PID data to overlay on the Rsh plot,
# e.g. 'G:\My Drive\Research\PVRD1\DATA\PID\MC4_Raw_IV_modified.csv'.
pid_experiment_csv = None
# Matplotlib colormap used to encode time in the concentration profiles.
color_map = "viridis_r"
# rcParams applied to every figure produced by this script.
defaultPlotStyle = {
    # Fonts and math text
    "font.size": 11,
    "font.family": "Arial",
    "font.weight": "regular",
    "legend.fontsize": 11,
    "mathtext.fontset": "stix",
    # Tick geometry
    "xtick.direction": "in",
    "ytick.direction": "in",
    "xtick.major.size": 4.5,
    "xtick.major.width": 1.75,
    "ytick.major.size": 4.5,
    "ytick.major.width": 1.75,
    "xtick.minor.size": 2.75,
    "xtick.minor.width": 1.0,
    "ytick.minor.size": 2.75,
    "ytick.minor.width": 1.0,
    "xtick.top": False,
    "ytick.right": False,
    # Lines and markers
    "lines.linewidth": 2.5,
    "lines.markersize": 10,
    "lines.markeredgewidth": 0.85,
    # Axes labels and legend spacing
    "axes.labelpad": 5.0,
    "axes.labelsize": 12,
    "axes.labelweight": "regular",
    "legend.handletextpad": 0.2,
    "legend.borderaxespad": 0.2,
    "axes.linewidth": 1.25,
    # Titles and output resolution
    "axes.titlesize": 12,
    "axes.titleweight": "bold",
    "axes.titlepad": 6,
    "figure.titleweight": "bold",
    "figure.dpi": 100,
}
if __name__ == '__main__':
    # Script flow: load the OFAT simulation database, then for every converged
    # simulation open its .h5 transport result, plot the Na concentration
    # profiles over the SiNx/Si stack, the layer-averaged concentration vs.
    # time, the ML-estimated Rsh(t) and the flat-band voltage Vfb(t), and save
    # all figures plus a per-simulation CSV into <db folder>/batch_analysis.
    if platform.system() == 'Windows':
        # Prefix with the Windows extended-length path marker so paths longer
        # than MAX_PATH still open.
        # NOTE(review): r'\\?\\' yields '\\?\\' (double trailing backslash);
        # the canonical prefix is '\\?\' — confirm the extra backslash is intended.
        path_to_csv = r'\\?\\' + path_to_csv
        path_to_results = r'\\?\\' + path_to_results
        if pid_experiment_csv is not None:
            pid_experiment_csv = r'\\?\\' + pid_experiment_csv
    # Total display time in seconds.
    t_max = t_max_h * 3600.
    # Create an analysis folder within the base dir for the database file
    working_path = os.path.dirname(path_to_csv)
    analysis_path = os.path.join(working_path, 'batch_analysis')
    # If the folder does not exist, create it
    if not os.path.exists(analysis_path):
        os.makedirs(analysis_path)
    # If an experimental profile is provided, load the csv
    if pid_experiment_csv is not None:
        pid_experiment_df = pd.read_csv(pid_experiment_csv)
    # Read the database of simulations
    simulations_df = pd.read_csv(filepath_or_buffer=path_to_csv)
    # Keep only the simulations that converged
    simulations_df = simulations_df[simulations_df['converged'] == 1].reset_index(drop=True)
    # Count the simulations
    n_simulations = len(simulations_df)
    # Structured array holding, per simulation, the final layer-averaged Na
    # concentration in the SiNx and Si layers (filled inside the loop below).
    integrated_final_concentrations = np.empty(n_simulations, dtype=np.dtype([
        ('C_SiNx average final (atoms/cm^3)', 'd'), ('C_Si average final (atoms/cm^3)', 'd')
    ]))
    # Load the style
    mpl.rcParams.update(defaultPlotStyle)
    # Get the color map
    cm = mpl.cm.get_cmap(color_map)
    # Show at least the first 6 figures
    # NOTE(review): max_displayed_figures and fig_counter are never used below.
    max_displayed_figures = 6
    fig_counter = 0
    for i, r in simulations_df.iterrows():
        # The config-file stem names every output produced for this simulation.
        filetag = os.path.splitext(r['config file'])[0]
        # NOTE(review): 'simga_s' is a typo for sigma_s (local only, harmless).
        simga_s = r['sigma_s (cm^-2)']
        zeta = r['zeta (1/s)']
        dsf = r['D_SF (cm^2/s)']
        e_field = r['E (V/cm)']
        h = r['h (cm/s)']
        # NOTE(review): m and time_max are read but never used below.
        m = r['m']
        time_max = r['time (s)']
        temp_c = r['temp (C)']
        # Pre-build the LaTeX annotation strings used on every figure.
        source_str1 = r'$S_{{\mathrm{{s}}}} = {0} \; (\mathrm{{cm^{{-2}}}})$'.format(
            utils.latex_order_of_magnitude(simga_s))
        source_str2 = r'$k = {0} \; (\mathrm{{1/s}})$'.format(utils.latex_order_of_magnitude(zeta))
        e_field_str = r'$E = {0} \; (\mathrm{{V/cm}})$'.format(utils.latex_order_of_magnitude(e_field))
        h_str = r'$h = {0} \; (\mathrm{{cm/s}})$'.format(utils.latex_order_of_magnitude(h))
        temp_str = r'${0:.0f} \; (\mathrm{{°C}})$'.format(temp_c)
        dsf_str = r'$D_{{\mathrm{{SF}}}} = {0} \; (\mathrm{{cm^2/s}})$'.format(utils.latex_order_of_magnitude(dsf))
        # Normalize the time scale (hours) onto [0, 1] for the colormap
        normalize = mpl.colors.Normalize(vmin=1E-3, vmax=(t_max / 3600.))
        # Get ~20 time points, geometrically spaced, at which to plot profiles
        requested_time = utils.geometric_series_spaced(max_val=t_max, min_delta=600, steps=20)
        # Get the full path to the h5 file
        path_to_h5 = os.path.join(path_to_results, filetag + '.h5')
        # Create the concentration figure
        fig_c = plt.figure()
        fig_c.set_size_inches(5.0, 3.0, forward=True)
        fig_c.subplots_adjust(hspace=0.1, wspace=0.1)
        gs_c_0 = gridspec.GridSpec(ncols=1, nrows=1, figure=fig_c)
        # 1 column for the concentration profile in SiNx
        # 1 column for the concentration profile in Si
        # 1 column for the colorbar
        gs_c_00 = gridspec.GridSpecFromSubplotSpec(
            nrows=1, ncols=2, subplot_spec=gs_c_0[0], wspace=0.0, hspace=0.1, width_ratios=[2.5, 3]
        )
        ax_c_0 = fig_c.add_subplot(gs_c_00[0, 0])
        ax_c_1 = fig_c.add_subplot(gs_c_00[0, 1])
        # Axis labels
        ax_c_0.set_xlabel(r'Depth (nm)')
        ax_c_0.set_ylabel(r'[Na] ($\mathregular{cm^{-3}}$)')
        # Title for the SiNx axis
        ax_c_0.set_title(r'${0}\; \mathrm{{V/cm}}, {1:.0f}\; \mathrm{{°C}}$'.format(
            utils.latex_order_of_magnitude(e_field), temp_c
        ))
        # Set the ticks for the Si concentration profile axis to the right
        ax_c_1.yaxis.set_ticks_position('right')
        # Title for the Si axis
        ax_c_1.set_title(r'$D_{{\mathrm{{SF}}}} = {0}\; \mathrm{{cm^2/s}},\; E=0$'.format(
            utils.latex_order_of_magnitude(dsf)
        ))
        ax_c_1.set_xlabel(r'Depth (um)')
        # Log plot in the y axis
        ax_c_0.set_yscale('log')
        ax_c_1.set_yscale('log')
        ax_c_0.set_ylim(bottom=1E10, top=1E20)
        ax_c_1.set_ylim(bottom=1E10, top=1E20)
        # Set the ticks for the SiNx log axis
        ax_c_0.yaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=6))
        ax_c_0.yaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=60, subs=np.arange(2, 10) * .1))
        # Set the ticks for the Si log axis
        ax_c_1.yaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=6))
        ax_c_1.yaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=60, subs=np.arange(2, 10) * .1))
        ax_c_1.tick_params(axis='y', left=False, labelright=False)
        # Configure the ticks for the x axis
        ax_c_0.xaxis.set_major_locator(mticker.MaxNLocator(4, prune=None))
        ax_c_0.xaxis.set_minor_locator(mticker.AutoMinorLocator(4))
        ax_c_1.xaxis.set_major_locator(mticker.MaxNLocator(3, prune='lower'))
        ax_c_1.xaxis.set_minor_locator(mticker.AutoMinorLocator(4))
        # Change the background colors
        # ax_c_0.set_facecolor((0.89, 0.75, 1.0))
        # ax_c_1.set_facecolor((0.82, 0.83, 1.0))
        # Create the integrated concentration figure
        fig_s = plt.figure()
        fig_s.set_size_inches(4.75, 3.0, forward=True)
        fig_s.subplots_adjust(hspace=0.1, wspace=0.1)
        gs_s_0 = gridspec.GridSpec(ncols=1, nrows=1, figure=fig_s)
        gs_s_00 = gridspec.GridSpecFromSubplotSpec(
            nrows=1, ncols=1, subplot_spec=gs_s_0[0], hspace=0.1,
        )
        ax_s_0 = fig_s.add_subplot(gs_s_00[0, 0])
        # Set the axis labels
        ax_s_0.set_xlabel(r'Time (h)')
        ax_s_0.set_ylabel(r'$\bar{C}$ ($\mathregular{cm^{-3}}$)')
        # Set the limits for the x axis
        ax_s_0.set_xlim(left=0, right=t_max / 3600.)
        # Make the y axis log
        ax_s_0.set_yscale('log')
        # Set the ticks for the y axis
        ax_s_0.yaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=6))
        ax_s_0.yaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=60, subs=np.arange(2, 10) * .1))
        # Configure the ticks for the x axis
        ax_s_0.xaxis.set_major_locator(mticker.MaxNLocator(6, prune=None))
        ax_s_0.xaxis.set_minor_locator(mticker.AutoMinorLocator(2))
        # Create the mpp (Rsh) figure
        fig_mpp = plt.figure()
        fig_mpp.set_size_inches(4.75, 3.0, forward=True)
        fig_mpp.subplots_adjust(hspace=0.1, wspace=0.1)
        gs_mpp_0 = gridspec.GridSpec(ncols=1, nrows=1, figure=fig_mpp)
        gs_mpp_00 = gridspec.GridSpecFromSubplotSpec(
            nrows=1, ncols=1, subplot_spec=gs_mpp_0[0], hspace=0.1,
        )
        ax_mpp_0 = fig_mpp.add_subplot(gs_mpp_00[0, 0])
        # Set the axis labels
        ax_mpp_0.set_xlabel(r'Time (h)')
        # NOTE(review): the mathtext below has an unbalanced '}' after cm^2
        # ('...cm^2}}$') which mathtext may reject at draw time — confirm/fix.
        ax_mpp_0.set_ylabel(r'$R_{\mathrm{sh}}$ ($\mathrm{\Omega\cdot cm^2}}$)')
        # Vfb figure
        fig_vfb = plt.figure()
        fig_vfb.set_size_inches(4.75, 3.0, forward=True)
        fig_vfb.subplots_adjust(hspace=0.1, wspace=0.1)
        gs_vfb_0 = gridspec.GridSpec(ncols=1, nrows=1, figure=fig_vfb)
        gs_vfb_00 = gridspec.GridSpecFromSubplotSpec(
            nrows=1, ncols=1, subplot_spec=gs_vfb_0[0], hspace=0.1,
        )
        ax_vfb_0 = fig_vfb.add_subplot(gs_vfb_00[0, 0])
        # Set the axis labels
        ax_vfb_0.set_xlabel(r'Time (h)')
        ax_vfb_0.set_ylabel(r'$V_{\mathrm{FB}}$ (V)')
        # Read the transport result for this simulation. Arrays are copied out
        # with np.array, so they remain valid after the file is closed.
        with h5py.File(path_to_h5, 'r') as hf:
            # Get the time dataset
            time_s = np.array(hf['time'])
            # Get the vfb dataset
            vfb = np.array(hf.get(name='vfb'))
            # Get the SiNx group
            grp_sinx = hf['L1']
            # Get the Si group
            grp_si = hf['L2']
            # Get the position vector in SiNx in nm
            x_sin = np.array(grp_sinx['x']) * 1000.
            thickness_sin = np.max(x_sin)
            # Shift the Si depth axis so it starts at the SiNx/Si interface.
            x_si = np.array(grp_si['x']) - thickness_sin / 1000.
            x_sin = x_sin - thickness_sin
            thickness_si = np.amax(x_si)
            n_profiles = len(time_s)
            # Indices of the stored profiles closest to the requested times.
            requested_indices = utils.get_indices_at_values(x=time_s, requested_values=requested_time)
            # NOTE(review): time_profile and model_colors are never used below.
            time_profile = np.empty(len(requested_indices))
            model_colors = [cm(normalize(t)) for t in time_s / 3600.]
            scalar_maps = mpl.cm.ScalarMappable(cmap=cm, norm=normalize)
            # Plot one concentration profile per requested time, colored by time.
            with tqdm(requested_indices, leave=True, position=0) as pbar:
                for j, idx in enumerate(requested_indices):
                    time_j = time_s[idx] / 3600.
                    time_profile[j] = time_j
                    # Get the specific profile
                    ct_ds = 'ct_{0:d}'.format(idx)
                    try:
                        c_sin = np.array(grp_sinx['concentration'][ct_ds])
                        c_si = np.array(grp_si['concentration'][ct_ds])
                        color_j = cm(normalize(time_j))
                        ax_c_0.plot(x_sin, c_sin, color=color_j, zorder=0)
                        ax_c_1.plot(x_si, c_si, color=color_j, zorder=0)
                        pbar.set_description('Extracting profile {0} at time {1:.1f} h...'.format(ct_ds, time_j))
                        pbar.update()
                        pbar.refresh()
                    except KeyError as ke:
                        # Re-raise after identifying which file is corrupt/incomplete.
                        print("Error reading file '{0}'.".format(filetag))
                        raise ke
            # Estimate the integrated concentrations as a function of time for each layer
            c_sin_int = np.empty(n_profiles)
            c_si_int = np.empty(n_profiles)
            with tqdm(range(n_profiles), leave=True, position=0) as pbar:
                for j in range(n_profiles):
                    # Get the specific profile
                    ct_ds = 'ct_{0:d}'.format(j)
                    c_sin = np.array(grp_sinx['concentration'][ct_ds])
                    c_si = np.array(grp_si['concentration'][ct_ds])
                    # Layer-averaged concentration = |∫ C dx| / thickness.
                    # NOTE(review): integrate.simps was removed in SciPy >= 1.14
                    # (use integrate.simpson) — confirm the pinned SciPy version.
                    c_sin_int[j] = abs(integrate.simps(c_sin, -x_sin )) / thickness_sin
                    c_si_int[j] = abs(integrate.simps(c_si, x_si)) / thickness_si
                    pbar.set_description('Integrating profile at time {0:.1f} h: S_N: {1:.2E}, S_S: {2:.3E} cm^-2'.format(
                        time_s[j] / 3600.,
                        c_sin_int[j],
                        c_si_int[j]
                    ))
                    pbar.update()
                    pbar.refresh()
            ax_s_0.plot(time_s / 3600., c_sin_int, label=r'$\mathregular{SiN_x}$')
            ax_s_0.plot(time_s / 3600., c_si_int, label=r'Si')
            # ax_s_0.plot(time_s / 3600., c_si_int + c_sin_int, label=r'Si + $\mathregular{SiN_x}$')
            ax_vfb_0.plot(time_s / 3600., vfb)
            ax_vfb_0.set_xlim(left=0, right=t_max_h)
            # Record the final (last time step) layer averages for this simulation.
            integrated_final_concentrations[i] = (c_sin_int[-1], c_si_int[-1])
        leg = ax_s_0.legend(loc='lower right', frameon=True)
        # Set the limits for the x axis of the concentration plot
        ax_c_0.set_xlim(left=np.amin(x_sin), right=np.amax(x_sin))
        ax_c_1.set_xlim(left=np.amin(x_si), right=np.amax(x_si))
        # Add the color bar
        divider = make_axes_locatable(ax_c_1)
        cax = divider.append_axes("right", size="7.5%", pad=0.03)
        cbar = fig_c.colorbar(scalar_maps, cax=cax)
        cbar.set_label('Time (h)\n', rotation=90, fontsize=14)
        cbar.ax.tick_params(labelsize=11)
        # Annotate the SiNx panel with the source parameters.
        plot_c_sin_txt = source_str1 + '\n' + source_str2
        ax_c_0.text(
            0.95, 0.95,
            plot_c_sin_txt,
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax_c_0.transAxes,
            fontsize=11,
            color='k'
        )
        plot_c_si_txt = h_str + '\n$m=1$'
        ax_c_1.text(
            0.95, 0.95,
            plot_c_si_txt,
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax_c_1.transAxes,
            fontsize=11,
            color='k'
        )
        # Identify layers
        ax_c_0.text(
            0.05, 0.015,
            r'$\mathregular{SiN_x}$',
            horizontalalignment='left',
            verticalalignment='bottom',
            transform=ax_c_0.transAxes,
            fontsize=11,
            fontweight='bold',
            color='k'
        )
        ax_c_1.text(
            0.05, 0.015,
            'Si',
            horizontalalignment='left',
            verticalalignment='bottom',
            transform=ax_c_1.transAxes,
            fontsize=11,
            fontweight='bold',
            color='k'
        )
        # Set the y axis limits for the integrated concentration plot
        ax_s_0.set_ylim(bottom=1E5, top=1E20)
        title_str = source_str1 + ', ' + source_str2 + ', ' + dsf_str
        plot_txt = e_field_str + '\n' + temp_str + '\n' + h_str
        ax_s_0.set_title(title_str)
        ax_s_0.text(
            0.65, 0.95,
            plot_txt,
            horizontalalignment='left',
            verticalalignment='top',
            transform=ax_s_0.transAxes,
            fontsize=11,
            color='k'
        )
        # ML surrogate model evaluated on the transport result (replaces the
        # older series-resistor model kept commented out below).
        # rsh_analysis = prsh.Rsh(h5_transport_file=path_to_h5)
        ml_analysis = pmpp_rf.MLSim(h5_transport_file=path_to_h5)
        time_s = ml_analysis.time_s
        time_h = time_s / 3600.
        requested_indices = ml_analysis.get_requested_time_indices(time_s)
        pmpp = ml_analysis.pmpp_time_series(requested_indices=requested_indices)
        rsh = ml_analysis.rsh_time_series(requested_indices=requested_indices)
        # Persist the simulated PID time series for this simulation.
        simulated_pmpp_df = pd.DataFrame(data={
            'time (s)': time_s, 'Pmpp (mW/cm^2)': pmpp, 'Rsh (Ohm cm^2)': rsh,
            'vfb (V)': vfb
        })
        simulated_pmpp_df.to_csv(os.path.join(analysis_path, filetag + '_simulated_pid.csv'), index=False)
        ax_mpp_0.plot(time_h, rsh, label='Simulation')
        ax_mpp_0.set_xlim(0, np.amax(time_h))
        if pid_experiment_csv is not None:
            # Overlay the normalized experimental power degradation.
            time_exp = pid_experiment_df['time (s)']/3600.
            pmax_exp = pid_experiment_df['Pmax']
            ax_mpp_0.plot(time_exp, pmax_exp / pmax_exp.max(), ls='None', marker='o', fillstyle='none', label='Experiment')
            leg = ax_mpp_0.legend(loc='lower right', frameon=True)
        ax_mpp_0.set_yscale('log')
        ax_mpp_0.set_xlabel('time (h)')
        ax_mpp_0.set_ylabel('$R_{\mathrm{sh}}\;(\Omega \cdot \mathregular{cm^2})$')
        # ax_mpp_0.set_ylabel('Normalized Power')
        ax_mpp_0.xaxis.set_major_locator(mticker.MaxNLocator(6, prune=None))
        ax_mpp_0.xaxis.set_minor_locator(mticker.AutoMinorLocator(2))
        title_str = source_str1 + ', ' + source_str2 + ', ' + dsf_str
        plot_txt = e_field_str + '\n' + temp_str + '\n' + h_str
        ax_mpp_0.set_title(title_str)
        ax_mpp_0.text(
            0.65, 0.95,
            plot_txt,
            horizontalalignment='left',
            verticalalignment='top',
            transform=ax_mpp_0.transAxes,
            fontsize=11,
            color='k'
        )
        ax_vfb_0.set_title(title_str)
        ax_vfb_0.text(
            0.65, 0.95,
            plot_txt,
            horizontalalignment='left',
            verticalalignment='top',
            transform=ax_vfb_0.transAxes,
            fontsize=11,
            color='k'
        )
        fig_c.tight_layout()
        fig_s.tight_layout()
        fig_mpp.tight_layout()
        fig_vfb.tight_layout()
        # Save every figure (PNG always; SVG for publication-bound plots).
        fig_c.savefig(os.path.join(analysis_path, filetag + '_c.png'), dpi=600)
        fig_c.savefig(os.path.join(analysis_path, filetag + '_c.svg'), dpi=600)
        fig_s.savefig(os.path.join(analysis_path, filetag + '_s.png'), dpi=600)
        fig_mpp.savefig(os.path.join(analysis_path, filetag + '_p.png'), dpi=600)
        fig_mpp.savefig(os.path.join(analysis_path, filetag + '_p.svg'), dpi=600)
        fig_vfb.savefig(os.path.join(analysis_path, filetag + '_vfb.png'), dpi=600)
        fig_vfb.savefig(os.path.join(analysis_path, filetag + '_vfb.svg'), dpi=600)
        # Release figure memory before the next simulation.
        plt.close(fig_c)
        plt.close(fig_s)
        plt.close(fig_mpp)
        plt.close(fig_vfb)
        del fig_c, fig_s, fig_mpp, fig_vfb
    # Append the final layer-averaged concentrations to the database copy and
    # write the batch summary.
    simulations_df['C_SiNx average final (atoms/cm^3)'] = integrated_final_concentrations['C_SiNx average final (atoms/cm^3)']
    simulations_df['C_Si average final (atoms/cm^3)'] = integrated_final_concentrations['C_Si average final (atoms/cm^3)']
    simulations_df.to_csv(os.path.join(analysis_path, 'ofat_analysis.csv'), index=False)
| [
"pnptransport.utils.geometric_series_spaced",
"numpy.amin",
"matplotlib.cm.get_cmap",
"pandas.read_csv",
"numpy.empty",
"matplotlib.pyplot.figure",
"numpy.arange",
"os.path.join",
"pandas.DataFrame",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.close",
"os.path.dirname",
"matplotlib.rcP... | [((2533, 2561), 'os.path.dirname', 'os.path.dirname', (['path_to_csv'], {}), '(path_to_csv)\n', (2548, 2561), False, 'import os\n'), ((2582, 2626), 'os.path.join', 'os.path.join', (['working_path', '"""batch_analysis"""'], {}), "(working_path, 'batch_analysis')\n", (2594, 2626), False, 'import os\n'), ((2970, 3013), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'path_to_csv'}), '(filepath_or_buffer=path_to_csv)\n', (2981, 3013), True, 'import pandas as pd\n'), ((3427, 3464), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['defaultPlotStyle'], {}), '(defaultPlotStyle)\n', (3446, 3464), True, 'import matplotlib as mpl\n'), ((3498, 3524), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['color_map'], {}), '(color_map)\n', (3513, 3524), True, 'import matplotlib as mpl\n'), ((2175, 2192), 'platform.system', 'platform.system', ([], {}), '()\n', (2190, 2192), False, 'import platform\n'), ((2685, 2714), 'os.path.exists', 'os.path.exists', (['analysis_path'], {}), '(analysis_path)\n', (2699, 2714), False, 'import os\n'), ((2724, 2750), 'os.makedirs', 'os.makedirs', (['analysis_path'], {}), '(analysis_path)\n', (2735, 2750), False, 'import os\n'), ((2877, 2908), 'pandas.read_csv', 'pd.read_csv', (['pid_experiment_csv'], {}), '(pid_experiment_csv)\n', (2888, 2908), True, 'import pandas as pd\n'), ((4631, 4684), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0.001)', 'vmax': '(t_max / 3600.0)'}), '(vmin=0.001, vmax=t_max / 3600.0)\n', (4651, 4684), True, 'import matplotlib as mpl\n'), ((4762, 4831), 'pnptransport.utils.geometric_series_spaced', 'utils.geometric_series_spaced', ([], {'max_val': 't_max', 'min_delta': '(600)', 'steps': '(20)'}), '(max_val=t_max, min_delta=600, steps=20)\n', (4791, 4831), True, 'import pnptransport.utils as utils\n'), ((4896, 4942), 'os.path.join', 'os.path.join', (['path_to_results', "(filetag + '.h5')"], {}), "(path_to_results, filetag + '.h5')\n", (4908, 4942), False, 'import 
os\n'), ((5001, 5013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5011, 5013), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5188), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(1)', 'figure': 'fig_c'}), '(ncols=1, nrows=1, figure=fig_c)\n', (5156, 5188), True, 'import matplotlib.gridspec as gridspec\n'), ((5355, 5480), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': '(1)', 'ncols': '(2)', 'subplot_spec': 'gs_c_0[0]', 'wspace': '(0.0)', 'hspace': '(0.1)', 'width_ratios': '[2.5, 3]'}), '(nrows=1, ncols=2, subplot_spec=gs_c_0[0],\n wspace=0.0, hspace=0.1, width_ratios=[2.5, 3])\n', (5387, 5480), True, 'import matplotlib.gridspec as gridspec\n'), ((7549, 7561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7559, 7561), True, 'import matplotlib.pyplot as plt\n'), ((7688, 7737), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(1)', 'figure': 'fig_s'}), '(ncols=1, nrows=1, figure=fig_s)\n', (7705, 7737), True, 'import matplotlib.gridspec as gridspec\n'), ((7756, 7846), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': '(1)', 'ncols': '(1)', 'subplot_spec': 'gs_s_0[0]', 'hspace': '(0.1)'}), '(nrows=1, ncols=1, subplot_spec=gs_s_0[0],\n hspace=0.1)\n', (7788, 7846), True, 'import matplotlib.gridspec as gridspec\n'), ((8722, 8734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8732, 8734), True, 'import matplotlib.pyplot as plt\n'), ((8867, 8918), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(1)', 'figure': 'fig_mpp'}), '(ncols=1, nrows=1, figure=fig_mpp)\n', (8884, 8918), True, 'import matplotlib.gridspec as gridspec\n'), ((8939, 9031), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': '(1)', 'ncols': '(1)', 'subplot_spec': 'gs_mpp_0[0]', 'hspace': '(0.1)'}), 
'(nrows=1, ncols=1, subplot_spec=gs_mpp_0[0],\n hspace=0.1)\n', (8971, 9031), True, 'import matplotlib.gridspec as gridspec\n'), ((9299, 9311), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9309, 9311), True, 'import matplotlib.pyplot as plt\n'), ((9444, 9495), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(1)', 'figure': 'fig_vfb'}), '(ncols=1, nrows=1, figure=fig_vfb)\n', (9461, 9495), True, 'import matplotlib.gridspec as gridspec\n'), ((9516, 9608), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': '(1)', 'ncols': '(1)', 'subplot_spec': 'gs_vfb_0[0]', 'hspace': '(0.1)'}), '(nrows=1, ncols=1, subplot_spec=gs_vfb_0[0],\n hspace=0.1)\n', (9548, 9608), True, 'import matplotlib.gridspec as gridspec\n'), ((13570, 13597), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax_c_1'], {}), '(ax_c_1)\n', (13589, 13597), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((15626, 15669), 'pidsim.ml_simulator.MLSim', 'pmpp_rf.MLSim', ([], {'h5_transport_file': 'path_to_h5'}), '(h5_transport_file=path_to_h5)\n', (15639, 15669), True, 'import pidsim.ml_simulator as pmpp_rf\n'), ((16002, 16108), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'time (s)': time_s, 'Pmpp (mW/cm^2)': pmpp, 'Rsh (Ohm cm^2)': rsh,\n 'vfb (V)': vfb}"}), "(data={'time (s)': time_s, 'Pmpp (mW/cm^2)': pmpp,\n 'Rsh (Ohm cm^2)': rsh, 'vfb (V)': vfb})\n", (16014, 16108), True, 'import pandas as pd\n'), ((18453, 18469), 'matplotlib.pyplot.close', 'plt.close', (['fig_c'], {}), '(fig_c)\n', (18462, 18469), True, 'import matplotlib.pyplot as plt\n'), ((18478, 18494), 'matplotlib.pyplot.close', 'plt.close', (['fig_s'], {}), '(fig_s)\n', (18487, 18494), True, 'import matplotlib.pyplot as plt\n'), ((18503, 18521), 'matplotlib.pyplot.close', 'plt.close', (['fig_mpp'], {}), '(fig_mpp)\n', (18512, 18521), True, 'import matplotlib.pyplot as plt\n'), ((18530, 18548), 
'matplotlib.pyplot.close', 'plt.close', (['fig_vfb'], {}), '(fig_vfb)\n', (18539, 18548), True, 'import matplotlib.pyplot as plt\n'), ((18871, 18919), 'os.path.join', 'os.path.join', (['analysis_path', '"""ofat_analysis.csv"""'], {}), "(analysis_path, 'ofat_analysis.csv')\n", (18883, 18919), False, 'import os\n'), ((3290, 3391), 'numpy.dtype', 'np.dtype', (["[('C_SiNx average final (atoms/cm^3)', 'd'), (\n 'C_Si average final (atoms/cm^3)', 'd')]"], {}), "([('C_SiNx average final (atoms/cm^3)', 'd'), (\n 'C_Si average final (atoms/cm^3)', 'd')])\n", (3298, 3391), True, 'import numpy as np\n'), ((3676, 3710), 'os.path.splitext', 'os.path.splitext', (["r['config file']"], {}), "(r['config file'])\n", (3692, 3710), False, 'import os\n'), ((4057, 4096), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['simga_s'], {}), '(simga_s)\n', (4087, 4096), True, 'import pnptransport.utils as utils\n'), ((4160, 4196), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['zeta'], {}), '(zeta)\n', (4190, 4196), True, 'import pnptransport.utils as utils\n'), ((4261, 4300), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['e_field'], {}), '(e_field)\n', (4291, 4300), True, 'import pnptransport.utils as utils\n'), ((4359, 4392), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['h'], {}), '(h)\n', (4389, 4392), True, 'import pnptransport.utils as utils\n'), ((4539, 4574), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['dsf'], {}), '(dsf)\n', (4569, 4574), True, 'import pnptransport.utils as utils\n'), ((6537, 6581), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(6)'}), '(base=10.0, numticks=6)\n', (6558, 6581), True, 'import matplotlib as mpl\n'), ((6780, 6824), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(6)'}), 
'(base=10.0, numticks=6)\n', (6801, 6824), True, 'import matplotlib as mpl\n'), ((7091, 7125), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', (['(4)'], {'prune': 'None'}), '(4, prune=None)\n', (7110, 7125), True, 'import matplotlib.ticker as mticker\n'), ((7166, 7193), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', (['(4)'], {}), '(4)\n', (7190, 7193), True, 'import matplotlib.ticker as mticker\n'), ((7234, 7271), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', (['(3)'], {'prune': '"""lower"""'}), "(3, prune='lower')\n", (7253, 7271), True, 'import matplotlib.ticker as mticker\n'), ((7312, 7339), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', (['(4)'], {}), '(4)\n', (7336, 7339), True, 'import matplotlib.ticker as mticker\n'), ((8285, 8329), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(6)'}), '(base=10.0, numticks=6)\n', (8306, 8329), True, 'import matplotlib as mpl\n'), ((8568, 8602), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', (['(6)'], {'prune': 'None'}), '(6, prune=None)\n', (8587, 8602), True, 'import matplotlib.ticker as mticker\n'), ((8643, 8670), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', (['(2)'], {}), '(2)\n', (8667, 8670), True, 'import matplotlib.ticker as mticker\n'), ((9823, 9849), 'h5py.File', 'h5py.File', (['path_to_h5', '"""r"""'], {}), "(path_to_h5, 'r')\n", (9832, 9849), False, 'import h5py\n'), ((9913, 9933), 'numpy.array', 'np.array', (["hf['time']"], {}), "(hf['time'])\n", (9921, 9933), True, 'import numpy as np\n'), ((10273, 10286), 'numpy.max', 'np.max', (['x_sin'], {}), '(x_sin)\n', (10279, 10286), True, 'import numpy as np\n'), ((10421, 10434), 'numpy.amax', 'np.amax', (['x_si'], {}), '(x_si)\n', (10428, 10434), True, 'import numpy as np\n'), ((10504, 10574), 'pnptransport.utils.get_indices_at_values', 'utils.get_indices_at_values', ([], {'x': 'time_s', 'requested_values': 'requested_time'}), 
'(x=time_s, requested_values=requested_time)\n', (10531, 10574), True, 'import pnptransport.utils as utils\n'), ((10732, 10778), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'cmap': 'cm', 'norm': 'normalize'}), '(cmap=cm, norm=normalize)\n', (10753, 10778), True, 'import matplotlib as mpl\n'), ((11938, 11958), 'numpy.empty', 'np.empty', (['n_profiles'], {}), '(n_profiles)\n', (11946, 11958), True, 'import numpy as np\n'), ((11982, 12002), 'numpy.empty', 'np.empty', (['n_profiles'], {}), '(n_profiles)\n', (11990, 12002), True, 'import numpy as np\n'), ((16172, 16231), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_simulated_pid.csv')"], {}), "(analysis_path, filetag + '_simulated_pid.csv')\n", (16184, 16231), False, 'import os\n'), ((16331, 16346), 'numpy.amax', 'np.amax', (['time_h'], {}), '(time_h)\n', (16338, 16346), True, 'import numpy as np\n'), ((16941, 16975), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', (['(6)'], {'prune': 'None'}), '(6, prune=None)\n', (16960, 16975), True, 'import matplotlib.ticker as mticker\n'), ((17018, 17045), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', (['(2)'], {}), '(2)\n', (17042, 17045), True, 'import matplotlib.ticker as mticker\n'), ((17894, 17941), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_c.png')"], {}), "(analysis_path, filetag + '_c.png')\n", (17906, 17941), False, 'import os\n'), ((17974, 18021), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_c.svg')"], {}), "(analysis_path, filetag + '_c.svg')\n", (17986, 18021), False, 'import os\n'), ((18054, 18101), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_s.png')"], {}), "(analysis_path, filetag + '_s.png')\n", (18066, 18101), False, 'import os\n'), ((18136, 18183), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_p.png')"], {}), "(analysis_path, filetag + '_p.png')\n", (18148, 18183), False, 'import os\n'), ((18218, 18265), 
'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_p.svg')"], {}), "(analysis_path, filetag + '_p.svg')\n", (18230, 18265), False, 'import os\n'), ((18300, 18349), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_vfb.png')"], {}), "(analysis_path, filetag + '_vfb.png')\n", (18312, 18349), False, 'import os\n'), ((18384, 18433), 'os.path.join', 'os.path.join', (['analysis_path', "(filetag + '_vfb.svg')"], {}), "(analysis_path, filetag + '_vfb.svg')\n", (18396, 18433), False, 'import os\n'), ((5854, 5893), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['e_field'], {}), '(e_field)\n', (5884, 5893), True, 'import pnptransport.utils as utils\n'), ((6171, 6206), 'pnptransport.utils.latex_order_of_magnitude', 'utils.latex_order_of_magnitude', (['dsf'], {}), '(dsf)\n', (6201, 6206), True, 'import pnptransport.utils as utils\n'), ((10213, 10236), 'numpy.array', 'np.array', (["grp_sinx['x']"], {}), "(grp_sinx['x'])\n", (10221, 10236), True, 'import numpy as np\n'), ((10306, 10327), 'numpy.array', 'np.array', (["grp_si['x']"], {}), "(grp_si['x'])\n", (10314, 10327), True, 'import numpy as np\n'), ((10796, 10843), 'tqdm.tqdm', 'tqdm', (['requested_indices'], {'leave': '(True)', 'position': '(0)'}), '(requested_indices, leave=True, position=0)\n', (10800, 10843), False, 'from tqdm import tqdm\n'), ((13421, 13435), 'numpy.amin', 'np.amin', (['x_sin'], {}), '(x_sin)\n', (13428, 13435), True, 'import numpy as np\n'), ((13443, 13457), 'numpy.amax', 'np.amax', (['x_sin'], {}), '(x_sin)\n', (13450, 13457), True, 'import numpy as np\n'), ((13488, 13501), 'numpy.amin', 'np.amin', (['x_si'], {}), '(x_si)\n', (13495, 13501), True, 'import numpy as np\n'), ((13509, 13522), 'numpy.amax', 'np.amax', (['x_si'], {}), '(x_si)\n', (13516, 13522), True, 'import numpy as np\n'), ((12245, 12287), 'numpy.array', 'np.array', (["grp_sinx['concentration'][ct_ds]"], {}), "(grp_sinx['concentration'][ct_ds])\n", (12253, 12287), True, 
'import numpy as np\n'), ((12315, 12355), 'numpy.array', 'np.array', (["grp_si['concentration'][ct_ds]"], {}), "(grp_si['concentration'][ct_ds])\n", (12323, 12355), True, 'import numpy as np\n'), ((6673, 6689), 'numpy.arange', 'np.arange', (['(2)', '(10)'], {}), '(2, 10)\n', (6682, 6689), True, 'import numpy as np\n'), ((6916, 6932), 'numpy.arange', 'np.arange', (['(2)', '(10)'], {}), '(2, 10)\n', (6925, 6932), True, 'import numpy as np\n'), ((8421, 8437), 'numpy.arange', 'np.arange', (['(2)', '(10)'], {}), '(2, 10)\n', (8430, 8437), True, 'import numpy as np\n'), ((11162, 11204), 'numpy.array', 'np.array', (["grp_sinx['concentration'][ct_ds]"], {}), "(grp_sinx['concentration'][ct_ds])\n", (11170, 11204), True, 'import numpy as np\n'), ((11236, 11276), 'numpy.array', 'np.array', (["grp_si['concentration'][ct_ds]"], {}), "(grp_si['concentration'][ct_ds])\n", (11244, 11276), True, 'import numpy as np\n'), ((12395, 12425), 'scipy.integrate.simps', 'integrate.simps', (['c_sin', '(-x_sin)'], {}), '(c_sin, -x_sin)\n', (12410, 12425), False, 'from scipy import integrate\n'), ((12482, 12509), 'scipy.integrate.simps', 'integrate.simps', (['c_si', 'x_si'], {}), '(c_si, x_si)\n', (12497, 12509), False, 'from scipy import integrate\n')] |
"""
Created on: 30 June 2019
Investigate stationarity of time series (example of security data), analytics on log-returns
Provide summary statistics on the data
Introduce tests (such as Augmented Dickey Fuller) to check stationarity of time series
Inspiration from: https://www.analyticsvidhya.com/blog/2018/09/non-stationary-time-series-python/
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy.stats import kurtosis, skew
from statsmodels.tsa.stattools import adfuller
from securityAnalysis.utils_finance import calculate_return_df
# Module-level display/plot configuration, applied once at import time.
pd.set_option('display.max_columns', 10)
pd.set_option('display.width', 500)
plt.style.use('seaborn')
def test_stationarity_adf(time_series: np.array) -> None:
    """
    Run the (augmented) Dickey-Fuller test from statsmodels on a time series
    and print a summary of the outcome.

    Parameter:
        time_series: time series of non-null observations on which to perform
            the stationarity test

    Returns
        None: prints the test statistic, p-value, lag and observation counts,
            and the critical values at the 1, 5 and 10% significance levels

    NOTE:
        With test statistic t, critical value c, and null hypothesis H_0
        (the series has a unit root, i.e. is non-stationary):
            t < c  -> reject H_0, the series is stationary
            t > c  -> fail to reject H_0, the series is non-stationary
    """
    print('Results of Dickey-Fuller Test:')
    adf_result = adfuller(time_series, autolag='AIC')
    summary_labels = ['Test Statistic', 'p-value', '#Lags Used',
                      'Number of Observations Used']
    summary = pd.Series(adf_result[0:4], index=summary_labels)
    critical_values = adf_result[4]
    for significance in critical_values:
        summary['Critical Value (%s)' % significance] = critical_values[significance]
    print(summary)
def get_aug_dickey_fuller_result(time_series: np.array, alpha: int = 5) -> bool:
    """
    Method to perform Augmented Dickey Fuller Test for stationarity on
    time_series, at a given level of significance alpha

    Parameters:
        time_series: 1-D array of time series data to be tested for stationarity
        alpha: chosen level of significance in percent, must be one of 1, 5 or 10

    Returns:
        bool: True for stationary data (t-statistic less than critical value at
            significance level alpha, so H_0 of a unit root is rejected),
            False for non-stationary data
    """
    assert alpha in [1, 5, 10], "Choose appropriate alpha significance: [1, 5 or 10%]"
    print(f"Performing augmented Dickey Fuller test at significance level alpha: {alpha}")
    df_test = adfuller(time_series, autolag='AIC')
    test_statistic = df_test[0]
    # df_test[4] is the dict of critical values keyed by '1%', '5%' and '10%';
    # the previous code stored it under the misleading name 'p-values'.
    critical_values = df_test[4]
    return test_statistic < critical_values[f"{alpha}%"]
def get_descriptive_stats(data: pd.DataFrame, alpha: float = 0.05) -> dict:
    """Compute descriptive, high level stats (p-values given for two tailed tests),
    including skewness and kurtosis, specifying alpha (for tests of skewness and kurtosis)

    Args:
        data: Clean dataframe with no NaNs
        alpha: level of significance for the two-tailed test, must lie strictly
            between 0 and 1

    Returns
        dict of results for descriptive level statistics, one entry per column
        of `data`
    """
    assert 0 < alpha < 1, f"Alpha level of {alpha} is not valid, must lie between 0 and 1"
    print("Getting descriptive level stats for dataframe...")
    result_df = pd.DataFrame(columns=['Size', 'Mean', 'Std Dev', 'Skewness', 'Excess Kurtosis'])
    result_df['Size'] = data.count()
    result_df['Mean'] = data.mean()
    result_df['Std Dev'] = np.std(data)
    result_df['Min'] = np.min(data)
    result_df['Max'] = np.max(data)

    # Skewness and its two-tailed t-test (H_0: skewness of returns = 0).
    result_df['Skewness'] = skew(data)
    result_df['Skewness t-statistic'] = \
        result_df['Skewness'].values / np.sqrt(6 / result_df['Size'].values)
    result_df['Skewness p-value'] = 2 * (1 - stats.t.cdf(result_df['Skewness t-statistic'], df=1))
    # p-value < alpha -> reject H_0. Assign directly under the final column
    # title instead of creating an 'accept H_0' column and renaming it.
    skew_h0_title = "Skewness reject H_0 at " + str(100 * alpha) + "% sig level"
    result_df[skew_h0_title] = result_df['Skewness p-value'].values < alpha

    result_df['Excess Kurtosis'] = kurtosis(data)  # if high excess kurtosis --> thick tails
    result_df['Excess Kurtosis t-statistic'] = \
        result_df['Excess Kurtosis'].values / np.sqrt(24 / result_df['Size'].values)
    result_df['Excess Kurtosis p-value'] = \
        2 * (1 - stats.t.cdf(result_df['Excess Kurtosis t-statistic'], df=1))
    kurt_h0_title = f"Kurtosis reject H_0 at {str(100 * alpha)}% sig level"
    result_df[kurt_h0_title] = result_df['Excess Kurtosis p-value'].values < alpha

    # Augmented Dickey-Fuller stationarity result for each column.
    result_df['Aug Dickey-Fuller Test'] = [
        get_aug_dickey_fuller_result(data.loc[:, column]) for column in data.columns
    ]
    return result_df.T.to_dict()
if __name__ == '__main__':
    # Demo driver: fetch a real price series, then build a synthetic one, and
    # run the stationarity/descriptive analyses on the latter.
    # real market data
    import yfinance
    price_series = yfinance.download(tickers='GOOGL', start="2010-01-01")['Adj Close']  # google data
    price_df = pd.DataFrame(price_series)
    # random data example
    # NOTE(review): the random-walk example below overwrites both price_series
    # and price_df, so the downloaded market data above is never analysed.
    import datetime
    date_rng = pd.date_range(datetime.datetime.now().strftime("%Y-%m-%d"), periods=500).to_list()
    random_returns = pd.Series(np.random.randn(500), index=date_rng)
    price_series = random_returns.cumsum()
    price_df = pd.DataFrame(price_series)
    # run analysis
    returns_df = calculate_return_df(data=price_df,
                                       is_relative_return=True)
    # # could also look at log returns of the data and see if the time series is stationary
    # log_returns_df = calculate_return_df(data=price_df,
    #                                      is_log_return=True)
    # test for stationarity (using Augmented Dickey Fuller test) for one timeseries
    test_stationarity_adf(time_series=price_series)
    # augmented dickey fuller result
    get_aug_dickey_fuller_result(time_series=price_series)
    # more descriptive statistics on skewness, kurtosis, as well as # observations, max, min, mean,
    # standard deviation etc
    get_descriptive_stats(data=returns_df,
                          alpha=0.05)
| [
"pandas.DataFrame",
"statsmodels.tsa.stattools.adfuller",
"yfinance.download",
"numpy.random.randn",
"numpy.std",
"datetime.datetime.now",
"scipy.stats.skew",
"matplotlib.pyplot.style.use",
"numpy.min",
"numpy.max",
"pandas.Series",
"scipy.stats.kurtosis",
"numpy.sqrt",
"pandas.set_option"... | [((601, 641), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(10)'], {}), "('display.max_columns', 10)\n", (614, 641), True, 'import pandas as pd\n'), ((642, 677), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(500)'], {}), "('display.width', 500)\n", (655, 677), True, 'import pandas as pd\n'), ((678, 702), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (691, 702), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1499), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['time_series'], {'autolag': '"""AIC"""'}), "(time_series, autolag='AIC')\n", (1471, 1499), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((1516, 1625), 'pandas.Series', 'pd.Series', (['df_test[0:4]'], {'index': "['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used']"}), "(df_test[0:4], index=['Test Statistic', 'p-value', '#Lags Used',\n 'Number of Observations Used'])\n", (1525, 1625), True, 'import pandas as pd\n'), ((2557, 2593), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['time_series'], {'autolag': '"""AIC"""'}), "(time_series, autolag='AIC')\n", (2565, 2593), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((3447, 3532), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Size', 'Mean', 'Std Dev', 'Skewness', 'Excess Kurtosis']"}), "(columns=['Size', 'Mean', 'Std Dev', 'Skewness', 'Excess Kurtosis']\n )\n", (3459, 3532), True, 'import pandas as pd\n'), ((3629, 3641), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (3635, 3641), True, 'import numpy as np\n'), ((3665, 3677), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3671, 3677), True, 'import numpy as np\n'), ((3701, 3713), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3707, 3713), True, 'import numpy as np\n'), ((3743, 3753), 'scipy.stats.skew', 'skew', (['data'], {}), '(data)\n', (3747, 3753), False, 'from scipy.stats import kurtosis, skew\n'), ((4384, 
4398), 'scipy.stats.kurtosis', 'kurtosis', (['data'], {}), '(data)\n', (4392, 4398), False, 'from scipy.stats import kurtosis, skew\n'), ((5428, 5454), 'pandas.DataFrame', 'pd.DataFrame', (['price_series'], {}), '(price_series)\n', (5440, 5454), True, 'import pandas as pd\n'), ((5727, 5753), 'pandas.DataFrame', 'pd.DataFrame', (['price_series'], {}), '(price_series)\n', (5739, 5753), True, 'import pandas as pd\n'), ((5791, 5850), 'securityAnalysis.utils_finance.calculate_return_df', 'calculate_return_df', ([], {'data': 'price_df', 'is_relative_return': '(True)'}), '(data=price_df, is_relative_return=True)\n', (5810, 5850), False, 'from securityAnalysis.utils_finance import calculate_return_df\n'), ((3835, 3872), 'numpy.sqrt', 'np.sqrt', (["(6 / result_df['Size'].values)"], {}), "(6 / result_df['Size'].values)\n", (3842, 3872), True, 'import numpy as np\n'), ((4537, 4575), 'numpy.sqrt', 'np.sqrt', (["(24 / result_df['Size'].values)"], {}), "(24 / result_df['Size'].values)\n", (4544, 4575), True, 'import numpy as np\n'), ((5331, 5385), 'yfinance.download', 'yfinance.download', ([], {'tickers': '"""GOOGL"""', 'start': '"""2010-01-01"""'}), "(tickers='GOOGL', start='2010-01-01')\n", (5348, 5385), False, 'import yfinance\n'), ((5631, 5651), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (5646, 5651), True, 'import numpy as np\n'), ((3918, 3970), 'scipy.stats.t.cdf', 'stats.t.cdf', (["result_df['Skewness t-statistic']"], {'df': '(1)'}), "(result_df['Skewness t-statistic'], df=1)\n", (3929, 3970), True, 'import scipy.stats as stats\n'), ((4638, 4697), 'scipy.stats.t.cdf', 'stats.t.cdf', (["result_df['Excess Kurtosis t-statistic']"], {'df': '(1)'}), "(result_df['Excess Kurtosis t-statistic'], df=1)\n", (4649, 4697), True, 'import scipy.stats as stats\n'), ((5531, 5554), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5552, 5554), False, 'import datetime\n')] |
import math
import numpy as np
#-------------------------------------------------------------------------
'''
Problem 2: Multi-armed bandit problem
In this problem, you will implement an AI player for Multi-armed bandit problem using UCB (Upper Confidence Bound).
The main goal of this problem is to get familiar with a simplified problem in reinforcement learning, and how to train the model parameters on the data from a game.
You could test the correctness of your code by typing `nosetests test1.py` in the terminal.
'''
#-------------------------------------------------------
class Bandit:
    """The multi-armed bandit machine. Instead of one slot machine lever there
    are several arms; each arm pays out with its own probability of winning,
    and these odds are hidden from the players."""

    # ----------------------------------------------
    def __init__(self, p):
        """Create a bandit from a vector of winning probabilities.

        Inputs:
            p: the vector of winning probabilities, a numpy vector of length n,
               where n is the number of arms of the bandit.
        Outputs:
            self.p: the stored vector of winning probabilities.
        """
        self.p = p

    # ----------------------------------------------
    def play(self, a):
        """Pull one arm and return the resulting reward.

        Input:
            a: the index of the lever being pulled, an integer scalar between
               0 and n-1, where n is the number of arms in the bandit.
        Output:
            r: the reward returned to the agent, a float scalar: 1. for a
               "win" (with the pulled arm's winning probability) and 0. for
               a "lose".
        """
        win_chance = self.p[a]
        reward = np.random.choice([0., 1.], 1, p=[1. - win_chance, win_chance])
        return reward
#-------------------------------------------------------
class UCBplayer:
    '''The agent is trying to maximize the sum of rewards (payoff) in the game using UCB (Upper Confidence Bound).
       The agent will
       (1) choose the lever with the largest bound value, (index of the arm is a tie-breaker);
       (2) update the statistics of each arm after getting the result of a game.'''

    # ----------------------------------------------
    def __init__(self, n, c=1.142):
        ''' Initialize the agent.
            Inputs:
                n: the number of arms of the bandit, an integer scalar.
                c: exploration parameter, a float scalar
            Outputs:
                self.n: the number of levers, an integer scalar.
                self.c: exploration parameter, a float scalar.
                self.ni: the number of simulations choosing the i-th arm, an integer vector of length n.
                self.N: total number of simulations, an integer scalar
                self.w: the sum of game results after choosing each arm, a float vector of length n.
                    w[i] represents the sum of scores achieved by pulling the i-th arm.
        '''
        self.n = n
        self.c = c
        self.ni = np.zeros(n)
        self.w = np.zeros(n)
        self.N = 0

    # ----------------------------------------------
    @staticmethod
    def UCB(wi, ni, N, c=1.142):
        '''
        compute UCB (Upper Confidence Bound) of a child node (say the i-th child node).
        the average payoffs of the current node vi = wi/ni
            Inputs:
                wi: the sum of game results after choosing the i-th child node, an integer scalar
                ni: the number of simulations choosing the i-th child node, an integer scalar
                N: total number of simulations for the parent node
                c: exploration parameter
            Outputs:
                b: the UCB score of the node, a float scalar,
                   b = wi/ni + c*sqrt(ln(N)/ni).
        '''
        # An arm that was never tried gets an infinite bound so it is always
        # explored before re-pulling a previously tried arm.
        if ni == 0:
            return float('inf')
        b = wi / ni + c * math.sqrt(math.log(N) / ni)
        return b

    # ----------------------------------------------
    def policy(self):
        '''
        The policy function of the agent.
        The agent will choose the lever with the largest bound value, (when there is a tie, use index of the arms as tie-breaker);
            Output:
                a: the index of the lever to pull. a is an integer scalar between 0 and n-1.
        '''
        a = 0
        best_bound = self.UCB(self.w[0], self.ni[0], self.N, self.c)
        for i in range(1, self.n):
            bound = self.UCB(self.w[i], self.ni[i], self.N, self.c)
            # Strict '>' keeps the lowest index on ties.
            if bound > best_bound:
                best_bound = bound
                a = i
        return a

    # -----------------------------------------------------------------
    def update(self, a, r):
        '''
        Update the parameters of the player after collecting one game result.
        (1) increase the count of the lever and total count.
        (2) update the sum of reward based upon the received reward r.
            Input:
                a: the index of the arm being pulled. a is an integer scalar between 0 and n-1.
                r: the reward returned, a float scalar.
        '''
        self.ni[a] += 1
        self.N += 1
        self.w[a] += r

    # -----------------------------------------------------------------
    def play(self, g, n_steps=1000):
        '''
        Play the game for n_steps steps. In each step,
        (1) pull a lever and receive the reward from the game
        (2) update the parameters
            Input:
                g: the game machine, a multi-armed bandit object.
                n_steps: number of steps to play in the game, an integer scalar.
        Note: g.p is hidden from the player; only g.play() is called here.
        '''
        for _ in range(n_steps):
            # take an action
            a = self.policy()
            # run the game to collect the result
            r = g.play(a)
            # update statistics
            self.update(a, r)
| [
"numpy.zeros",
"numpy.random.choice"
] | [((1996, 2043), 'numpy.random.choice', 'np.random.choice', (['[0.0, 1.0]', '(1)'], {'p': '[1.0 - p, p]'}), '([0.0, 1.0], 1, p=[1.0 - p, p])\n', (2012, 2043), True, 'import numpy as np\n'), ((3328, 3339), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3336, 3339), True, 'import numpy as np\n'), ((3356, 3367), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3364, 3367), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""A script to run a set of gaussian process experiments under differing configurations."""
import argparse
import json
from multiprocessing.pool import Pool
import numpy as np
from experiments import setup_experiment
# Registry mapping the experiment names accepted by the -e command line flag
# to the setup functions that run them.
# NOTE(review): values come from the project-local `experiments.setup_experiment`
# module, whose signatures are not visible here.
EXPERIMENTS = {
    'airline': setup_experiment.airline_experiment,
    'boston': setup_experiment.boston_experiment,
    'wisconsin': setup_experiment.wisconsin_experiment,
    'mining': setup_experiment.mining_experiment,
    'usps': setup_experiment.usps_experiment,
    'abalone': setup_experiment.abalone_experiment,
    'creep': setup_experiment.creep_experiment,
    'mnist': setup_experiment.mnist_experiment,
    'mnist8m': setup_experiment.mnist8m_experiment,
    'mnist_binary': setup_experiment.mnist_binary_experiment,
    'mnist_binary_inducing': setup_experiment.mnist_binary_inducing_experiment,
    'sarcos': setup_experiment.sarcos_experiment,
    'sarcos_inducing': setup_experiment.sarcos_inducing_experiment,
    'sarcos_all_joints': setup_experiment.sarcos_all_joints_experiment,
    'seismic': setup_experiment.seismic_experiment,
}

# Posterior types accepted by the -m flag; METHODS[0] ('diag') is the default.
METHODS = ['diag', 'full']
def main():
    """Entry point: seed the RNG, then dispatch the experiments requested by
    the user, either from a JSON configuration file (-f) or from direct
    command line arguments (-e)."""
    args = setup_args()
    np.random.seed(1)
    if 'file' in args:
        # Batch mode: a configuration file lists multiple experiments.
        with open(args['file']) as config_file:
            config = json.loads(config_file.read())
        run_parallel(**config)
        return
    if 'experiment_name' not in args:
        print('You must either chose an experiment (-e) or a config file (-f).')
        return
    # Single-experiment mode, driven by the command line arguments.
    experiment = EXPERIMENTS[args['experiment_name']]
    del args['experiment_name']
    if args['method'] == 'full' and args['components'] != 1:
        print('Only one components allowed for full Gaussian posterior.')
    else:
        experiment(**args)
def setup_args():
    """Build the command line parser and return the parsed arguments as a dictionary."""
    parser = argparse.ArgumentParser(description='Experimental framework for savigp.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Each entry: (flag aliases, keyword options for add_argument).
    argument_specs = [
        (('-e', '--experiment_name'),
         dict(choices=EXPERIMENTS, default=argparse.SUPPRESS,
              help='The name of the experiment to run.')),
        (('-f', '--file'),
         dict(default=argparse.SUPPRESS,
              help='A json file containing a list of experiment configurations to run.')),
        (('-m', '--method'),
         dict(choices=METHODS, default=METHODS[0],
              help='The type of mixture of gaussians to learn.')),
        (('-c', '--components'),
         dict(type=int, default=1,
              help='The number of components to use in the mixture of Gaussians.')),
        (('-s', '--sparsity_factor'),
         dict(type=float, default=1.0,
              help='The sparsity of inducing points. Value must be between 0 and 1.')),
        (('-r', '--run_id'),
         dict(type=int, default=1,
              help='The id of the experiment configuration.')),
        (('-i', '--image'),
         dict(default=argparse.SUPPRESS,
              help='A path to a partially completed large scale experiment')),
        (('-n', '--n_threads'),
         dict(type=int, default=argparse.SUPPRESS,
              help='The number of threads to run for a large scale experiment.')),
        (('-p', '--partition_size'),
         dict(type=int, default=argparse.SUPPRESS,
              help='The size of sample partitions for a large scale experiment.')),
        (('-o', '--optimize_stochastic'),
         dict(action='store_true',
              help='Whether to optimize the model stochastically.')),
        (('-t', '--max_iter'),
         dict(type=int, default=argparse.SUPPRESS,
              help='The maximum number of global iterations.')),
        (('-l', '--num_samples'),
         dict(type=int, default=argparse.SUPPRESS,
              help='Number of samples for Monte Carlo Estimation.')),
    ]
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return vars(parser.parse_args())
def run_parallel(num_processes, experiment_names, methods, sparsity_factors, run_ids):
    """
    Run multiple experiments in parallel, one per combination of the given
    settings.

    Parameters
    ----------
    num_processes : int
        The maximum number of processes that can run concurrently.
    experiment_names : list of str
        The names of experiments to run.
    methods : list of str
        The methods to run the experiments under (mix1, mix2, or full).
    sparsity_factors : list of float
        The sparsity of inducing points to run the experiments at.
    run_ids : list of int
        The ids of the configurations under which to run the experiments.
    """
    # One configuration per element of the cartesian product of all settings.
    experiment_configs = [
        {'experiment_name': experiment,
         'method': method,
         'sparsity_factor': sparsity_factor,
         'run_id': run_id}
        for experiment in experiment_names
        for method in methods
        for sparsity_factor in sparsity_factors
        for run_id in run_ids
    ]
    # Fan the configurations out over a bounded pool of worker processes.
    pool = Pool(num_processes)
    pool.map(run_config, experiment_configs)
def run_config(config):
    """Run a single experiment described by `config`; the 'experiment_name'
    key selects the experiment and the remaining keys are passed through."""
    experiment_name = config.pop('experiment_name')
    EXPERIMENTS[experiment_name](**config)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"multiprocessing.pool.Pool"
] | [((1223, 1240), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1237, 1240), True, 'import numpy as np\n'), ((2040, 2173), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Experimental framework for savigp."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Experimental framework for savigp.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2063, 2173), False, 'import argparse\n'), ((5412, 5431), 'multiprocessing.pool.Pool', 'Pool', (['num_processes'], {}), '(num_processes)\n', (5416, 5431), False, 'from multiprocessing.pool import Pool\n')] |
"""
The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import copy
import random
import numpy
from pympler import asizeof
from archiver.archiver import Archiver
from evaluators.classifier_evaluator import PredictionEvaluator
from evaluators.detector_evaluator import DriftDetectionEvaluator
from plotter.performance_plotter import *
from filters.attribute_handlers import *
from streams.readers.arff_reader import *
class PrequentialDriftEvaluator:
"""This class lets one run a classifier with a drift detector against a data stream,
and evaluate it prequentially over time. Also, one is able to measure the detection
false positive as well as false negative rates."""
    def __init__(self, learner, drift_detector, attributes, attributes_scheme,
                 actual_drift_points, drift_acceptance_interval, project, memory_check_step=-1):
        """Set up the prequential evaluation of `learner` + `drift_detector`.

        :param learner: the classifier under evaluation — TODO confirm the
            expected interface (do_testing/do_training/reset, etc.) against the
            project's learner base class.
        :param drift_detector: the drift detection method paired with the learner.
        :param attributes: attribute descriptors of the stream (used for
            numeric/nominal transformation in run()).
        :param attributes_scheme: dict with 'numeric' and 'nominal' entries
            holding the per-attribute transformation schemes.
        :param actual_drift_points: ground-truth drift locations, used for
            detection true/false positive accounting.
        :param drift_acceptance_interval: window after an actual drift within
            which a detection counts as a true positive.
        :param project: experiment project object providing get_path()/get_name().
        :param memory_check_step: if not -1, the detector's memory usage is
            sampled every this many instances.
        """
        self.learner = learner
        self.drift_detector = drift_detector
        # Stream progress counters.
        self.__instance_counter = 0
        self.__num_rubbish = 0
        # Per-drift snapshots of learner statistics.
        self.__learner_error_rate_array = []
        self.__learner_memory_usage = []
        self.__learner_runtime = []
        self.__actual_drift_points = actual_drift_points
        self.__drift_acceptance_interval = drift_acceptance_interval
        # Detection locations (instance indices) and a 0/1 flag per instance.
        self.__located_drift_points = []
        self.__drift_points_boolean = []
        self.__drift_detection_memory_usage = []
        self.__drift_detection_runtime = []
        self.__attributes = attributes
        self.__numeric_attribute_scheme = attributes_scheme['numeric']
        self.__nominal_attribute_scheme = attributes_scheme['nominal']
        self.__project_path = project.get_path()
        self.__project_name = project.get_name()
        self.__memory_check_step = memory_check_step
    def run(self, stream, random_seed=1):
        """Prequentially process every record in `stream`: test-then-train the
        learner, feed each prediction outcome to the drift detector, and on a
        detected drift snapshot statistics and reset both learner and detector.

        :param stream: sequence of records; each record is a list of attribute
            values with the class label in the last position. Records containing
            "?" (missing values) are counted as rubbish and skipped.
        :param random_seed: seed for the `random` module, for reproducibility.
        """
        random.seed(random_seed)
        for record in stream:
            self.__instance_counter += 1
            percentage = (self.__instance_counter / len(stream)) * 100
            print("%0.2f" % percentage + "% of instances are prequentially processed!", end="\r")
            if record.__contains__("?"):
                self.__num_rubbish += 1
                continue
            # ---------------------
            # Data Transformation
            # ---------------------
            # Discretize numeric attributes for nominal learners; map nominal
            # attributes to numeric values for numeric learners.
            r = copy.copy(record)
            for k in range(0, len(r) - 1):
                if self.learner.LEARNER_CATEGORY == TornadoDic.NOM_CLASSIFIER and self.__attributes[k].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
                    r[k] = Discretizer.find_bin(r[k], self.__nominal_attribute_scheme[k])
                elif self.learner.LEARNER_CATEGORY == TornadoDic.NUM_CLASSIFIER and self.__attributes[k].TYPE == TornadoDic.NOMINAL_ATTRIBUTE:
                    r[k] = NominalToNumericTransformer.map_attribute_value(r[k], self.__numeric_attribute_scheme[k])
            # NORMALIZING NUMERIC DATA
            if self.learner.LEARNER_CATEGORY == TornadoDic.NUM_CLASSIFIER:
                r[0:len(r) - 1] = Normalizer.normalize(r[0:len(r) - 1], self.__numeric_attribute_scheme)
            # ----------------------
            # Prequential Learning
            # ----------------------
            if self.learner.is_ready():
                # Test first: predict before this record is used for training.
                real_class = r[len(r) - 1]
                predicted_class = self.learner.do_testing(r)
                prediction_status = True
                if real_class != predicted_class:
                    prediction_status = False
                # -----------------------
                #  Drift Detected?
                # -----------------------
                warning_status, drift_status = self.drift_detector.detect(prediction_status)
                if drift_status:
                    # Record the drift location plus a snapshot of error rate,
                    # memory usage and runtimes, then reset learner and detector
                    # so learning restarts from scratch on the next record.
                    self.__drift_points_boolean.append(1)
                    self.__located_drift_points.append(self.__instance_counter)
                    print("\n ->>> " + self.learner.LEARNER_NAME.title() + " faced a drift at instance " +
                          str(self.__instance_counter) + ".")
                    print("%0.2f" % percentage, " of instances are prequentially processed!", end="\r")
                    learner_error_rate = PredictionEvaluator.calculate(TornadoDic.ERROR_RATE,
                                                                  self.learner.get_global_confusion_matrix())
                    self.__learner_error_rate_array.append(round(learner_error_rate, 4))
                    self.__learner_memory_usage.append(asizeof.asizeof(self.learner, limit=20))
                    self.__learner_runtime.append(self.learner.get_running_time())
                    self.__drift_detection_memory_usage.append(asizeof.asizeof(self.drift_detector, limit=20))
                    self.__drift_detection_runtime.append(self.drift_detector.RUNTIME)
                    self.learner.reset()
                    self.drift_detector.reset()
                    continue
                if self.learner.LEARNER_TYPE == TornadoDic.TRAINABLE:
                    self.learner.do_training(r)
                else:
                    self.learner.do_loading(r)
            else:
                # Learner not ready yet: train on the record without testing.
                if self.learner.LEARNER_TYPE == TornadoDic.TRAINABLE:
                    self.learner.do_training(r)
                else:
                    self.learner.do_loading(r)
                self.learner.set_ready()
                self.learner.update_confusion_matrix(r[len(r) - 1], r[len(r) - 1])
            learner_error_rate = PredictionEvaluator.calculate(TornadoDic.ERROR_RATE,
                                                          self.learner.get_confusion_matrix())
            learner_error_rate = round(learner_error_rate, 4)
            self.__learner_error_rate_array.append(learner_error_rate)
            if self.__memory_check_step != -1:
                if self.__instance_counter % self.__memory_check_step == 0:
                    self.__drift_detection_memory_usage.append(asizeof.asizeof(self.drift_detector, limit=20))
            self.__drift_points_boolean.append(0)
        print("\n" + "The stream is completely processed.")
        self.__store_stats()
        self.__plot()
        print("\n\r" + "THE END!")
        print("\a")
    def __store_stats(self):
        """Write the final evaluation summary to a text file and stdout.

        The summary covers classifier error rate, memory usage, runtimes,
        drift-detection delay/TP/FP/FN, and the detected drift locations.
        """
        learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)
        detector_name = self.drift_detector.DETECTOR_NAME
        detector_setting = self.drift_detector.get_settings()
        # Output file: <learner>_<detector>.<setting>.txt (lower-cased) under the project path.
        file_name = learner_name + "_" + detector_name + "." + detector_setting[0]
        st_wr = open(self.__project_path + file_name.lower() + ".txt", "w")
        lrn_error_rate = PredictionEvaluator.calculate_error_rate(self.learner.get_global_confusion_matrix())
        dl, tp, fp, fn = DriftDetectionEvaluator.calculate_dl_tp_fp_fn(self.__located_drift_points,
                                                                      self.__actual_drift_points,
                                                                      self.__drift_acceptance_interval)
        if len(self.__located_drift_points) != 0:
            # learner stats
            lrn_mem = numpy.mean(self.__learner_memory_usage)
            lrn_ave_runtime = numpy.mean(self.__learner_runtime)
            lrn_total_runtime = self.learner.get_total_running_time()
            # ddm stats
            ddm_mem = numpy.mean(self.__drift_detection_memory_usage)
            ddm_avg_runtime = numpy.mean(self.__drift_detection_runtime)
            ddm_total_runtime = self.drift_detector.TOTAL_RUNTIME
        else:
            # No drift was ever detected, so the per-drift measurement lists
            # are empty; fall back to a single current measurement.
            lrn_mem = asizeof.asizeof(self.learner, limit=20)
            lrn_ave_runtime = self.learner.get_total_running_time()
            lrn_total_runtime = lrn_ave_runtime
            ddm_mem = asizeof.asizeof(self.drift_detector, limit=20)
            ddm_avg_runtime = self.drift_detector.TOTAL_RUNTIME
            ddm_total_runtime = ddm_avg_runtime
        stats = learner_name + " + " + detector_name + ": " + "\n\t" + \
                "Classifier Error-rate: " + "%0.2f" % (100 * lrn_error_rate) + "%" + "," + "\n\t" + \
                "Classifier Average Memory Usage (bytes): " + "%0.2f" % lrn_mem + "," + "\n\t" + \
                "Classifier Average Runtime (ms): " + "%0.2f" % lrn_ave_runtime + "," + "\n\t" + \
                "Classifier Total Runtime (ms): " + "%0.2f" % lrn_total_runtime + "," + "\n\t" + \
                "Detection Delay: " + "%0.2f" % dl + " TP: " + str(tp) + " FP: " + str(fp) + " FN: " + str(fn) + "," + "\n\t" + \
                "Average Detection Memory Usage (bytes): " + "%0.2f" % ddm_mem + "," + "\n\t" + \
                "Average Detection Runtime (ms): " + "%0.2f" % ddm_avg_runtime + "," + "\n\t" + \
                "Total Detection Runtime (ms): " + "%0.2f" % ddm_total_runtime + "," + "\n\t" + \
                "Drift Points detected: " + str(self.__located_drift_points)
        print(stats)
        st_wr.write(stats)
        st_wr.close()
    def __plot(self):
        """Plot the error-rate curve and detected drift points and archive
        the error-rate series, using the pair's file/figure naming scheme."""
        learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)
        detector_name = self.drift_detector.DETECTOR_NAME
        detector_setting = self.drift_detector.get_settings()
        file_name = learner_name + "_" + detector_name + "." + detector_setting[0]
        # Y-axis cap: saturate at 1 for large error rates, otherwise pad the
        # observed maximum (rounded to one decimal) by 0.25.
        up_range = numpy.max(self.__learner_error_rate_array)
        up_range = 1 if up_range > 0.75 else round(up_range, 1) + 0.25
        pair_name = learner_name + ' + ' + detector_name + "(" + detector_setting[1] + ")"
        Plotter.plot_single(pair_name, self.__learner_error_rate_array, "Error-rate",
                            self.__project_name, self.__project_path, file_name, [0, up_range], 'upper right', 200)
        Archiver.archive_single(pair_name, self.__learner_error_rate_array,
                                self.__project_path, self.__project_name, 'Error-rate')
        Plotter.plot_single_ddm_points(pair_name, self.__drift_points_boolean,
                                       self.__project_name, self.__project_path, file_name)
| [
"copy.copy",
"numpy.max",
"numpy.mean",
"evaluators.detector_evaluator.DriftDetectionEvaluator.calculate_dl_tp_fp_fn",
"random.seed",
"archiver.archiver.Archiver.archive_single",
"pympler.asizeof.asizeof"
] | [((1972, 1996), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (1983, 1996), False, 'import random\n'), ((7008, 7148), 'evaluators.detector_evaluator.DriftDetectionEvaluator.calculate_dl_tp_fp_fn', 'DriftDetectionEvaluator.calculate_dl_tp_fp_fn', (['self.__located_drift_points', 'self.__actual_drift_points', 'self.__drift_acceptance_interval'], {}), '(self.__located_drift_points,\n self.__actual_drift_points, self.__drift_acceptance_interval)\n', (7053, 7148), False, 'from evaluators.detector_evaluator import DriftDetectionEvaluator\n'), ((9581, 9623), 'numpy.max', 'numpy.max', (['self.__learner_error_rate_array'], {}), '(self.__learner_error_rate_array)\n', (9590, 9623), False, 'import numpy\n'), ((10003, 10131), 'archiver.archiver.Archiver.archive_single', 'Archiver.archive_single', (['pair_name', 'self.__learner_error_rate_array', 'self.__project_path', 'self.__project_name', '"""Error-rate"""'], {}), "(pair_name, self.__learner_error_rate_array, self.\n __project_path, self.__project_name, 'Error-rate')\n", (10026, 10131), False, 'from archiver.archiver import Archiver\n'), ((2487, 2504), 'copy.copy', 'copy.copy', (['record'], {}), '(record)\n', (2496, 2504), False, 'import copy\n'), ((7394, 7433), 'numpy.mean', 'numpy.mean', (['self.__learner_memory_usage'], {}), '(self.__learner_memory_usage)\n', (7404, 7433), False, 'import numpy\n'), ((7465, 7499), 'numpy.mean', 'numpy.mean', (['self.__learner_runtime'], {}), '(self.__learner_runtime)\n', (7475, 7499), False, 'import numpy\n'), ((7619, 7666), 'numpy.mean', 'numpy.mean', (['self.__drift_detection_memory_usage'], {}), '(self.__drift_detection_memory_usage)\n', (7629, 7666), False, 'import numpy\n'), ((7698, 7740), 'numpy.mean', 'numpy.mean', (['self.__drift_detection_runtime'], {}), '(self.__drift_detection_runtime)\n', (7708, 7740), False, 'import numpy\n'), ((7846, 7885), 'pympler.asizeof.asizeof', 'asizeof.asizeof', (['self.learner'], {'limit': '(20)'}), '(self.learner, 
limit=20)\n', (7861, 7885), False, 'from pympler import asizeof\n'), ((8027, 8073), 'pympler.asizeof.asizeof', 'asizeof.asizeof', (['self.drift_detector'], {'limit': '(20)'}), '(self.drift_detector, limit=20)\n', (8042, 8073), False, 'from pympler import asizeof\n'), ((4699, 4738), 'pympler.asizeof.asizeof', 'asizeof.asizeof', (['self.learner'], {'limit': '(20)'}), '(self.learner, limit=20)\n', (4714, 4738), False, 'from pympler import asizeof\n'), ((4890, 4936), 'pympler.asizeof.asizeof', 'asizeof.asizeof', (['self.drift_detector'], {'limit': '(20)'}), '(self.drift_detector, limit=20)\n', (4905, 4936), False, 'from pympler import asizeof\n'), ((6198, 6244), 'pympler.asizeof.asizeof', 'asizeof.asizeof', (['self.drift_detector'], {'limit': '(20)'}), '(self.drift_detector, limit=20)\n', (6213, 6244), False, 'from pympler import asizeof\n')] |
import numpy as np
from solver import mmica
def whitening(Y, mode='sph'):
    '''
    Whitens the data Y using sphering or pca.

    Args:
        Y: array of shape (n_features, n_samples).
        mode: 'sph' for symmetric (ZCA/sphering) whitening,
              'pca' for PCA whitening.

    Returns:
        (Z, W) where W is the whitening matrix and Z = W @ Y has
        (sample) identity covariance.

    Raises:
        ValueError: if `mode` is neither 'sph' nor 'pca'.
        (Previously an unknown mode left Z unbound and crashed with a
        NameError at the return.)
    '''
    # Sample covariance; SVD of a symmetric PSD matrix gives R = U D U^T.
    R = np.dot(Y, Y.T) / Y.shape[1]
    U, D, _ = np.linalg.svd(R)
    if mode == 'pca':
        # W = D^{-1/2} U^T
        W = U.T / np.sqrt(D)[:, None]
    elif mode == 'sph':
        # Symmetric whitening: W = U D^{-1/2} U^T
        W = np.dot(U, U.T / np.sqrt(D)[:, None])
    else:
        raise ValueError("mode must be 'sph' or 'pca', got %r" % (mode,))
    Z = np.dot(W, Y)
    return Z, W
print('''
Majorization-minimization ICA example!
''')
# Fix a seed
rng = np.random.RandomState(3)
# Generate some super-Gaussian sources :
print('Generating mixed signals...')
n_sources, n_samples = 3, 10000
S = rng.laplace(size=(n_sources, n_samples))
# Mix the signals :
A = rng.randn(n_sources, n_sources)
X = np.dot(A, S)
# Whiten the observed signals :
print('Whitening the signals...')
X_white, W_white = whitening(X)
# Apply MM-ICA:
print('Running MM-ICA...')
W = mmica(X_white, max_iter=101, verbose=True)
print('Done!')
# Check that the mixing matrix is recovered :
print('''The product of the estimated unmixing matrix and the true mixing
matrix is : ''')
print(np.dot(W, np.dot(W_white, A)))
| [
"numpy.random.RandomState",
"solver.mmica",
"numpy.linalg.svd",
"numpy.dot",
"numpy.sqrt"
] | [((480, 504), 'numpy.random.RandomState', 'np.random.RandomState', (['(3)'], {}), '(3)\n', (501, 504), True, 'import numpy as np\n'), ((722, 734), 'numpy.dot', 'np.dot', (['A', 'S'], {}), '(A, S)\n', (728, 734), True, 'import numpy as np\n'), ((882, 924), 'solver.mmica', 'mmica', (['X_white'], {'max_iter': '(101)', 'verbose': '(True)'}), '(X_white, max_iter=101, verbose=True)\n', (887, 924), False, 'from solver import mmica\n'), ((187, 203), 'numpy.linalg.svd', 'np.linalg.svd', (['R'], {}), '(R)\n', (200, 203), True, 'import numpy as np\n'), ((145, 159), 'numpy.dot', 'np.dot', (['Y', 'Y.T'], {}), '(Y, Y.T)\n', (151, 159), True, 'import numpy as np\n'), ((276, 288), 'numpy.dot', 'np.dot', (['W', 'Y'], {}), '(W, Y)\n', (282, 288), True, 'import numpy as np\n'), ((1095, 1113), 'numpy.dot', 'np.dot', (['W_white', 'A'], {}), '(W_white, A)\n', (1101, 1113), True, 'import numpy as np\n'), ((374, 386), 'numpy.dot', 'np.dot', (['W', 'Y'], {}), '(W, Y)\n', (380, 386), True, 'import numpy as np\n'), ((244, 254), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (251, 254), True, 'import numpy as np\n'), ((341, 351), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (348, 351), True, 'import numpy as np\n')] |
# -*-coding:utf-8-*-
import numpy as np
import pickle
import cv2
import os
import tarfile
import sys
import glob
import math
import tensorflow as tf
from six.moves import urllib
'''
for data reading and data augmentation
'''
def generate_vali_batch(vali_data, vali_label, vali_batch_size):
    """Sample a random contiguous batch from the validation set.

    Args:
        vali_data: validation images, shape [N, ...].
        vali_label: validation labels, length N.
        vali_batch_size: number of examples to return.

    Returns:
        (vali_data_batch, vali_label_batch) of length vali_batch_size,
        with data rows aligned to their labels.

    Raises:
        ValueError: if vali_batch_size exceeds the validation set size.
    """
    # Derive the set size from the data instead of the previous hard-coded
    # 10000, so any validation set size works.
    num_examples = len(vali_label)
    if vali_batch_size > num_examples:
        raise ValueError('vali_batch_size (%d) exceeds validation set size (%d)'
                         % (vali_batch_size, num_examples))
    if vali_batch_size == num_examples:
        offset = 0
    else:
        offset = np.random.choice(num_examples - vali_batch_size, 1)[0]
    vali_data_batch = vali_data[offset:offset + vali_batch_size, ...]
    vali_label_batch = vali_label[offset:offset + vali_batch_size]
    return vali_data_batch, vali_label_batch
def generate_augment_train_batch(train_data, train_labels, config):
    """Sample (and, for cifar10, augment) one training batch.

    Args:
        train_data: training images, shape [N, ...]. For 'cifar10' these are
            expected to be the zero-padded images produced by read_train_data.
        train_labels: training labels, length N.
        config: config object with `batch_size` and `dataset` attributes.

    Returns:
        (batch_data, batch_label) of length config.batch_size.

    Raises:
        ValueError: for an unsupported config.dataset (previously this fell
        through and crashed with UnboundLocalError at the return).
    """
    train_batch_size = config.batch_size
    if config.dataset == 'cifar10':
        # Contiguous slice + random crop/flip augmentation.
        # Use the real set size instead of the previous hard-coded 50000.
        num_examples = len(train_labels)
        offset = np.random.choice(num_examples - train_batch_size, 1)[0]
        batch_data = train_data[offset:offset + train_batch_size, ...]
        batch_data = random_crop_and_flip(batch_data, config)
        # batch_data = whitening_image(batch_data, config)
        batch_label = train_labels[offset:offset + train_batch_size]
    elif config.dataset in ('captcha', 'easy'):
        # The two branches were identical; random sampling with replacement.
        indices = np.random.choice(len(train_labels), train_batch_size)
        batch_data = train_data[indices]
        batch_label = train_labels[indices]
    else:
        raise ValueError('Unsupported dataset: %s' % config.dataset)
    return batch_data, batch_label
def horizontal_flip(image, axis):
    """Flip `image` about the given cv2 axis with probability 0.5.

    Args:
        image: a single image array.
        axis: cv2.flip axis code (0 = vertical, 1 = horizontal, -1 = both).

    Returns:
        The flipped image half of the time, otherwise the untouched input.
    """
    # Fair coin: randint draws 0 or 1 with equal probability; flip on 0.
    coin = np.random.randint(low=0, high=2)
    if coin == 0:
        # careful !!! TODO: does cv2.flip change the RGB channel order?
        image = cv2.flip(image, axis)
    return image
def random_crop_and_flip(batch_data, config):
    """Randomly crop each padded image back to the target size and randomly flip it.

    Args:
        batch_data: batch of images, shape [B, H', W', D]. NOTE(review): the
            padding here is commented out, so the caller must supply images
            already zero-padded by `aug_padding` on each side (read_train_data
            does this for cifar10) — otherwise the crops below run past the
            image edge and NumPy silently returns short slices.
        config: config with aug_padding and input_size_h/w/d.

    Returns:
        Cropped-and-maybe-flipped batch of shape [B, H, W, D] (float array).
    """
    padding_size = config.aug_padding
    IMG_HEIGHT = config.input_size_h
    IMG_WIDTH = config.input_size_w
    IMG_DEPTH = config.input_size_d
    # pad_width = ((0, 0), (padding_size, padding_size), (padding_size, padding_size), (0, 0))
    # batch_data = np.pad(batch_data, pad_width=pad_width, mode='constant', constant_values=0)
    cropped_batch = np.zeros(len(batch_data) * IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH).reshape(
        len(batch_data), IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH)
    for i in range(len(batch_data)):
        # Offsets in [0, 2*padding) keep an HxW crop inside a (H+2p)x(W+2p) padded image.
        x_offset = np.random.randint(low=0, high=2 * padding_size, size=1)[0]
        y_offset = np.random.randint(low=0, high=2 * padding_size, size=1)[0]
        cropped_batch[i, ...] = batch_data[i, ...][x_offset:x_offset+IMG_HEIGHT, y_offset: y_offset+IMG_WIDTH, :]
        cropped_batch[i, ...] = horizontal_flip(image=cropped_batch[i, ...], axis=1)
    return cropped_batch
def maybe_download_and_extract_cifar10():
    '''
    Will download and extract the cifar10 data automatically.

    Downloads the CIFAR-10 python tarball into datasets/cifar10 if it is
    not already there, printing download progress, then extracts it.
    NOTE(review): extraction runs on every call, even when the archive was
    already extracted; this is wasteful but harmless.

    :return: nothing
    '''
    dest_directory = 'datasets/cifar10'
    DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        # Progress callback for urlretrieve; rewrites one console line.
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size)
                                                             / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def read_train_data(config_dict=None):
    """Load the training set for the configured dataset.

    For 'cifar10': downloads/extracts if needed, reads the five batch files,
    and zero-pads each image by `aug_padding` on every side (the padding that
    random_crop_and_flip later crops back out).
    For 'captcha' / 'easy': loads all *.jpg files under data_path, converting
    BGR (cv2 default) to RGB, and parses the label out of each filename.

    Returns:
        (data, label) NumPy arrays.
    """
    path_list = []
    if config_dict.dataset == 'cifar10':
        maybe_download_and_extract_cifar10()
        NUM_TRAIN_BATCH = 5
        for i in range(1, NUM_TRAIN_BATCH + 1):
            path_list.append(config_dict.data_path + 'cifar-10-batches-py/data_batch_'+ str(i))
        data, label = read_images(config_dict, path_list, shuffle=True, is_random_label=False)
        # Pre-pad once here so per-batch augmentation only has to crop.
        pad_width = ((0, 0), (config_dict.aug_padding, config_dict.aug_padding), (config_dict.aug_padding, config_dict.aug_padding), (0, 0))
        data = np.pad(data, pad_width=pad_width, mode='constant', constant_values=0)
    elif config_dict.dataset == 'captcha':
        if not os.path.exists(config_dict.data_path):
            raise ValueError('images_path is not exist.')
        images = []
        labels = []
        images_path = os.path.join(config_dict.data_path, '*.jpg')
        count = 0
        for image_file in glob.glob(images_path):
            count += 1
            if count % 1000 == 0:
                print('Load {} images.'.format(count))
            image = cv2.imread(image_file)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # Assume the name of each image is imagexxx_label.jpg
            label = int(image_file.split('_')[-1].split('.')[0])
            images.append(image)
            labels.append(label)
        data = np.array(images)
        label = np.array(labels)
    elif config_dict.dataset == 'easy':
        if not os.path.exists(config_dict.data_path):
            raise ValueError('images_path is not exist.')
        images = []
        labels = []
        images_path = os.path.join(config_dict.data_path, '*.jpg')
        count = 0
        for image_file in glob.glob(images_path):
            count += 1
            if count % 100 == 0:
                print('Load {} images.'.format(count))
            image = cv2.imread(image_file)
            image = cv2.resize(image, (config_dict.input_resize_w,config_dict.input_resize_h))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # Assume the name of each image is imagexxx_label.jpg
            # Maps an uppercase letter in the filename to a 0-based class index
            # ('A' -> 0) — presumably files are named like .../X_*_*.jpg; TODO confirm.
            label = int(ord(image_file.split('_')[-3].split('/')[-1])-65)
            images.append(image)
            labels.append(label)
        data = np.array(images)
        label = np.array(labels)
    return data, label
def read_validation_data(config_dict=None):
    """Read and whiten the validation set for the configured dataset.

    Args:
        config_dict: configuration object with `dataset` and `data_path`.

    Returns:
        (validation_array, validation_labels) — whitened images and labels.

    Raises:
        ValueError: if config_dict.dataset has no validation reader.
        (Previously any non-cifar10 dataset fell through to the return and
        crashed with UnboundLocalError.)
    """
    if config_dict.dataset != 'cifar10':
        raise ValueError('No validation reader for dataset: %s' % config_dict.dataset)
    path_list = [config_dict.data_path + 'cifar-10-batches-py/test_batch']
    validation_array, validation_labels = read_images(config_dict, path_list, is_random_label=False)
    # Per-image standardization, matching what the model expects at eval time.
    validation_array = whitening_image(validation_array, config_dict)
    return validation_array, validation_labels
def whitening_image(image_np, config_dict):
    """Standardize each image in the batch in place: subtract its mean and
    divide by its adjusted standard deviation.

    The std is floored at 1/sqrt(num_pixels) to avoid dividing by zero on
    constant images (same adjustment as per-image standardization).

    Args:
        image_np: float batch of images, shape [B, H, W, D]; modified in place.
        config_dict: config with input_size_h/w/d.

    Returns:
        The (mutated) input batch.
    """
    # The floor depends only on the configured image size, so hoist it.
    num_pixels = (config_dict.input_size_h * config_dict.input_size_w
                  * config_dict.input_size_d)
    min_std = 1.0 / np.sqrt(num_pixels)
    for idx in range(len(image_np)):
        img = image_np[idx, ...]
        adjusted_std = max(np.std(img), min_std)
        image_np[idx, ...] = (img - np.mean(img)) / adjusted_std
    return image_np
# only for cifar10
def read_images(config_dict, address_list, shuffle=True, is_random_label=False):
    """Read and concatenate CIFAR-10 batch files into one image/label array.

    Args:
        config_dict: config with input_size_h/w/d.
        address_list: list of CIFAR-10 batch file paths.
        shuffle: if True, randomly permute the combined set.
        is_random_label: passed through to the batch reader; if True, labels
            are replaced with random ones.

    Returns:
        (data, label): data as float32 [N, H, W, D], label as 1-D array.
    """
    data = np.array([]).reshape([0, config_dict.input_size_w*config_dict.input_size_h*config_dict.input_size_d])
    label = np.array([])
    for address in address_list:
        print('Reading images from ' + address)
        batch_data, batch_label = _read_one_batch_cifar10(address, is_random_label)
        # Concatenate along axis 0 by default
        data = np.concatenate((data, batch_data))
        label = np.concatenate((label, batch_label))
    num_data = len(label)
    IMG_HEIGHT = config_dict.input_size_h
    IMG_WIDTH = config_dict.input_size_w
    IMG_DEPTH = config_dict.input_size_d
    # CIFAR rows are stored channel-major; the Fortran-order reshape
    # presumably regroups each row into (pixel, channel) before the final
    # reshape to [N, H, W, D] — TODO confirm against the CIFAR layout.
    data = data.reshape((num_data, IMG_HEIGHT * IMG_WIDTH, IMG_DEPTH), order='F')
    data = data.reshape((num_data, IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH))
    if shuffle is True:
        print('Shuffling')
        # Same permutation applied to data and labels keeps them aligned.
        order = np.random.permutation(num_data)
        data = data[order, ...]
        label = label[order]
    data = data.astype(np.float32)
    return data, label
def _read_one_batch_cifar10(path, is_random_label):
fo = open(path, 'rb')
# python3在读文件时需要指定编码
dicts = pickle.load(fo, encoding='iso-8859-1')
fo.close()
data = dicts['data']
if is_random_label is False:
label = np.array(dicts['labels'])
else:
labels = np.random.randint(low=0, high=10, size=10000)
label = np.array(labels)
return data, label
'''
***************************************************************************************
'''
def _random_rotate(image, rotate_prob=0.5, rotate_angle_max=30,
                   interpolation='BILINEAR'):
    """Randomly rotates the given image.
    Args:
        image: An image of shape [height, width, channels].
        rotate_prob: The probability of rotating.
        rotate_angle_max: The upper bound (in degrees) of the rotation angle;
            the angle is drawn uniformly from [-rotate_angle_max, rotate_angle_max].
        interpolation: One of 'BILINEAR' or 'NEAREST' (bilinear or
            nearest-neighbor interpolation).
    Returns:
        The rotated image.
    """
    def _rotate():
        # Degrees -> radians for tf.contrib.image.rotate.
        rotate_angle = tf.random_uniform([], minval=-rotate_angle_max,
                                         maxval=rotate_angle_max,
                                         dtype=tf.float32)
        rotate_angle = tf.div(tf.multiply(rotate_angle, math.pi), 180.)
        rotated_image = tf.contrib.image.rotate([image], [rotate_angle],
                                           interpolation=interpolation)
        return tf.squeeze(rotated_image)
    # Rotate when rand <= rotate_prob; pass the image through otherwise.
    rand = tf.random_uniform([], minval=0, maxval=1)
    return tf.cond(tf.greater(rand, rotate_prob), lambda: image, _rotate)
def _border_expand(image, mode='CONSTANT', constant_values=255):
    """Pads the given image to a square.

    The shorter dimension is padded symmetrically (extra pixel on the
    trailing side when the difference is odd) so the result has
    height == width == max(height, width).

    Args:
        image: A 3-D image `Tensor`.
        mode: padding mode forwarded to `tf.pad`.
        constant_values: fill value for 'CONSTANT' padding (default 255, white).
    Returns:
        expanded_image: A 3-D tensor containing the square-padded image.
    """
    # TODO: note the closure-based branch helpers used with tf.cond below.
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    def _pad_left_right():
        pad_left = tf.floordiv(height - width, 2)
        pad_right = height - width - pad_left
        return [[0, 0], [pad_left, pad_right], [0, 0]]
    def _pad_top_bottom():
        pad_top = tf.floordiv(width - height, 2)
        pad_bottom = width - height - pad_top
        return [[pad_top, pad_bottom], [0, 0], [0, 0]]
    # Pad the shorter of the two dimensions.
    paddings = tf.cond(tf.greater(height, width),
                       _pad_left_right,
                       _pad_top_bottom)
    # expanding want to make w=h
    expanded_image = tf.pad(image, paddings, mode=mode,
                            constant_values=constant_values)
    return expanded_image
def _smallest_size_at_least(height, width, smallest_side):
    # Compute the new (h, w) with the given smallest side, preserving the
    # original aspect ratio.
    """Computes new shape with the smallest side equal to `smallest_side`.
    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.
    Args:
        height: an int32 scalar tensor indicating the current height.
        width: an int32 scalar tensor indicating the current width.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.
    Returns:
        new_height: an int32 scalar tensor indicating the new height.
        new_width: and int32 scalar tensor indicating the new width.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)
    # todo: learn to use lambda
    # Scale is chosen so that the *smaller* dimension lands on smallest_side.
    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    return new_height, new_width
def _aspect_preserving_resize(image, smallest_side):
    # Resize while preserving the original aspect ratio.
    """Resize images preserving the original aspect ratio.
    Args:
        image: A 3-D image `Tensor`.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.
    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
    # resize_bilinear expects a batch dimension; add it, resize, strip it.
    image = tf.expand_dims(image, 0)
    resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                             align_corners=False)
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
    return resized_image
def _fixed_sides_resize(image, output_height, output_width):
    """Resize images by fixed sides (aspect ratio is NOT preserved).
    Args:
        image: A 3-D image `Tensor`.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    output_height = tf.convert_to_tensor(output_height, dtype=tf.int32)
    output_width = tf.convert_to_tensor(output_width, dtype=tf.int32)
    # resize ops expect a batch dimension; add it, resize, strip it.
    image = tf.expand_dims(image, 0)
    resized_image = tf.image.resize_nearest_neighbor(
        image, [output_height, output_width], align_corners=False)
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
    return resized_image
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.
    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.
    Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.
    Returns:
        the cropped (and resized) image.
    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    # control_dependencies forces the runtime checks to execute before the crop.
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
    """Crops the given list of images.
    The function applies the same crop to each image in the list. This can be
    effectively applied when there are multiple image inputs of the same
    dimension such as:
        image, depths, normals = _random_crop([image, depths, normals], 120, 150)
    Args:
        image_list: a list of image tensors of the same dimension but possibly
            varying channel.
        crop_height: the new height.
        crop_width: the new width.
    Returns:
        the image_list with cropped images.
    Raises:
        ValueError: if there are multiple image inputs provided with different size
            or the images are smaller than the crop dimensions.
    """
    if not image_list:
        raise ValueError('Empty image_list.')
    # Compute the rank assertions.
    rank_assertions = []
    for i in range(len(image_list)):
        image_rank = tf.rank(image_list[i])
        rank_assert = tf.Assert(
            tf.equal(image_rank, 3),
            ['Wrong rank for tensor %s [expected] [actual]',
             image_list[i].name, 3, image_rank])
        rank_assertions.append(rank_assert)
    with tf.control_dependencies([rank_assertions[0]]):
        image_shape = tf.shape(image_list[0])
    image_height = image_shape[0]
    image_width = image_shape[1]
    crop_size_assert = tf.Assert(
        tf.logical_and(
            tf.greater_equal(image_height, crop_height),
            tf.greater_equal(image_width, crop_width)),
        ['Crop size greater than the image size.'])
    asserts = [rank_assertions[0], crop_size_assert]
    # Every further image must match the first one's height and width so a
    # single shared crop window is valid for all of them.
    for i in range(1, len(image_list)):
        image = image_list[i]
        asserts.append(rank_assertions[i])
        with tf.control_dependencies([rank_assertions[i]]):
            shape = tf.shape(image)
        height = shape[0]
        width = shape[1]
        height_assert = tf.Assert(
            tf.equal(height, image_height),
            ['Wrong height for tensor %s [expected][actual]',
             image.name, height, image_height])
        width_assert = tf.Assert(
            tf.equal(width, image_width),
            ['Wrong width for tensor %s [expected][actual]',
             image.name, width, image_width])
        asserts.extend([height_assert, width_assert])
    # Create a random bounding box.
    #
    # Use tf.random_uniform and not numpy.random.rand as doing the former would
    # generate random numbers at graph eval time, unlike the latter which
    # generates random numbers at graph definition time.
    with tf.control_dependencies(asserts):
        max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    with tf.control_dependencies(asserts):
        max_offset_width = tf.reshape(image_width - crop_width + 1, [])
    offset_height = tf.random_uniform(
        [], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform(
        [], maxval=max_offset_width, dtype=tf.int32)
    # The same (offset_height, offset_width) is reused for every image.
    return [_crop(image, offset_height, offset_width,
                  crop_height, crop_width) for image in image_list]
# NOTE(review): the defaults look like the standard ImageNet channel
# statistics (torchvision convention) — confirm against the training setup.
def _normalize(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Normalizes an image: scale to [0, 1], then standardize per channel.

    Args:
        image: image tensor; cast to float before normalization.
        mean: per-channel means of images scaled to [0, 1].
        std: per-channel standard deviations.

    Returns:
        (image / 255 - mean) / std as a float tensor.
    """
    # Tuples replace the original mutable-list default arguments
    # (shared-mutable-default pitfall); values are unchanged.
    image = tf.to_float(image)
    return tf.div(tf.div(image, 255.) - mean, std)
def _mean_image_subtraction(image, means):
    """Subtracts the given means from each image channel.
    For example:
        means = [123.68, 116.779, 103.939]
        image = _mean_image_subtraction(image, means)
    Note that the rank of `image` must be known.
    Args:
        image: a tensor of size [height, width, C].
        means: a C-vector of values to subtract from each channel.
    Returns:
        the centered image.
    Raises:
        ValueError: If the rank of `image` is unknown, if `image` has a rank other
            than three or if the number of channels in `image` doesn't match the
            number of values in `means`.
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    num_channels = image.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    # Split into per-channel tensors, shift each, and stitch them back together.
    channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
    for i in range(num_channels):
        channels[i] -= means[i]
    return tf.concat(axis=2, values=channels)
def preprocess_for_train(image,
                         output_height,
                         output_width,
                         border_expand=False, normalize=True,
                         preserving_aspect_ratio_resize=False,
                         dataset_config=None):
    """Preprocesses the given image for training.
    Note that the actual resizing scale is sampled from
    [`resize_size_min`, `resize_size_max`].
    Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
            The output_width and output_height should be smaller than resize_side_min!
        border_expand: Whether to pad the image to a square first.
        normalize: If True, apply mean/std normalization; otherwise only
            mean subtraction.
        preserving_aspect_ratio_resize: Whether the resize keeps the aspect ratio.
        dataset_config: Provides _RESIZE_SIDE_MIN/_RESIZE_SIDE_MAX and the
            per-channel means used when `normalize` is False.
    Returns:
        A preprocessed image.
    """
    resize_side_min = dataset_config._RESIZE_SIDE_MIN
    resize_side_max = dataset_config._RESIZE_SIDE_MAX
    # todo: set rotate a switch
    # image = _random_rotate(image, rotate_angle_max=20)
    if border_expand:
        image = _border_expand(image)
    # aspect-ratio-preserving resize
    if preserving_aspect_ratio_resize:
        # resize_side: length of the shortest side after the resize,
        # sampled per image for scale augmentation.
        resize_side = tf.random_uniform(
            [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)
        image = _aspect_preserving_resize(image, resize_side)
    else:
        # todo: make it can set fixed resize
        image = _fixed_sides_resize(image, resize_side_min, resize_side_min)
    image = _random_crop([image], output_height, output_width)[0]
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    # todo: set a switch
    image = tf.image.random_flip_left_right(image)
    if normalize:
        return _normalize(image)
    return _mean_image_subtraction(image, [dataset_config._R_MEAN, dataset_config._G_MEAN, dataset_config._B_MEAN])
def _central_crop(image_list, crop_height, crop_width):
    """Performs central crops of the given image list.
    Args:
        image_list: a list of image tensors of the same dimension but possibly
            varying channel.
        crop_height: the height of the image following the crop.
        crop_width: the width of the image following the crop.
    Returns:
        the list of cropped images.
    """
    outputs = []
    for image in image_list:
        image_height = tf.shape(image)[0]
        image_width = tf.shape(image)[1]
        # Center the crop window; _crop casts the offsets to int32 internally.
        offset_height = (image_height - crop_height) / 2
        offset_width = (image_width - crop_width) / 2
        outputs.append(_crop(image, offset_height, offset_width,
                             crop_height, crop_width))
    return outputs
def preprocess_for_eval(image, output_height, output_width, resize_side,
                        border_expand=False, normalize=True,
                        preserving_aspect_ratio_resize=False,
                        dataset_config=None):
    """Preprocesses the given image for evaluation.
    Unlike training, this uses a deterministic resize and a central crop
    (no random crop, flip, or scale jitter).
    Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        resize_side: The smallest side of the image for aspect-preserving resizing.
    Returns:
        A preprocessed image.
    """
    if border_expand:
        image = _border_expand(image)
    if preserving_aspect_ratio_resize:
        image = _aspect_preserving_resize(image, resize_side)
    else:
        image = _fixed_sides_resize(image, resize_side, resize_side)
    image = _central_crop([image], output_height, output_width)[0]
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    if normalize:
        return _normalize(image)
    return _mean_image_subtraction(image, [dataset_config._R_MEAN, dataset_config._G_MEAN, dataset_config._B_MEAN])
def preprocess_image(image, output_height, output_width, is_training=False,
                     border_expand=False, normalize=False,
                     preserving_aspect_ratio_resize=False,
                     dataset_config=None):
    """Preprocesses the given image.
    Dispatches to the training pipeline (random scale/crop/flip) or the
    evaluation pipeline (deterministic resize + central crop).
    Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        is_training: `True` if we're preprocessing the image for training and
            `False` otherwise.
        resize_side_min: The lower bound for the smallest side of the image for
            aspect-preserving resizing. If `is_training` is `False`, then this value
            is used for rescaling.
        resize_side_max: The upper bound for the smallest side of the image for
            aspect-preserving resizing. If `is_training` is `False`, this value is
            ignored. Otherwise, the resize side is sampled from
            [resize_size_min, resize_size_max].
    Returns:
        A preprocessed image.
    """
    resize_side_min = dataset_config._RESIZE_SIDE_MIN
    resize_side_max = dataset_config._RESIZE_SIDE_MAX
    if is_training:
        return preprocess_for_train(image, output_height, output_width,
                                    border_expand, normalize,
                                    preserving_aspect_ratio_resize,
                                    dataset_config)
    else:
        return preprocess_for_eval(image, output_height, output_width,
                                   resize_side_min, border_expand, normalize,
                                   preserving_aspect_ratio_resize,
                                   dataset_config)
def preprocess_images(images, output_height, output_width,
                      is_training=False,
                      border_expand=False, normalize=True,
                      preserving_aspect_ratio_resize=False,
                      dataset_config=None):
    """Preprocesses the given batch of images.
    Applies `preprocess_image` independently to every image in the batch
    via `tf.map_fn`.
    Args:
        images: A `Tensor` representing a batch of images of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        is_training: `True` if we're preprocessing the image for training and
            `False` otherwise.
        resize_side_min: The lower bound for the smallest side of the image
            for aspect-preserving resizing. If `is_training` is `False`, then
            this value is used for rescaling.
        resize_side_max: The upper bound for the smallest side of the image
            for aspect-preserving resizing. If `is_training` is `False`, this
            value is ignored. Otherwise, the resize side is sampled from
            [resize_size_min, resize_size_max].
    Returns:
        A batch of preprocessed images.
    """
    # resize_side_min = dataset_config._RESIZE_SIDE_MIN
    # resize_side_max = dataset_config._RESIZE_SIDE_MAX
    images = tf.cast(images, tf.float32)
    def _preprocess_image(image):
        # Per-image closure so map_fn only has to pass the image tensor.
        return preprocess_image(image, output_height, output_width,
                                is_training, border_expand, normalize,
                                preserving_aspect_ratio_resize,
                                dataset_config)
    return tf.map_fn(_preprocess_image, elems=images)
def border_expand(image, mode='CONSTANT', constant_values=255,
                  resize=False, output_height=None, output_width=None,
                  channels=3):
    """Expands (and resize) the given image.

    The image is first border-expanded; when ``resize`` is set it is then
    resized to the requested fixed sides and its static shape is pinned.
    """
    expanded = _border_expand(image, mode, constant_values)
    if not resize:
        return expanded
    # Resizing requires explicit target sides.
    if output_height is None or output_width is None:
        raise ValueError('`output_height` and `output_width` must be '
                         'specified in the resize case.')
    expanded = _fixed_sides_resize(expanded, output_height, output_width)
    expanded.set_shape([output_height, output_width, channels])
    return expanded
| [
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.reshape",
"tensorflow.multiply",
"pickle.load",
"numpy.random.randint",
"numpy.mean",
"sys.stdout.flush",
"glob.glob",
"tensorflow.greater_equal",
"tensorflow.greater",
"tensorflow.split",
"os.path.join",
"numpy.sqrt",
"numpy.pad",
... | [((1486, 1518), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)'}), '(low=0, high=2)\n', (1503, 1518), True, 'import numpy as np\n'), ((2990, 3028), 'os.path.join', 'os.path.join', (['dest_directory', 'filename'], {}), '(dest_directory, filename)\n', (3002, 3028), False, 'import os\n'), ((7125, 7137), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7133, 7137), True, 'import numpy as np\n'), ((8089, 8127), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""iso-8859-1"""'}), "(fo, encoding='iso-8859-1')\n", (8100, 8127), False, 'import pickle\n'), ((9430, 9471), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': '(0)', 'maxval': '(1)'}), '([], minval=0, maxval=1)\n', (9447, 9471), True, 'import tensorflow as tf\n'), ((10093, 10108), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (10101, 10108), True, 'import tensorflow as tf\n'), ((10694, 10761), 'tensorflow.pad', 'tf.pad', (['image', 'paddings'], {'mode': 'mode', 'constant_values': 'constant_values'}), '(image, paddings, mode=mode, constant_values=constant_values)\n', (10700, 10761), True, 'import tensorflow as tf\n'), ((11516, 11567), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['smallest_side'], {'dtype': 'tf.int32'}), '(smallest_side, dtype=tf.int32)\n', (11536, 11567), True, 'import tensorflow as tf\n'), ((11580, 11599), 'tensorflow.to_float', 'tf.to_float', (['height'], {}), '(height)\n', (11591, 11599), True, 'import tensorflow as tf\n'), ((11610, 11628), 'tensorflow.to_float', 'tf.to_float', (['width'], {}), '(width)\n', (11621, 11628), True, 'import tensorflow as tf\n'), ((11647, 11673), 'tensorflow.to_float', 'tf.to_float', (['smallest_side'], {}), '(smallest_side)\n', (11658, 11673), True, 'import tensorflow as tf\n'), ((12369, 12420), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['smallest_side'], {'dtype': 'tf.int32'}), '(smallest_side, dtype=tf.int32)\n', (12389, 12420), True, 'import tensorflow as 
tf\n'), ((12432, 12447), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (12440, 12447), True, 'import tensorflow as tf\n'), ((12577, 12601), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (12591, 12601), True, 'import tensorflow as tf\n'), ((12620, 12697), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[new_height, new_width]'], {'align_corners': '(False)'}), '(image, [new_height, new_width], align_corners=False)\n', (12644, 12697), True, 'import tensorflow as tf\n'), ((12759, 12784), 'tensorflow.squeeze', 'tf.squeeze', (['resized_image'], {}), '(resized_image)\n', (12769, 12784), True, 'import tensorflow as tf\n'), ((13241, 13292), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output_height'], {'dtype': 'tf.int32'}), '(output_height, dtype=tf.int32)\n', (13261, 13292), True, 'import tensorflow as tf\n'), ((13312, 13362), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output_width'], {'dtype': 'tf.int32'}), '(output_width, dtype=tf.int32)\n', (13332, 13362), True, 'import tensorflow as tf\n'), ((13376, 13400), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (13390, 13400), True, 'import tensorflow as tf\n'), ((13421, 13516), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['image', '[output_height, output_width]'], {'align_corners': '(False)'}), '(image, [output_height, output_width],\n align_corners=False)\n', (13453, 13516), True, 'import tensorflow as tf\n'), ((13542, 13567), 'tensorflow.squeeze', 'tf.squeeze', (['resized_image'], {}), '(resized_image)\n', (13552, 13567), True, 'import tensorflow as tf\n'), ((14381, 14396), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (14389, 14396), True, 'import tensorflow as tf\n'), ((15137, 15169), 'tensorflow.reshape', 'tf.reshape', (['image', 'cropped_shape'], {}), '(image, cropped_shape)\n', (15147, 15169), True, 'import tensorflow 
as tf\n'), ((17822, 17885), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'maxval': 'max_offset_height', 'dtype': 'tf.int32'}), '([], maxval=max_offset_height, dtype=tf.int32)\n', (17839, 17885), True, 'import tensorflow as tf\n'), ((17910, 17972), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'maxval': 'max_offset_width', 'dtype': 'tf.int32'}), '([], maxval=max_offset_width, dtype=tf.int32)\n', (17927, 17972), True, 'import tensorflow as tf\n'), ((18259, 18277), 'tensorflow.to_float', 'tf.to_float', (['image'], {}), '(image)\n', (18270, 18277), True, 'import tensorflow as tf\n'), ((19212, 19274), 'tensorflow.split', 'tf.split', ([], {'axis': '(2)', 'num_or_size_splits': 'num_channels', 'value': 'image'}), '(axis=2, num_or_size_splits=num_channels, value=image)\n', (19220, 19274), True, 'import tensorflow as tf\n'), ((19344, 19378), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(2)', 'values': 'channels'}), '(axis=2, values=channels)\n', (19353, 19378), True, 'import tensorflow as tf\n'), ((21124, 21142), 'tensorflow.to_float', 'tf.to_float', (['image'], {}), '(image)\n', (21135, 21142), True, 'import tensorflow as tf\n'), ((21176, 21214), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (21207, 21214), True, 'import tensorflow as tf\n'), ((23086, 23104), 'tensorflow.to_float', 'tf.to_float', (['image'], {}), '(image)\n', (23097, 23104), True, 'import tensorflow as tf\n'), ((26235, 26262), 'tensorflow.cast', 'tf.cast', (['images', 'tf.float32'], {}), '(images, tf.float32)\n', (26242, 26262), True, 'import tensorflow as tf\n'), ((26561, 26603), 'tensorflow.map_fn', 'tf.map_fn', (['_preprocess_image'], {'elems': 'images'}), '(_preprocess_image, elems=images)\n', (26570, 26603), True, 'import tensorflow as tf\n'), ((306, 350), 'numpy.random.choice', 'np.random.choice', (['(10000 - vali_batch_size)', '(1)'], {}), '(10000 - vali_batch_size, 1)\n', (322, 350), True, 'import numpy as 
np\n'), ((1609, 1630), 'cv2.flip', 'cv2.flip', (['image', 'axis'], {}), '(image, axis)\n', (1617, 1630), False, 'import cv2\n'), ((2868, 2898), 'os.path.exists', 'os.path.exists', (['dest_directory'], {}), '(dest_directory)\n', (2882, 2898), False, 'import os\n'), ((2908, 2935), 'os.makedirs', 'os.makedirs', (['dest_directory'], {}), '(dest_directory)\n', (2919, 2935), False, 'import os\n'), ((3040, 3064), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (3054, 3064), False, 'import os\n'), ((3361, 3418), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['DATA_URL', 'filepath', '_progress'], {}), '(DATA_URL, filepath, _progress)\n', (3387, 3418), False, 'from six.moves import urllib\n'), ((3438, 3455), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (3445, 3455), False, 'import os\n'), ((4170, 4239), 'numpy.pad', 'np.pad', (['data'], {'pad_width': 'pad_width', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(data, pad_width=pad_width, mode='constant', constant_values=0)\n", (4176, 4239), True, 'import numpy as np\n'), ((6491, 6516), 'numpy.mean', 'np.mean', (['image_np[i, ...]'], {}), '(image_np[i, ...])\n', (6498, 6516), True, 'import numpy as np\n'), ((7364, 7398), 'numpy.concatenate', 'np.concatenate', (['(data, batch_data)'], {}), '((data, batch_data))\n', (7378, 7398), True, 'import numpy as np\n'), ((7415, 7451), 'numpy.concatenate', 'np.concatenate', (['(label, batch_label)'], {}), '((label, batch_label))\n', (7429, 7451), True, 'import numpy as np\n'), ((7821, 7852), 'numpy.random.permutation', 'np.random.permutation', (['num_data'], {}), '(num_data)\n', (7842, 7852), True, 'import numpy as np\n'), ((8217, 8242), 'numpy.array', 'np.array', (["dicts['labels']"], {}), "(dicts['labels'])\n", (8225, 8242), True, 'import numpy as np\n'), ((8270, 8315), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': '(10000)'}), '(low=0, high=10, size=10000)\n', (8287, 8315), 
True, 'import numpy as np\n'), ((8332, 8348), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8340, 8348), True, 'import numpy as np\n'), ((8982, 9076), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': '(-rotate_angle_max)', 'maxval': 'rotate_angle_max', 'dtype': 'tf.float32'}), '([], minval=-rotate_angle_max, maxval=rotate_angle_max,\n dtype=tf.float32)\n', (8999, 9076), True, 'import tensorflow as tf\n'), ((9251, 9328), 'tensorflow.contrib.image.rotate', 'tf.contrib.image.rotate', (['[image]', '[rotate_angle]'], {'interpolation': 'interpolation'}), '([image], [rotate_angle], interpolation=interpolation)\n', (9274, 9328), True, 'import tensorflow as tf\n'), ((9392, 9417), 'tensorflow.squeeze', 'tf.squeeze', (['rotated_image'], {}), '(rotated_image)\n', (9402, 9417), True, 'import tensorflow as tf\n'), ((9491, 9520), 'tensorflow.greater', 'tf.greater', (['rand', 'rotate_prob'], {}), '(rand, rotate_prob)\n', (9501, 9520), True, 'import tensorflow as tf\n'), ((10199, 10229), 'tensorflow.floordiv', 'tf.floordiv', (['(height - width)', '(2)'], {}), '(height - width, 2)\n', (10210, 10229), True, 'import tensorflow as tf\n'), ((10377, 10407), 'tensorflow.floordiv', 'tf.floordiv', (['(width - height)', '(2)'], {}), '(width - height, 2)\n', (10388, 10407), True, 'import tensorflow as tf\n'), ((10533, 10558), 'tensorflow.greater', 'tf.greater', (['height', 'width'], {}), '(height, width)\n', (10543, 10558), True, 'import tensorflow as tf\n'), ((11723, 11748), 'tensorflow.greater', 'tf.greater', (['height', 'width'], {}), '(height, width)\n', (11733, 11748), True, 'import tensorflow as tf\n'), ((11876, 11899), 'tensorflow.rint', 'tf.rint', (['(height * scale)'], {}), '(height * scale)\n', (11883, 11899), True, 'import tensorflow as tf\n'), ((11927, 11949), 'tensorflow.rint', 'tf.rint', (['(width * scale)'], {}), '(width * scale)\n', (11934, 11949), True, 'import tensorflow as tf\n'), ((14515, 14556), 'tensorflow.control_dependencies', 
'tf.control_dependencies', (['[rank_assertion]'], {}), '([rank_assertion])\n', (14538, 14556), True, 'import tensorflow as tf\n'), ((14578, 14632), 'tensorflow.stack', 'tf.stack', (['[crop_height, crop_width, original_shape[2]]'], {}), '([crop_height, crop_width, original_shape[2]])\n', (14586, 14632), True, 'import tensorflow as tf\n'), ((14881, 14923), 'tensorflow.stack', 'tf.stack', (['[offset_height, offset_width, 0]'], {}), '([offset_height, offset_width, 0])\n', (14889, 14923), True, 'import tensorflow as tf\n'), ((15033, 15074), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[size_assertion]'], {}), '([size_assertion])\n', (15056, 15074), True, 'import tensorflow as tf\n'), ((15088, 15127), 'tensorflow.slice', 'tf.slice', (['image', 'offsets', 'cropped_shape'], {}), '(image, offsets, cropped_shape)\n', (15096, 15127), True, 'import tensorflow as tf\n'), ((16062, 16084), 'tensorflow.rank', 'tf.rank', (['image_list[i]'], {}), '(image_list[i])\n', (16069, 16084), True, 'import tensorflow as tf\n'), ((16298, 16343), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[rank_assertions[0]]'], {}), '([rank_assertions[0]])\n', (16321, 16343), True, 'import tensorflow as tf\n'), ((16363, 16386), 'tensorflow.shape', 'tf.shape', (['image_list[0]'], {}), '(image_list[0])\n', (16371, 16386), True, 'import tensorflow as tf\n'), ((17590, 17622), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['asserts'], {}), '(asserts)\n', (17613, 17622), True, 'import tensorflow as tf\n'), ((17648, 17694), 'tensorflow.reshape', 'tf.reshape', (['(image_height - crop_height + 1)', '[]'], {}), '(image_height - crop_height + 1, [])\n', (17658, 17694), True, 'import tensorflow as tf\n'), ((17702, 17734), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['asserts'], {}), '(asserts)\n', (17725, 17734), True, 'import tensorflow as tf\n'), ((17759, 17803), 'tensorflow.reshape', 'tf.reshape', (['(image_width - crop_width + 1)', '[]'], 
{}), '(image_width - crop_width + 1, [])\n', (17769, 17803), True, 'import tensorflow as tf\n'), ((20712, 20805), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {'minval': 'resize_side_min', 'maxval': '(resize_side_max + 1)', 'dtype': 'tf.int32'}), '([], minval=resize_side_min, maxval=resize_side_max + 1,\n dtype=tf.int32)\n', (20729, 20805), True, 'import tensorflow as tf\n'), ((701, 746), 'numpy.random.choice', 'np.random.choice', (['(50000 - train_batch_size)', '(1)'], {}), '(50000 - train_batch_size, 1)\n', (717, 746), True, 'import numpy as np\n'), ((2243, 2298), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 * padding_size)', 'size': '(1)'}), '(low=0, high=2 * padding_size, size=1)\n', (2260, 2298), True, 'import numpy as np\n'), ((2321, 2376), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 * padding_size)', 'size': '(1)'}), '(low=0, high=2 * padding_size, size=1)\n', (2338, 2376), True, 'import numpy as np\n'), ((3320, 3338), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3336, 3338), False, 'import sys\n'), ((4459, 4503), 'os.path.join', 'os.path.join', (['config_dict.data_path', '"""*.jpg"""'], {}), "(config_dict.data_path, '*.jpg')\n", (4471, 4503), False, 'import os\n'), ((4548, 4570), 'glob.glob', 'glob.glob', (['images_path'], {}), '(images_path)\n', (4557, 4570), False, 'import glob\n'), ((4998, 5014), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (5006, 5014), True, 'import numpy as np\n'), ((5031, 5047), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5039, 5047), True, 'import numpy as np\n'), ((7011, 7023), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7019, 7023), True, 'import numpy as np\n'), ((9185, 9219), 'tensorflow.multiply', 'tf.multiply', (['rotate_angle', 'math.pi'], {}), '(rotate_angle, math.pi)\n', (9196, 9219), True, 'import tensorflow as tf\n'), ((14443, 14457), 'tensorflow.rank', 'tf.rank', (['image'], {}), '(image)\n', 
(14450, 14457), True, 'import tensorflow as tf\n'), ((14696, 14744), 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[0]', 'crop_height'], {}), '(original_shape[0], crop_height)\n', (14712, 14744), True, 'import tensorflow as tf\n'), ((14756, 14803), 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[1]', 'crop_width'], {}), '(original_shape[1], crop_width)\n', (14772, 14803), True, 'import tensorflow as tf\n'), ((16122, 16145), 'tensorflow.equal', 'tf.equal', (['image_rank', '(3)'], {}), '(image_rank, 3)\n', (16130, 16145), True, 'import tensorflow as tf\n'), ((16514, 16557), 'tensorflow.greater_equal', 'tf.greater_equal', (['image_height', 'crop_height'], {}), '(image_height, crop_height)\n', (16530, 16557), True, 'import tensorflow as tf\n'), ((16569, 16610), 'tensorflow.greater_equal', 'tf.greater_equal', (['image_width', 'crop_width'], {}), '(image_width, crop_width)\n', (16585, 16610), True, 'import tensorflow as tf\n'), ((16828, 16873), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[rank_assertions[i]]'], {}), '([rank_assertions[i]])\n', (16851, 16873), True, 'import tensorflow as tf\n'), ((16889, 16904), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (16897, 16904), True, 'import tensorflow as tf\n'), ((16988, 17018), 'tensorflow.equal', 'tf.equal', (['height', 'image_height'], {}), '(height, image_height)\n', (16996, 17018), True, 'import tensorflow as tf\n'), ((17160, 17188), 'tensorflow.equal', 'tf.equal', (['width', 'image_width'], {}), '(width, image_width)\n', (17168, 17188), True, 'import tensorflow as tf\n'), ((18296, 18316), 'tensorflow.div', 'tf.div', (['image', '(255.0)'], {}), '(image, 255.0)\n', (18302, 18316), True, 'import tensorflow as tf\n'), ((21825, 21840), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (21833, 21840), True, 'import tensorflow as tf\n'), ((21862, 21877), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (21870, 21877), True, 
'import tensorflow as tf\n'), ((3543, 3573), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (3555, 3573), False, 'import tarfile\n'), ((4299, 4336), 'os.path.exists', 'os.path.exists', (['config_dict.data_path'], {}), '(config_dict.data_path)\n', (4313, 4336), False, 'import os\n'), ((4704, 4726), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (4714, 4726), False, 'import cv2\n'), ((4747, 4785), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4759, 4785), False, 'import cv2\n'), ((5263, 5307), 'os.path.join', 'os.path.join', (['config_dict.data_path', '"""*.jpg"""'], {}), "(config_dict.data_path, '*.jpg')\n", (5275, 5307), False, 'import os\n'), ((5352, 5374), 'glob.glob', 'glob.glob', (['images_path'], {}), '(images_path)\n', (5361, 5374), False, 'import glob\n'), ((5905, 5921), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (5913, 5921), True, 'import numpy as np\n'), ((5938, 5954), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5946, 5954), True, 'import numpy as np\n'), ((6745, 6769), 'numpy.std', 'np.std', (['image_np[i, ...]'], {}), '(image_np[i, ...])\n', (6751, 6769), True, 'import numpy as np\n'), ((5103, 5140), 'os.path.exists', 'os.path.exists', (['config_dict.data_path'], {}), '(config_dict.data_path)\n', (5117, 5140), False, 'import os\n'), ((5507, 5529), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (5517, 5529), False, 'import cv2\n'), ((5550, 5625), 'cv2.resize', 'cv2.resize', (['image', '(config_dict.input_resize_w, config_dict.input_resize_h)'], {}), '(image, (config_dict.input_resize_w, config_dict.input_resize_h))\n', (5560, 5625), False, 'import cv2\n'), ((5645, 5683), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5657, 5683), False, 'import cv2\n'), ((6775, 6818), 'numpy.sqrt', 'np.sqrt', (['(IMG_HEIGHT * IMG_WIDTH * 
IMG_DEPTH)'], {}), '(IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH)\n', (6782, 6818), True, 'import numpy as np\n')] |
"""Test EC-EARTH fixes."""
import unittest
import numpy as np
from cf_units import Unit
from iris.coords import DimCoord
from iris.cube import Cube, CubeList
from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas
from esmvalcore.cmor.fix import Fix
class TestSic(unittest.TestCase):
    """Tests for the EC-EARTH ``sic`` fix."""

    def setUp(self):
        """Create the input cube and the fix under test."""
        self.cube = Cube([1.0], var_name='sic', units='J')
        self.fix = Sic(None)

    def test_get(self):
        """The Sic fix must be registered for CMIP5/EC-EARTH/Amon/sic."""
        fixes = Fix.get_fixes('CMIP5', 'EC-EARTH', 'Amon', 'sic')
        self.assertListEqual(fixes, [Sic(None)])

    def test_fix_data(self):
        """fix_data must turn 1.0 into 100 while keeping the units."""
        fixed_cube = self.fix.fix_data(self.cube)
        self.assertEqual(fixed_cube.data[0], 100)
        self.assertEqual(fixed_cube.units, Unit('J'))
class TestSftlf(unittest.TestCase):
    """Tests for the EC-EARTH ``sftlf`` fix."""

    def setUp(self):
        """Create the input cube and the fix under test."""
        self.cube = Cube([1.0], var_name='sftlf', units='J')
        self.fix = Sftlf(None)

    def test_get(self):
        """The Sftlf fix must be registered for CMIP5/EC-EARTH/Amon/sftlf."""
        fixes = Fix.get_fixes('CMIP5', 'EC-EARTH', 'Amon', 'sftlf')
        self.assertListEqual(fixes, [Sftlf(None)])

    def test_fix_data(self):
        """fix_data must turn 1.0 into 100 while keeping the units."""
        fixed_cube = self.fix.fix_data(self.cube)
        self.assertEqual(fixed_cube.data[0], 100)
        self.assertEqual(fixed_cube.units, Unit('J'))
class TestTas(unittest.TestCase):
    """Tests for the EC-EARTH ``tas`` fix."""

    def setUp(self):
        """Build tas cubes with and without a scalar height coordinate."""
        self.height_coord = DimCoord(2.,
                                     standard_name='height',
                                     long_name='height',
                                     var_name='height',
                                     units='m',
                                     bounds=None,
                                     attributes={'positive': 'up'})
        time_coord = DimCoord(
            1.,
            standard_name='time',
            var_name='time',
            units=Unit('days since 2070-01-01 00:00:00', calendar='gregorian'),
        )
        # Cube missing the height coordinate: the fix should add it.
        cube_no_height = Cube([3.0], var_name='tas')
        cube_no_height.add_aux_coord(time_coord, 0)
        self.cube_without = CubeList([cube_no_height])
        # Cube that already carries the height coordinate.
        cube_height = Cube([3.0], var_name='tas')
        cube_height.add_aux_coord(self.height_coord, ())
        cube_height.add_aux_coord(time_coord, 0)
        cube_height.coord('time').long_name = 'time'
        self.cube_with = CubeList([cube_height])
        self.fix = Tas(None)

    def test_get(self):
        """The Tas fix must be registered for CMIP5/EC-EARTH/Amon/tas."""
        fixes = Fix.get_fixes('CMIP5', 'EC-EARTH', 'Amon', 'tas')
        self.assertListEqual(fixes, [Tas(None)])

    def test_tas_fix_metadata(self):
        """Both cubes must end up with the height coord and time long_name."""
        fixed_without = self.fix.fix_metadata(self.cube_without)
        # make sure this does not raise an error
        fixed_with = self.fix.fix_metadata(self.cube_with)
        for fixed in (fixed_without, fixed_with):
            assert fixed[0].coord('height') == self.height_coord
            assert fixed[0].coord('time').long_name == "time"
class TestAreacello(unittest.TestCase):
    """Tests for the EC-EARTH ``areacello`` fix."""

    def setUp(self):
        """Create the areacello cube plus separate lat/lon cubes."""
        latitude = Cube(
            np.ones((2, 2)),
            standard_name='latitude',
            long_name='latitude',
            var_name='lat',
            units='degrees_north',
        )
        longitude = Cube(
            np.ones((2, 2)),
            standard_name='longitude',
            long_name='longitude',
            var_name='lon',
            units='degrees_north',
        )
        areacello = Cube(
            np.ones((2, 2)),
            var_name='areacello',
            long_name='Areas of grid cell',
        )
        self.cubes = CubeList([areacello, latitude, longitude])
        self.fix = Areacello(None)

    def test_get(self):
        """The fix must be registered for CMIP5/EC-EARTH/Omon/areacello."""
        fixes = Fix.get_fixes('CMIP5', 'EC-EARTH', 'Omon', 'areacello')
        self.assertListEqual(fixes, [Areacello(None)])

    def test_areacello_fix_metadata(self):
        """The fix must merge lat/lon into a single areacello cube."""
        fixed = self.fix.fix_metadata(self.cubes)
        assert len(fixed) == 1
        # Coordinate lookups raise if the fix failed to attach them.
        fixed[0].coord('latitude')
        fixed[0].coord('longitude')
| [
"esmvalcore.cmor._fixes.cmip5.ec_earth.Sftlf",
"esmvalcore.cmor._fixes.cmip5.ec_earth.Areacello",
"esmvalcore.cmor.fix.Fix.get_fixes",
"numpy.ones",
"esmvalcore.cmor._fixes.cmip5.ec_earth.Tas",
"iris.coords.DimCoord",
"iris.cube.Cube",
"cf_units.Unit",
"esmvalcore.cmor._fixes.cmip5.ec_earth.Sic"
] | [((405, 443), 'iris.cube.Cube', 'Cube', (['[1.0]'], {'var_name': '"""sic"""', 'units': '"""J"""'}), "([1.0], var_name='sic', units='J')\n", (409, 443), False, 'from iris.cube import Cube, CubeList\n'), ((463, 472), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Sic', 'Sic', (['None'], {}), '(None)\n', (466, 472), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((978, 1018), 'iris.cube.Cube', 'Cube', (['[1.0]'], {'var_name': '"""sftlf"""', 'units': '"""J"""'}), "([1.0], var_name='sftlf', units='J')\n", (982, 1018), False, 'from iris.cube import Cube, CubeList\n'), ((1038, 1049), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Sftlf', 'Sftlf', (['None'], {}), '(None)\n', (1043, 1049), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((1543, 1678), 'iris.coords.DimCoord', 'DimCoord', (['(2.0)'], {'standard_name': '"""height"""', 'long_name': '"""height"""', 'var_name': '"""height"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': "{'positive': 'up'}"}), "(2.0, standard_name='height', long_name='height', var_name='height',\n units='m', bounds=None, attributes={'positive': 'up'})\n", (1551, 1678), False, 'from iris.coords import DimCoord\n'), ((2494, 2503), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Tas', 'Tas', (['None'], {}), '(None)\n', (2497, 2503), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((4061, 4076), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Areacello', 'Areacello', (['None'], {}), '(None)\n', (4070, 4076), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((554, 603), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""EC-EARTH"""', '"""Amon"""', '"""sic"""'], {}), "('CMIP5', 'EC-EARTH', 'Amon', 'sic')\n", (567, 603), False, 'from esmvalcore.cmor.fix import Fix\n'), ((831, 840), 'cf_units.Unit', 'Unit', (['"""J"""'], {}), "('J')\n", (835, 840), False, 'from cf_units import Unit\n'), 
((1144, 1195), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""EC-EARTH"""', '"""Amon"""', '"""sftlf"""'], {}), "('CMIP5', 'EC-EARTH', 'Amon', 'sftlf')\n", (1157, 1195), False, 'from esmvalcore.cmor.fix import Fix\n'), ((1396, 1405), 'cf_units.Unit', 'Unit', (['"""J"""'], {}), "('J')\n", (1400, 1405), False, 'from cf_units import Unit\n'), ((2585, 2634), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""EC-EARTH"""', '"""Amon"""', '"""tas"""'], {}), "('CMIP5', 'EC-EARTH', 'Amon', 'tas')\n", (2598, 2634), False, 'from esmvalcore.cmor.fix import Fix\n'), ((3460, 3475), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3467, 3475), True, 'import numpy as np\n'), ((3661, 3676), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3668, 3676), True, 'import numpy as np\n'), ((4171, 4226), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""EC-EARTH"""', '"""Omon"""', '"""areacello"""'], {}), "('CMIP5', 'EC-EARTH', 'Omon', 'areacello')\n", (4184, 4226), False, 'from esmvalcore.cmor.fix import Fix\n'), ((635, 644), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Sic', 'Sic', (['None'], {}), '(None)\n', (638, 644), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((1198, 1209), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Sftlf', 'Sftlf', (['None'], {}), '(None)\n', (1203, 1209), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((1995, 2055), 'cf_units.Unit', 'Unit', (['"""days since 2070-01-01 00:00:00"""'], {'calendar': '"""gregorian"""'}), "('days since 2070-01-01 00:00:00', calendar='gregorian')\n", (1999, 2055), False, 'from cf_units import Unit\n'), ((2148, 2175), 'iris.cube.Cube', 'Cube', (['[3.0]'], {'var_name': '"""tas"""'}), "([3.0], var_name='tas')\n", (2152, 2175), False, 'from iris.cube import Cube, CubeList\n'), ((2272, 2299), 'iris.cube.Cube', 'Cube', (['[3.0]'], {'var_name': '"""tas"""'}), "([3.0], 
var_name='tas')\n", (2276, 2299), False, 'from iris.cube import Cube, CubeList\n'), ((2666, 2675), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Tas', 'Tas', (['None'], {}), '(None)\n', (2669, 2675), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((4241, 4256), 'esmvalcore.cmor._fixes.cmip5.ec_earth.Areacello', 'Areacello', (['None'], {}), '(None)\n', (4250, 4256), False, 'from esmvalcore.cmor._fixes.cmip5.ec_earth import Areacello, Sftlf, Sic, Tas\n'), ((3892, 3907), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3899, 3907), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 15:45:02 2019
@author: huisunum
"""
import pickle
import numpy as np
import solver
import logistic
import svm
import softmax

# Load the pickled dataset: data[0] holds the feature matrix, data[1] the
# labels (latin1 encoding for pickles written under Python 2).
with open('data.pkl', 'rb') as f:
    data = pickle.load(f, encoding='latin1')

print(np.shape(data[0][0:500, :]))

# Split: rows 0-499 train, 500-749 validation, 750+ test.
x_test = data[0][750:, :]
y_test = data[1][750:]
data_input = {
    'X_train': data[0][0:500, :],
    'y_train': data[1][0:500],
    'X_val': data[0][500:750, :],
    'y_val': data[1][500:750],  # validation labels
}

# model = svm.SVM(input_dim=20, reg=0.016)
model = svm.SVM(input_dim=20, hidden_dim=16, reg=0.018)

# Bind the Solver instance to its own name instead of rebinding `solver`,
# which shadowed the imported `solver` module.
trainer = solver.Solver(model, data_input,
                        update_rule='sgd',
                        optim_config={
                            'learning_rate': 0.6,
                        },
                        lr_decay=0.985,
                        num_epochs=800, batch_size=40,
                        print_every=250)
trainer.train()
print(trainer.check_accuracy(x_test, y_test, num_samples=None, batch_size=40))
# print(trainer.best_params)
| [
"solver.Solver",
"solver.train",
"solver.check_accuracy",
"numpy.shape",
"pickle.load",
"svm.SVM"
] | [((590, 637), 'svm.SVM', 'svm.SVM', ([], {'input_dim': '(20)', 'hidden_dim': '(16)', 'reg': '(0.018)'}), '(input_dim=20, hidden_dim=16, reg=0.018)\n', (597, 637), False, 'import svm\n'), ((648, 809), 'solver.Solver', 'solver.Solver', (['model', 'data_input'], {'update_rule': '"""sgd"""', 'optim_config': "{'learning_rate': 0.6}", 'lr_decay': '(0.985)', 'num_epochs': '(800)', 'batch_size': '(40)', 'print_every': '(250)'}), "(model, data_input, update_rule='sgd', optim_config={\n 'learning_rate': 0.6}, lr_decay=0.985, num_epochs=800, batch_size=40,\n print_every=250)\n", (661, 809), False, 'import solver\n'), ((926, 940), 'solver.train', 'solver.train', ([], {}), '()\n', (938, 940), False, 'import solver\n'), ((238, 271), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (249, 271), False, 'import pickle\n'), ((279, 306), 'numpy.shape', 'np.shape', (['data[0][0:500, :]'], {}), '(data[0][0:500, :])\n', (287, 306), True, 'import numpy as np\n'), ((948, 1018), 'solver.check_accuracy', 'solver.check_accuracy', (['x_test', 'y_test'], {'num_samples': 'None', 'batch_size': '(40)'}), '(x_test, y_test, num_samples=None, batch_size=40)\n', (969, 1018), False, 'import solver\n')] |
import heterocl as hcl
import numpy as np
def test_slice_op():
    """Exercise HeteroCL's bit-slice operator on a 1-D integer tensor.

    For each element, two byte-wide bit slices are extracted and summed;
    the result is checked against a NumPy reference built from masks and
    shifts (low byte plus second byte).
    """
    hcl.init()  # default integer dtype; bit slicing operates on the element bits
    def kernel(A):
        # A[x][hi:lo] is HeteroCL's bit-slice of element A[x]; see the
        # mask/shift-based `golden` reference below for the intended bytes.
        return hcl.compute(A.shape, lambda x: A[x][8:0] + A[x][16:8])
    A = hcl.placeholder((10,))
    s = hcl.create_schedule(A, kernel)
    f = hcl.build(s)
    np_A = np.random.randint(10, size=(10,))
    np_B = np.zeros(10)
    # Reference: low byte plus second byte of every element.
    golden = (np_A & 0xFF) + ((np_A >> 8) & 0xFF)
    hcl_A = hcl.asarray(np_A)
    hcl_B = hcl.asarray(np_B)
    f(hcl_A, hcl_B)
    ret = hcl_B.asnumpy()
    assert np.array_equal(golden, ret)
| [
"heterocl.compute",
"numpy.zeros",
"heterocl.placeholder",
"numpy.random.randint",
"heterocl.build",
"heterocl.create_schedule",
"heterocl.init",
"numpy.array_equal",
"heterocl.asarray"
] | [((69, 79), 'heterocl.init', 'hcl.init', ([], {}), '()\n', (77, 79), True, 'import heterocl as hcl\n'), ((179, 201), 'heterocl.placeholder', 'hcl.placeholder', (['(10,)'], {}), '((10,))\n', (194, 201), True, 'import heterocl as hcl\n'), ((210, 240), 'heterocl.create_schedule', 'hcl.create_schedule', (['A', 'kernel'], {}), '(A, kernel)\n', (229, 240), True, 'import heterocl as hcl\n'), ((249, 261), 'heterocl.build', 'hcl.build', (['s'], {}), '(s)\n', (258, 261), True, 'import heterocl as hcl\n'), ((274, 307), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(10,)'}), '(10, size=(10,))\n', (291, 307), True, 'import numpy as np\n'), ((319, 331), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (327, 331), True, 'import numpy as np\n'), ((394, 411), 'heterocl.asarray', 'hcl.asarray', (['np_A'], {}), '(np_A)\n', (405, 411), True, 'import heterocl as hcl\n'), ((424, 441), 'heterocl.asarray', 'hcl.asarray', (['np_B'], {}), '(np_B)\n', (435, 441), True, 'import heterocl as hcl\n'), ((501, 528), 'numpy.array_equal', 'np.array_equal', (['golden', 'ret'], {}), '(golden, ret)\n', (515, 528), True, 'import numpy as np\n'), ((115, 169), 'heterocl.compute', 'hcl.compute', (['A.shape', '(lambda x: A[x][8:0] + A[x][16:8])'], {}), '(A.shape, lambda x: A[x][8:0] + A[x][16:8])\n', (126, 169), True, 'import heterocl as hcl\n')] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import glob
import logging
import os
import subprocess
import sys
import zipfile
import numpy as np
# import traceback
logger = logging.getLogger(__name__)

import argparse

# Python 2/3 compatibility: urllib.request only exists on Python 3.
if sys.version_info < (3, 0):
    import urllib as urllibr
else:
    import urllib.request as urllibr

# Import of functions from another directory.
import os.path

# Absolute directory containing this script; used to resolve relative paths.
path_to_script = os.path.dirname(os.path.abspath(__file__))
def submodule_update():
    """Initialize and update all git submodules of the current repository.

    Runs ``git submodule update --init --recursive`` in the working
    directory.  Failures are reported on stdout but not raised, so a
    missing git installation does not abort the caller.
    """
    print('Updating submodules')
    try:
        subprocess.call('git submodule update --init --recursive', shell=True)
    # Narrowed from a bare ``except:`` which also swallowed SystemExit
    # and KeyboardInterrupt; also fixed the "Probem" typo in the message.
    except Exception as e:
        print('Problem with git submodules')
        print(e)
def check_python_architecture(pythondir, target_arch_str):
    """
    functions check architecture of target python

    :param pythondir: directory containing the target ``python`` binary;
        the binary name is appended by plain string concatenation, so the
        value must end with a path separator.
    :param target_arch_str: expected architecture prefix, e.g. ``'32'`` or
        ``'64'`` (only the first two characters of the probe are compared).
    :raises Exception: if the reported architecture does not match.
    """
    # NOTE(review): the probe uses a Python 2 ``print`` statement, and on
    # Python 3 ``check_output`` returns bytes, so ``pyth_str[:2]`` would never
    # equal a str -- this helper looks Python-2 era; verify before reuse.
    pyth_str = subprocess.check_output(
        [pythondir + 'python', '-c',
         'import platform; print platform.architecture()[0]'])
    if pyth_str[:2] != target_arch_str:
        raise Exception(
            "Wrong architecture of target python. Expected arch is"
            + target_arch_str)
def remove(local_file_name):
    """Delete *local_file_name*, warning instead of raising on failure.

    :param local_file_name: path of the file to delete.
    """
    try:
        os.remove(local_file_name)
    except Exception as e:
        # BUG FIX: the original used a backslash line continuation inside the
        # string literal, which embedded the next line's indentation into the
        # printed message.
        print("Cannot remove file '" + local_file_name +
              "'. Please remove it manually.")
        print(e)
# def downzip(url, destination='./sample_data/'):
# """
# Download, unzip and delete.
# """
#
# # url = "http://172.16.58.3/queetech/sample-data/jatra_06mm_jenjatra.zip"
# logmsg = "downloading from '" + url + "'"
# print(logmsg)
# logger.debug(logmsg)
# local_file_name = os.path.join(destination, 'tmp.zip')
# urllibr.urlretrieve(url, local_file_name)
# datafile = zipfile.ZipFile(local_file_name)
# datafile.extractall(destination)
# remove(local_file_name)
#
#
# # you can get hash from command line with:
# # python imtools/sample_data.py -v sliver_training_001
#
# # vessels.pkl nejprve vytvoří prázný adresář s názvem vessels.pkl, pak jej při rozbalování zase smaže
# data_urls= {
# "head": ["http://147.228.240.61/queetech/sample-data/head.zip", "89e9b60fd23257f01c4a1632ff7bb800", "matlab"] ,
# "jatra_06mm_jenjatra": ["http://14172.16.31.10/queetech/sample-data/jatra_06mm_jenjatra.zip", "jatra_06mm_jenjatra/*.dcm"],
# "jatra_5mm": ["http://147.228.240.61/queetech/sample-data/jatra_5mm.zip", '1b9039ffe1ff9af9caa344341c8cec03', "jatra_06mm/*.dcm"],
# "exp": ["http://147.228.240.61/queetech/sample-data/exp.zip", '74f2c10b17b6bd31bd03662df6cf884d'],
# "sliver_training_001": ["http://147.228.240.61/queetech/sample-data/sliver_training_001.zip","d64235727c0adafe13d24bfb311d1ed0","liver*001.*"],
# "volumetrie": ["http://147.228.240.61/queetech/sample-data/volumetrie.zip","6b2a2da67874ba526e2fe00a78dd19c9"],
# "vessels.pkl": ["http://147.228.240.61/queetech/sample-data/vessels.pkl.zip","698ef2bc345bb616f8d4195048538ded"],
# "biodur_sample": ["http://172.16.58.3/queetech/sample-data/biodur_sample.zip","d459dd5b308ca07d10414b3a3a9000ea"],
# "gensei_slices": ["http://172.16.58.3/queetech/sample-data/gensei_slices.zip", "ef93b121add8e4a133bb086e9e6491c9"],
# "exp_small": ["http://172.16.58.3/queetech/sample-data/exp_small.zip", "0526ba8ea363fe8b5227f5807b7aaca7"],
# "vincentka": ["http://192.168.3.111/queetech/vincentka.zip", "a30fdabaa39c5ce032a3223ed30b88e3"],
# "vincentka_sample": ["http://147.228.240.61/queetech/sample-data/vincentka_sample.zip"],
# "donut": "http://172.16.58.3/queetech/sample-data/donut.zip",
# # není nutné pole, stačí jen string
# # "exp_small": "http://147.228.240.61/queetech/sample-data/exp_small.zip",
# }
#
# def get_sample_data(data_label=None, destination_dir="."):
# """
# Same as get() due to back compatibility
# :param data_label:
# :param destination_dir:
# :return:
# """
# get(data_label=data_label, destination_dir=destination_dir)
#
#
# def get(data_label=None, destination_dir="."):
# """
# Download sample data by data label. Labels can be listed by sample_data.data_urls.keys()
# :param data_label: label of data. If it is set to None, all data are downloaded
# :param destination_dir: output dir for data
# :return:
# """
# try:
# os.mkdir(destination_dir)
# except:
# pass
# if data_label is None:
# data_label=data_urls.keys()
#
# if type(data_label) == str:
# data_label = [data_label]
#
# for label in data_label:
# # make all data:url have length 3
# data_url = data_urls[label]
# if type(data_url) == str:
# # back compatibility
# data_url = [data_url]
# data_url.extend([None, None])
# data_url = data_url[:3]
# url, expected_hash, hash_path = data_url
#
# if hash_path is None:
# hash_path = label
#
# try:
# computed_hash = checksum(os.path.join(destination_dir, hash_path))
# except:
# # there is probably no checksumdir module
# logger.warning("problem with sample_data.checksum()")
# computed_hash = None
#
# logger.info("dataset '" + label + "'")
# logger.info("expected hash: '" + str(expected_hash) + "'")
# logger.info("computed hash: '" + str(computed_hash) + "'")
# if (computed_hash is not None) and (expected_hash == computed_hash):
# logger.info("match ok - no download needed")
# else:
# logger.info("downloading")
# downzip(url, destination=destination_dir)
# logger.info("finished")
# downloaded_hash = checksum(os.path.join(destination_dir, hash_path))
# logger.info("downloaded hash: '" + str(downloaded_hash) + "'")
# if downloaded_hash != expected_hash:
# logger.warning("downloaded hash is different from expected hash\n" + \
# "expected hash: '" + str(expected_hash) + "'\n" + \
# "downloaded hash: '" + str(downloaded_hash) + "'\n")
#
#
# def checksum(path, hashfunc='md5'):
# """
# Return checksum given by path. Wildcards can be used in check sum. Function is strongly
# dependent on checksumdir package by 'cakepietoast'.
#
# :param path:
# :param hashfunc:
# :return:
# """
# import checksumdir
# hash_func = checksumdir.HASH_FUNCS.get(hashfunc)
# if not hash_func:
# raise NotImplementedError('{} not implemented.'.format(hashfunc))
#
# if os.path.isdir(path):
# return checksumdir.dirhash(path, hashfunc=hashfunc)
#
# hashvalues = []
# path_list = glob.glob(path)
# logger.debug("path_list " + str(path_list))
# for path in path_list:
# if os.path.isfile(path):
# hashvalues.append(checksumdir._filehash(path, hashfunc=hash_func))
# logger.debug(str(hashvalues))
# hash = checksumdir._reduce_hash(hashvalues, hashfunc=hash_func)
# return hash
#
def donut():
    """
    Generate donut like shape with stick inside

    :return: datap dict with keys ``data3d``, ``segmentation``,
        ``voxelsize_mm`` and ``slab`` (label 1 = donut, label 2 = stick).
    """
    # The module already imports numpy as np at the top; the original
    # function-local ``import numpy as np`` was redundant and is removed.
    segmentation = np.zeros([20, 30, 40])
    # generate test data: a flat box with the four corner voxels knocked out
    segmentation[6:10, 7:24, 10:37] = 1
    segmentation[6:10, 7, 10] = 0
    segmentation[6:10, 23, 10] = 0
    segmentation[6:10, 7, 36] = 0
    segmentation[6:10, 23, 36] = 0
    # second structure ("stick", label 2), overwriting part of the box
    segmentation[2:18, 12:19, 18:28] = 2
    # intensity = 100 per label plus uniform noise in [0, 30)
    data3d = segmentation * 100 + np.random.random(segmentation.shape) * 30
    voxelsize_mm = [3, 2, 1]
    datap = {
        'data3d': data3d,
        'segmentation': segmentation.astype(np.int8),
        'voxelsize_mm': voxelsize_mm,
        "slab": {"donut": 1, "stick": 2}
    }
    # io3d.write(datap, "donut.pklz")
    return datap
def download_and_run(url, local_file_name):
    """Download *url* to *local_file_name* and execute the saved file.

    NOTE(review): the downloaded file is executed as-is -- only use with
    trusted URLs.
    """
    urllibr.urlretrieve(url, local_file_name)
    subprocess.call(local_file_name)
def get_conda_path():
    """
    Return anaconda or miniconda directory

    :return: path of the currently active conda environment, as a str.
    """
    import subprocess
    import re
    # 'conda info --root' works only for the root environment, so parse the
    # environment list instead; the active environment is marked with '*'.
    p = subprocess.Popen(['conda', 'info', '-e'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # BUG FIX: communicate() returns bytes on Python 3; searching it with a
    # str pattern raised TypeError (and the caller needs a str for
    # str.replace()).  Decode before matching.
    if not isinstance(out, str):
        out = out.decode('utf-8')
    dstdir = out.strip()
    dstdir = re.search(r"\*(.*)\n", dstdir).group(1).strip()
    return dstdir
def generate(size=100, liver_intensity=100, noise_intensity=20,
             portal_vein_intensity=130, spleen_intensity=90):
    """Generate a synthetic labelled test volume.

    :param size: edge length of the cubic volume, in voxels.
    :param liver_intensity: mean intensity of label 1 ("liver").
    :param noise_intensity: amplitude of the additive uniform noise.
    :param portal_vein_intensity: mean intensity of label 2 ("porta").
    :param spleen_intensity: mean intensity of label 17 ("spleen").
    :return: dict with ``data3d``, ``segmentation``, ``voxelsize_mm``,
        ``seeds``, ``slab`` and ``seeds_porta``.
    """
    boundary = int(size / 4)
    voxelsize_mm = [1.0, 1.5, 1.5]
    slab = {
        'liver': 1,
        'porta': 2,
        'spleen': 17
    }
    segmentation = np.zeros([size, size, size], dtype=np.uint8)
    # liver: large block
    segmentation[boundary:-boundary, boundary:-2 * boundary,
                 2 * boundary:-boundary] = 1
    # porta: thin column through the volume (the original assigned this
    # slice twice on two identical lines; once is enough)
    segmentation[:, boundary * 2:boundary * 2 + 5,
                 boundary * 2:boundary * 2 + 5] = 2
    # spleen: slab at the far edge
    segmentation[:, -5:, -boundary:] = 17
    seeds = np.zeros([size, size, size], dtype=np.uint8)
    seeds[
        boundary + 1:boundary + 4,
        boundary + 1:boundary + 4,
        2 * boundary + 1:2 * boundary + 4
    ] = 1
    seeds_porta = np.zeros([size, size, size], dtype=np.uint8)
    seeds_porta[:, boundary * 2 + 2, boundary * 2:boundary * 2 + 2] = 1
    # BUG FIX: ``np.int`` was removed in NumPy 1.24 -- use the builtin int.
    noise = (np.random.random(segmentation.shape) * noise_intensity).astype(int)
    data3d = np.zeros(segmentation.shape, dtype=int)
    data3d[segmentation == 1] = liver_intensity
    data3d[segmentation == 2] = portal_vein_intensity
    data3d[segmentation == 17] = spleen_intensity
    data3d += noise
    datap = {
        'data3d': data3d,
        'segmentation': segmentation,
        'voxelsize_mm': voxelsize_mm,
        'seeds': seeds,
        'slab': slab,
        "seeds_porta": seeds_porta
    }
    return datap
def file_copy_and_replace_lines(in_path, out_path):
    """Copy *in_path* to *out_path* and substitute template placeholders.

    Replaces ``@{lisa_path}`` with the script directory and
    ``@{conda_path}`` with the active conda environment path, editing
    *out_path* in place.
    """
    import shutil
    import fileinput
    lisa_path = os.path.abspath(path_to_script)
    shutil.copy2(in_path, out_path)
    conda_path = get_conda_path()
    # BUG FIX: the original passed ``inplace=true`` (lowercase), which raised
    # NameError at runtime; the keyword must be ``True``.
    for line in fileinput.input(out_path, inplace=True):
        line = line.replace("@{lisa_path}", lisa_path)
        line = line.replace("@{conda_path}", conda_path)
        # ``line`` still carries its trailing newline; write it verbatim
        # instead of print(), which would append a second newline.
        sys.stdout.write(line)
def make_icon():
    """Create a desktop launcher appropriate for the current platform.

    Dispatches to the MacOS or Linux helper; any other system is ignored.
    """
    import platform
    current_os = platform.system()
    handlers = {
        'Darwin': __make_icon_osx,   # MacOS
        'Linux': __make_icon_linux,
    }
    handler = handlers.get(current_os)
    if handler is not None:
        handler()
def __make_icon_osx():
    """Symlink the Lisa GUI launcher onto the user's Desktop (MacOS)."""
    desktop_dir = os.path.join(os.path.expanduser('~'), "Desktop")
    gui_launcher = os.path.join(path_to_script, "applications/lisa_gui")
    subprocess.call(['ln', '-s', gui_launcher, desktop_dir])
def __make_icon_linux():
    """Install the Lisa and HA ``.desktop`` launchers (Linux).

    Renders both templates onto the user's Desktop (``Desktop`` or the
    Czech ``Plocha``) and into ``~/.local/share/applications`` so they
    also appear in the application menu.
    """
    in_path = os.path.join(path_to_script, "applications/lisa.desktop.in")
    in_path_ha = os.path.join(path_to_script, "applications/ha.desktop.in")
    print("icon input path:")
    print(in_path, in_path_ha)

    def install_into(target_dir):
        # Render both launcher templates into ``target_dir``.
        out_path = os.path.join(target_dir, "lisa.desktop")
        out_path_ha = os.path.join(target_dir, "ha.desktop")
        print("icon output path:")
        print(out_path, out_path_ha)
        file_copy_and_replace_lines(in_path, out_path)
        file_copy_and_replace_lines(in_path_ha, out_path_ha)

    home_path = os.path.expanduser('~')
    desktop_path = None
    for candidate in ('Desktop', 'Plocha'):
        candidate_path = os.path.join(home_path, candidate)
        if os.path.exists(candidate_path):
            desktop_path = candidate_path
            break
    if desktop_path is None:
        print("Cannot find desktop directory")
    else:
        install_into(desktop_path)

    local_app_path = os.path.join(home_path, '.local/share/applications')
    if os.path.exists(local_app_path) and os.path.isdir(local_app_path):
        install_into(local_app_path)
    else:
        print("Couldnt find $HOME/.local/share/applications/.")
| [
"os.path.abspath",
"subprocess.Popen",
"os.remove",
"os.path.join",
"fileinput.input",
"os.path.isdir",
"subprocess.check_output",
"shutil.copy2",
"numpy.zeros",
"os.path.exists",
"urllib.request.urlretrieve",
"numpy.random.random",
"subprocess.call",
"re.search",
"platform.system",
"o... | [((174, 201), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (191, 201), False, 'import logging\n'), ((407, 432), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (422, 432), False, 'import os\n'), ((917, 1027), 'subprocess.check_output', 'subprocess.check_output', (["[pythondir + 'python', '-c',\n 'import platform; print platform.architecture()[0]']"], {}), "([pythondir + 'python', '-c',\n 'import platform; print platform.architecture()[0]'])\n", (940, 1027), False, 'import subprocess\n'), ((7320, 7342), 'numpy.zeros', 'np.zeros', (['[20, 30, 40]'], {}), '([20, 30, 40])\n', (7328, 7342), True, 'import numpy as np\n'), ((7973, 8014), 'urllib.request.urlretrieve', 'urllibr.urlretrieve', (['url', 'local_file_name'], {}), '(url, local_file_name)\n', (7992, 8014), True, 'import urllib.request as urllibr\n'), ((8019, 8051), 'subprocess.call', 'subprocess.call', (['local_file_name'], {}), '(local_file_name)\n', (8034, 8051), False, 'import subprocess\n'), ((8398, 8492), 'subprocess.Popen', 'subprocess.Popen', (["['conda', 'info', '-e']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['conda', 'info', '-e'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n", (8414, 8492), False, 'import subprocess\n'), ((9166, 9210), 'numpy.zeros', 'np.zeros', (['[size, size, size]'], {'dtype': 'np.uint8'}), '([size, size, size], dtype=np.uint8)\n', (9174, 9210), True, 'import numpy as np\n'), ((9500, 9544), 'numpy.zeros', 'np.zeros', (['[size, size, size]'], {'dtype': 'np.uint8'}), '([size, size, size], dtype=np.uint8)\n', (9508, 9544), True, 'import numpy as np\n'), ((9691, 9735), 'numpy.zeros', 'np.zeros', (['[size, size, size]'], {'dtype': 'np.uint8'}), '([size, size, size], dtype=np.uint8)\n', (9699, 9735), True, 'import numpy as np\n'), ((9896, 9938), 'numpy.zeros', 'np.zeros', (['segmentation.shape'], {'dtype': 'np.int'}), '(segmentation.shape, dtype=np.int)\n', (9904, 9938), True, 'import numpy 
as np\n'), ((10503, 10534), 'os.path.abspath', 'os.path.abspath', (['path_to_script'], {}), '(path_to_script)\n', (10518, 10534), False, 'import os\n'), ((10540, 10571), 'shutil.copy2', 'shutil.copy2', (['in_path', 'out_path'], {}), '(in_path, out_path)\n', (10552, 10571), False, 'import shutil\n'), ((10708, 10747), 'fileinput.input', 'fileinput.input', (['out_path'], {'inplace': 'true'}), '(out_path, inplace=true)\n', (10723, 10747), False, 'import fileinput\n'), ((10975, 10992), 'platform.system', 'platform.system', ([], {}), '()\n', (10990, 10992), False, 'import platform\n'), ((11172, 11195), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (11190, 11195), False, 'import os\n'), ((11210, 11263), 'os.path.join', 'os.path.join', (['path_to_script', '"""applications/lisa_gui"""'], {}), "(path_to_script, 'applications/lisa_gui')\n", (11222, 11263), False, 'import os\n'), ((11278, 11312), 'os.path.join', 'os.path.join', (['home_path', '"""Desktop"""'], {}), "(home_path, 'Desktop')\n", (11290, 11312), False, 'import os\n'), ((11317, 11364), 'subprocess.call', 'subprocess.call', (["['ln', '-s', in_path, dt_path]"], {}), "(['ln', '-s', in_path, dt_path])\n", (11332, 11364), False, 'import subprocess\n'), ((11407, 11467), 'os.path.join', 'os.path.join', (['path_to_script', '"""applications/lisa.desktop.in"""'], {}), "(path_to_script, 'applications/lisa.desktop.in')\n", (11419, 11467), False, 'import os\n'), ((11485, 11543), 'os.path.join', 'os.path.join', (['path_to_script', '"""applications/ha.desktop.in"""'], {}), "(path_to_script, 'applications/ha.desktop.in')\n", (11497, 11543), False, 'import os\n'), ((11622, 11645), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (11640, 11645), False, 'import os\n'), ((12538, 12590), 'os.path.join', 'os.path.join', (['home_path', '""".local/share/applications"""'], {}), "(home_path, '.local/share/applications')\n", (12550, 12590), False, 'import os\n'), ((578, 648), 
'subprocess.call', 'subprocess.call', (['"""git submodule update --init --recursive"""'], {'shell': '(True)'}), "('git submodule update --init --recursive', shell=True)\n", (593, 648), False, 'import subprocess\n'), ((1254, 1280), 'os.remove', 'os.remove', (['local_file_name'], {}), '(local_file_name)\n', (1263, 1280), False, 'import os\n'), ((11669, 11703), 'os.path.join', 'os.path.join', (['home_path', '"""Desktop"""'], {}), "(home_path, 'Desktop')\n", (11681, 11703), False, 'import os\n'), ((11729, 11763), 'os.path.join', 'os.path.join', (['home_path', '"""Desktop"""'], {}), "(home_path, 'Desktop')\n", (11741, 11763), False, 'import os\n'), ((12055, 12097), 'os.path.join', 'os.path.join', (['desktop_path', '"""lisa.desktop"""'], {}), "(desktop_path, 'lisa.desktop')\n", (12067, 12097), False, 'import os\n'), ((12120, 12160), 'os.path.join', 'os.path.join', (['desktop_path', '"""ha.desktop"""'], {}), "(desktop_path, 'ha.desktop')\n", (12132, 12160), False, 'import os\n'), ((12598, 12628), 'os.path.exists', 'os.path.exists', (['local_app_path'], {}), '(local_app_path)\n', (12612, 12628), False, 'import os\n'), ((12633, 12662), 'os.path.isdir', 'os.path.isdir', (['local_app_path'], {}), '(local_app_path)\n', (12646, 12662), False, 'import os\n'), ((12683, 12727), 'os.path.join', 'os.path.join', (['local_app_path', '"""lisa.desktop"""'], {}), "(local_app_path, 'lisa.desktop')\n", (12695, 12727), False, 'import os\n'), ((12751, 12793), 'os.path.join', 'os.path.join', (['local_app_path', '"""ha.desktop"""'], {}), "(local_app_path, 'ha.desktop')\n", (12763, 12793), False, 'import os\n'), ((7622, 7658), 'numpy.random.random', 'np.random.random', (['segmentation.shape'], {}), '(segmentation.shape)\n', (7638, 7658), True, 'import numpy as np\n'), ((11788, 11821), 'os.path.join', 'os.path.join', (['home_path', '"""Plocha"""'], {}), "(home_path, 'Plocha')\n", (11800, 11821), False, 'import os\n'), ((11847, 11880), 'os.path.join', 'os.path.join', (['home_path', 
'"""Plocha"""'], {}), "(home_path, 'Plocha')\n", (11859, 11880), False, 'import os\n'), ((9812, 9848), 'numpy.random.random', 'np.random.random', (['segmentation.shape'], {}), '(segmentation.shape)\n', (9828, 9848), True, 'import numpy as np\n'), ((8559, 8589), 're.search', 're.search', (['"""\\\\*(.*)\n"""', 'dstdir'], {}), "('\\\\*(.*)\\n', dstdir)\n", (8568, 8589), False, 'import re\n')] |
from config import Options
import json
import os
import scipy.io as sio
import numpy as np
def options_to_json(options):
    """Serialize the non-dunder attributes of *options* to a JSON string."""
    public_names = (n for n in dir(options) if not n.startswith('__'))
    payload = {name: getattr(options, name) for name in public_names}
    return json.dumps(payload)
def save_options_to_file(options, filepath):
    """Write the JSON serialization of *options* to *filepath*."""
    with open(filepath, 'w') as out_file:
        out_file.write(options_to_json(options))
def json_to_options(json_dict):
    """Build an ``Options`` instance from a plain dict of settings."""
    options = Options()
    for name, value in json_dict.items():
        setattr(options, name, value)
    return options
def read_options_from_file(filepath):
    """Load options previously saved as JSON at *filepath*."""
    with open(filepath, 'r') as in_file:
        parsed = json.load(in_file)
    return json_to_options(parsed)
def args_to_options(**kargs):
    """Create an ``Options`` instance, overriding only attributes that
    already exist on the defaults; unknown keywords are silently ignored."""
    options = Options()
    for name, value in kargs.items():
        if hasattr(options, name):
            setattr(options, name, value)
    return options
def save_to_mat(filename, data_dict):
    """Save *data_dict* to a MATLAB ``.mat`` file via :func:`scipy.io.savemat`."""
    sio.savemat(filename, data_dict)
def load_from_mat(filename):
    """Load a MATLAB ``.mat`` file and return its contents as a dict."""
    return sio.loadmat(filename)
def save_to_h5py(filename, data_dict):
    """Save every entry of *data_dict* as a dataset in an HDF5 file.

    :param filename: output ``.h5`` file path (overwritten).
    :param data_dict: mapping of dataset name -> array-like value.
    """
    import h5py
    # Context manager guarantees the file is closed even if a dataset write
    # raises (the original leaked the handle on error); the leftover debug
    # ``print(k)`` is also removed.
    with h5py.File(filename, 'w') as hf:
        for key in data_dict:
            hf.create_dataset(key, data=data_dict[key])
def data_to_dict(data, num_classes=43, crop_size=32):
    """Load image files listed in *data* and build a test-set dict.

    :param data: tuple ``(paths, labels)`` of image file paths and integer
        class labels.
    :param num_classes: length of each one-hot label vector.
    :param crop_size: square side length each image is resized to.
    :return: dict with ``X_test`` (images) and ``Y_test`` (one-hot labels).
    """
    import cv2
    images = []
    onehot_labels = []
    for path, label in zip(data[0], data[1]):
        resized = cv2.resize(cv2.imread(path), (crop_size, crop_size))
        images.append(resized)
        onehot = np.zeros((num_classes,))
        onehot[label] = 1
        onehot_labels.append(onehot)
    out_dict = {
        'X_test': np.asarray(images),
        'Y_test': np.asarray(onehot_labels),
    }
    print(out_dict['X_test'].shape)
    print(out_dict['Y_test'].shape)
    return out_dict
def cifar_data_to_dict(data, num_classes=10, crop_size=32):
    """Convert flat CIFAR-style rows into HWC images with one-hot labels.

    :param data: tuple ``(rows, labels)`` where each row is a flat
        3*32*32 pixel vector in channel-first (CHW) order.
    :param num_classes: length of each one-hot label vector.
    :param crop_size: unused; kept for signature parity with
        ``data_to_dict``.
    :return: dict with ``X_test`` of shape (N, 32, 32, 3) and ``Y_test``
        of shape (N, num_classes).
    """
    images = []
    onehot_labels = []
    for row, label in zip(data[0], data[1]):
        # reshape to CHW, then reorder axes to HWC
        hwc = np.transpose(np.reshape(row, [3, 32, 32]), [1, 2, 0])
        images.append(hwc)
        onehot = np.zeros((num_classes,))
        onehot[label] = 1
        onehot_labels.append(onehot)
    out_dict = {
        'X_test': np.asarray(images),
        'Y_test': np.asarray(onehot_labels),
    }
    print(out_dict['X_test'].shape)
    print(out_dict['Y_test'].shape)
    return out_dict
def make_options_from_flags(FLAGS):
    """Build an ``Options`` object from parsed command-line flags.

    Starts from ``FLAGS.json_config`` (if given) or the library defaults,
    then overrides individual fields with any explicitly provided flags.

    :param FLAGS: parsed flags object exposing the attributes read below.
    :return: populated ``Options`` instance.
    """
    if FLAGS.json_config is not None:
        options = read_options_from_file(FLAGS.json_config)
    else:
        options = Options()  # the default value stored in config.Options
    if FLAGS.shuffle is not None:
        options.shuffle = FLAGS.shuffle
    if FLAGS.net_mode is not None:
        options.net_mode = FLAGS.net_mode
    if FLAGS.data_mode is not None:
        options.data_mode = FLAGS.data_mode
    if FLAGS.load_mode is not None:
        options.load_mode = FLAGS.load_mode
    if FLAGS.fix_level is not None:
        options.fix_level = FLAGS.fix_level
    if FLAGS.init_learning_rate is not None:
        options.base_lr = FLAGS.init_learning_rate
    # For these two, a non-default value signals an explicit override.
    if FLAGS.optimizer != 'sgd':
        options.optimizer = FLAGS.optimizer
    if FLAGS.weight_decay != 0.00004:
        options.weight_decay = FLAGS.weight_decay
    if FLAGS.global_label is not None:
        # BUG FIX: this line was ``options.data_mode == 'global_label'`` --
        # a no-op comparison instead of an assignment.
        options.data_mode = 'global_label'
        options.global_label = FLAGS.global_label
    if options.load_mode != 'normal':
        if FLAGS.backbone_model_path is not None:
            options.backbone_model_path = FLAGS.backbone_model_path
    else:
        options.backbone_model_path = None
    return options
def get_last_checkpoint_in_folder(folder_path):
    """Return the path of the checkpoint named on the LAST line of the
    ``checkpoint`` index file inside *folder_path*.

    Each index line looks like ``some_key: "ckpt-name"``; the quoted name
    from the final line is joined onto *folder_path*.
    """
    index_file = os.path.join(folder_path, 'checkpoint')
    with open(index_file, 'r') as fh:
        for row in fh:
            # keep overwriting, so the last line wins
            checkpoint_name = row.split('"')[-2]
    return os.path.join(folder_path, checkpoint_name)
def inspect_checkpoint(model_path, all_tensors=True):
    """Print the tensors stored in a TensorFlow checkpoint.

    :param model_path: path prefix of the checkpoint to inspect.
    :param all_tensors: if True, print tensor values as well as names.
    """
    from tensorflow.python.tools import inspect_checkpoint as chkp
    chkp.print_tensors_in_checkpoint_file(model_path, tensor_name='v0/cg/affine0/', all_tensors=all_tensors, all_tensor_names=True)
| [
"h5py.File",
"json.load",
"config.Options",
"scipy.io.loadmat",
"tensorflow.python.tools.inspect_checkpoint.print_tensors_in_checkpoint_file",
"numpy.asarray",
"numpy.zeros",
"scipy.io.savemat",
"json.dumps",
"numpy.transpose",
"cv2.imread",
"numpy.reshape",
"os.path.join",
"cv2.resize"
] | [((252, 265), 'json.dumps', 'json.dumps', (['b'], {}), '(b)\n', (262, 265), False, 'import json\n'), ((437, 446), 'config.Options', 'Options', ([], {}), '()\n', (444, 446), False, 'from config import Options\n'), ((682, 691), 'config.Options', 'Options', ([], {}), '()\n', (689, 691), False, 'from config import Options\n'), ((832, 864), 'scipy.io.savemat', 'sio.savemat', (['filename', 'data_dict'], {}), '(filename, data_dict)\n', (843, 864), True, 'import scipy.io as sio\n'), ((904, 925), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (915, 925), True, 'import scipy.io as sio\n'), ((987, 1011), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (996, 1011), False, 'import h5py\n'), ((1437, 1453), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (1447, 1453), True, 'import numpy as np\n'), ((1463, 1479), 'numpy.asarray', 'np.asarray', (['labs'], {}), '(labs)\n', (1473, 1479), True, 'import numpy as np\n'), ((1940, 1956), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (1950, 1956), True, 'import numpy as np\n'), ((1966, 1982), 'numpy.asarray', 'np.asarray', (['labs'], {}), '(labs)\n', (1976, 1982), True, 'import numpy as np\n'), ((3329, 3368), 'os.path.join', 'os.path.join', (['folder_path', '"""checkpoint"""'], {}), "(folder_path, 'checkpoint')\n", (3341, 3368), False, 'import os\n'), ((3640, 3772), 'tensorflow.python.tools.inspect_checkpoint.print_tensors_in_checkpoint_file', 'chkp.print_tensors_in_checkpoint_file', (['model_path'], {'tensor_name': '"""v0/cg/affine0/"""', 'all_tensors': 'all_tensors', 'all_tensor_names': '(True)'}), "(model_path, tensor_name=\n 'v0/cg/affine0/', all_tensors=all_tensors, all_tensor_names=True)\n", (3677, 3772), True, 'from tensorflow.python.tools import inspect_checkpoint as chkp\n'), ((600, 612), 'json.load', 'json.load', (['f'], {}), '(f)\n', (609, 612), False, 'import json\n'), ((1264, 1278), 'cv2.imread', 'cv2.imread', (['pt'], {}), '(pt)\n', (1274, 
1278), False, 'import cv2\n'), ((1288, 1326), 'cv2.resize', 'cv2.resize', (['im', '(crop_size, crop_size)'], {}), '(im, (crop_size, crop_size))\n', (1298, 1326), False, 'import cv2\n'), ((1359, 1383), 'numpy.zeros', 'np.zeros', (['(num_classes,)'], {}), '((num_classes,))\n', (1367, 1383), True, 'import numpy as np\n'), ((1770, 1797), 'numpy.reshape', 'np.reshape', (['pt', '[3, 32, 32]'], {}), '(pt, [3, 32, 32])\n', (1780, 1797), True, 'import numpy as np\n'), ((1804, 1831), 'numpy.transpose', 'np.transpose', (['im', '[1, 2, 0]'], {}), '(im, [1, 2, 0])\n', (1816, 1831), True, 'import numpy as np\n'), ((1862, 1886), 'numpy.zeros', 'np.zeros', (['(num_classes,)'], {}), '((num_classes,))\n', (1870, 1886), True, 'import numpy as np\n'), ((2278, 2287), 'config.Options', 'Options', ([], {}), '()\n', (2285, 2287), False, 'from config import Options\n'), ((3463, 3499), 'os.path.join', 'os.path.join', (['folder_path', 'ckpt_name'], {}), '(folder_path, ckpt_name)\n', (3475, 3499), False, 'import os\n')] |
# Train a small CNN to distinguish indoor vs. outdoor samples from
# precomputed feature arrays stored as .npy files; evaluate and save.
import numpy as np
from keras.models import Sequential
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation

# Load the precomputed per-class training features and the mixed test set.
ind = np.load('saved/indoor_X_train_.npy')
out = np.load('saved/outdoor_X_train_.npy')
tran = np.load('saved/transportation_X_train_.npy')  # NOTE(review): loaded but unused below
X_test = np.load('saved/_X_test_.npy')
y_test = np.load('saved/_y_test_.npy')
# Binary training set: label 0 = indoor, label 1 = outdoor.
X_train2 = np.concatenate([ind, out], axis = 0)
y_train2 = np.concatenate([[0]*len(ind), [1]*len(out)], axis = 0)
print(X_train2.shape, y_train2.shape)
# Keep only labels 0/1 in the test set; label 2 is excluded
# (presumably 'transportation' -- TODO confirm).
X_test2, y_test2 = [], []
for i, label in enumerate(y_test):
    if label != 2:
        y_test2.append(label)
        X_test2.append(X_test[i])
# Log-compress (base 20) each sample, then min-max normalize it to [0, 1].
X_train2 = np.log(X_train2)/np.log(20)
X_test2 = np.log(X_test2)/np.log(20)
X_train2 = [(i - np.min(i))/(np.max(i) - np.min(i)) for i in X_train2]
X_test2 = [(i - np.min(i))/(np.max(i) - np.min(i)) for i in X_test2]
X_train2 = np.array(X_train2)
y_train2 = np.array(y_train2)
X_test2 = np.array(X_test2)
y_test2 = np.array(y_test2)
print(X_test2.shape, y_test2.shape)
y_train2_onehot = to_categorical(y_train2)
y_test2_onehot = to_categorical(y_test2)
# Add a trailing channel axis for Conv2D: (N, 60, 8) -> (N, 60, 8, 1).
X_train2 = np.array([i[..., np.newaxis] for i in X_train2])
X_test2 = np.array([i[..., np.newaxis] for i in X_test2])
# add noise
# Augmentation: perturb roughly half of the training samples (and every
# test sample) with small Gaussian noise.
X_tmp = np.copy(X_train2)
X_train2 = []
for i in X_tmp:
    if np.random.random() > 0.5:
        X_train2.append(i + np.random.normal(0,1,i.shape) * 1e-3)
    else:
        X_train2.append(i)
X_train2 = np.array(X_train2)
X_test2 = np.array([i + np.random.normal(0,1,i.shape) * 1e-3 for i in X_test2])
#create model
model = Sequential()
#add model layers
model.add(Conv2D(32, kernel_size = (3,1), activation = 'relu', input_shape=(60,8,1)))
model.add(BatchNormalization())
# model.add(MaxPooling2D(3,1))
model.add(Dropout(0.3))
model.add(Conv2D(16, kernel_size = (3,1), activation = 'relu'))
model.add(BatchNormalization())
# model.add(MaxPooling2D(3,1))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# NOTE(review): mcp_save and reduce_lr_loss are created but never passed to
# fit(), so only early stopping is active; 'epsilon' is the pre-2.3 Keras
# spelling of ReduceLROnPlateau's min_delta.
earlyStopping = EarlyStopping(monitor='val_accuracy', patience=10, verbose=0, mode='max')
mcp_save = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_accuracy', mode='max')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='max')
model.fit(X_train2, y_train2_onehot, validation_data=(X_test2, y_test2_onehot), epochs = 50, verbose = True, shuffle = True, batch_size = 32, callbacks=[earlyStopping])
# Evaluate on the held-out set and persist the trained model.
y_pred = model.predict(X_test2)
y_pred_label = np.argmax(y_pred, axis = 1)
print(classification_report(y_test2, y_pred_label))
print(confusion_matrix(y_test2, y_pred_label))
model.save('ind_vs_out.keras.model')
| [
"numpy.load",
"numpy.argmax",
"keras.models.Sequential",
"sklearn.metrics.classification_report",
"numpy.random.normal",
"numpy.copy",
"keras.layers.Flatten",
"numpy.max",
"keras.callbacks.ReduceLROnPlateau",
"keras.utils.to_categorical",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropou... | [((378, 414), 'numpy.load', 'np.load', (['"""saved/indoor_X_train_.npy"""'], {}), "('saved/indoor_X_train_.npy')\n", (385, 414), True, 'import numpy as np\n'), ((421, 458), 'numpy.load', 'np.load', (['"""saved/outdoor_X_train_.npy"""'], {}), "('saved/outdoor_X_train_.npy')\n", (428, 458), True, 'import numpy as np\n'), ((466, 510), 'numpy.load', 'np.load', (['"""saved/transportation_X_train_.npy"""'], {}), "('saved/transportation_X_train_.npy')\n", (473, 510), True, 'import numpy as np\n'), ((521, 550), 'numpy.load', 'np.load', (['"""saved/_X_test_.npy"""'], {}), "('saved/_X_test_.npy')\n", (528, 550), True, 'import numpy as np\n'), ((560, 589), 'numpy.load', 'np.load', (['"""saved/_y_test_.npy"""'], {}), "('saved/_y_test_.npy')\n", (567, 589), True, 'import numpy as np\n'), ((602, 636), 'numpy.concatenate', 'np.concatenate', (['[ind, out]'], {'axis': '(0)'}), '([ind, out], axis=0)\n', (616, 636), True, 'import numpy as np\n'), ((1101, 1119), 'numpy.array', 'np.array', (['X_train2'], {}), '(X_train2)\n', (1109, 1119), True, 'import numpy as np\n'), ((1131, 1149), 'numpy.array', 'np.array', (['y_train2'], {}), '(y_train2)\n', (1139, 1149), True, 'import numpy as np\n'), ((1160, 1177), 'numpy.array', 'np.array', (['X_test2'], {}), '(X_test2)\n', (1168, 1177), True, 'import numpy as np\n'), ((1188, 1205), 'numpy.array', 'np.array', (['y_test2'], {}), '(y_test2)\n', (1196, 1205), True, 'import numpy as np\n'), ((1261, 1285), 'keras.utils.to_categorical', 'to_categorical', (['y_train2'], {}), '(y_train2)\n', (1275, 1285), False, 'from keras.utils import to_categorical\n'), ((1303, 1326), 'keras.utils.to_categorical', 'to_categorical', (['y_test2'], {}), '(y_test2)\n', (1317, 1326), False, 'from keras.utils import to_categorical\n'), ((1338, 1386), 'numpy.array', 'np.array', (['[i[..., np.newaxis] for i in X_train2]'], {}), '([i[..., np.newaxis] for i in X_train2])\n', (1346, 1386), True, 'import numpy as np\n'), ((1397, 1444), 'numpy.array', 
'np.array', (['[i[..., np.newaxis] for i in X_test2]'], {}), '([i[..., np.newaxis] for i in X_test2])\n', (1405, 1444), True, 'import numpy as np\n'), ((1466, 1483), 'numpy.copy', 'np.copy', (['X_train2'], {}), '(X_train2)\n', (1473, 1483), True, 'import numpy as np\n'), ((1643, 1661), 'numpy.array', 'np.array', (['X_train2'], {}), '(X_train2)\n', (1651, 1661), True, 'import numpy as np\n'), ((1766, 1778), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1776, 1778), False, 'from keras.models import Sequential\n'), ((2352, 2425), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_accuracy', patience=10, verbose=0, mode='max')\n", (2365, 2425), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((2437, 2531), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['""".mdl_wts.hdf5"""'], {'save_best_only': '(True)', 'monitor': '"""val_accuracy"""', 'mode': '"""max"""'}), "('.mdl_wts.hdf5', save_best_only=True, monitor=\n 'val_accuracy', mode='max')\n", (2452, 2531), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((2544, 2652), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_accuracy"""', 'factor': '(0.1)', 'patience': '(7)', 'verbose': '(1)', 'epsilon': '(0.0001)', 'mode': '"""max"""'}), "(monitor='val_accuracy', factor=0.1, patience=7, verbose=1,\n epsilon=0.0001, mode='max')\n", (2561, 2652), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((2865, 2890), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2874, 2890), True, 'import numpy as np\n'), ((884, 900), 'numpy.log', 'np.log', (['X_train2'], {}), '(X_train2)\n', (890, 900), True, 'import numpy as np\n'), ((901, 911), 'numpy.log', 'np.log', (['(20)'], {}), '(20)\n', (907, 911), True, 'import 
numpy as np\n'), ((922, 937), 'numpy.log', 'np.log', (['X_test2'], {}), '(X_test2)\n', (928, 937), True, 'import numpy as np\n'), ((938, 948), 'numpy.log', 'np.log', (['(20)'], {}), '(20)\n', (944, 948), True, 'import numpy as np\n'), ((1807, 1880), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 1)', 'activation': '"""relu"""', 'input_shape': '(60, 8, 1)'}), "(32, kernel_size=(3, 1), activation='relu', input_shape=(60, 8, 1))\n", (1813, 1880), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((1893, 1913), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1911, 1913), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((1956, 1968), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1963, 1968), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((1980, 2029), 'keras.layers.Conv2D', 'Conv2D', (['(16)'], {'kernel_size': '(3, 1)', 'activation': '"""relu"""'}), "(16, kernel_size=(3, 1), activation='relu')\n", (1986, 2029), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2044, 2064), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2062, 2064), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2107, 2119), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2114, 2119), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2131, 2140), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2138, 2140), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2152, 2180), 'keras.layers.Dense', 'Dense', (['(50)'], 
{'activation': '"""relu"""'}), "(50, activation='relu')\n", (2157, 2180), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2192, 2204), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2199, 2204), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2216, 2246), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (2221, 2246), False, 'from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, Activation\n'), ((2900, 2944), 'sklearn.metrics.classification_report', 'classification_report', (['y_test2', 'y_pred_label'], {}), '(y_test2, y_pred_label)\n', (2921, 2944), False, 'from sklearn.metrics import classification_report\n'), ((2952, 2991), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test2', 'y_pred_label'], {}), '(y_test2, y_pred_label)\n', (2968, 2991), False, 'from sklearn.metrics import confusion_matrix\n'), ((1518, 1536), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1534, 1536), True, 'import numpy as np\n'), ((966, 975), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (972, 975), True, 'import numpy as np\n'), ((978, 987), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (984, 987), True, 'import numpy as np\n'), ((990, 999), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (996, 999), True, 'import numpy as np\n'), ((1036, 1045), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (1042, 1045), True, 'import numpy as np\n'), ((1048, 1057), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (1054, 1057), True, 'import numpy as np\n'), ((1060, 1069), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (1066, 1069), True, 'import numpy as np\n'), ((1687, 1718), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'i.shape'], {}), '(0, 1, i.shape)\n', (1703, 1718), True, 'import numpy as np\n'), ((1566, 1597), 
'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'i.shape'], {}), '(0, 1, i.shape)\n', (1582, 1597), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Table 4 Correlations with Kappa
import our_plot_config
from our_plot_config import derived_dir, tab_dir
import pandas as pd
import numpy as np
import re
# for regressions
import pyhdfe
from sklearn import datasets, linear_model
import statsmodels.formula.api as smf
from statsmodels.iolib.summary2 import summary_col
# Input
f_regression = derived_dir / 'regression_data.parquet'
# Output
f_tab4 = tab_dir / 'table4.tex'
# Read data: load only the columns needed for the regressions and shorten
# the Big Three ownership beta column names.
cols = [
    'from',
    'to',
    'quarter',
    'kappa',
    'retail_share',
    'market_cap',
    'marginsq',
    'normalized_l2',
    'big3',
    'beta_BlackRock',
    'beta_Vanguard',
    'beta_StateStreet']
df = pd.read_parquet(
    f_regression,
    columns=cols).rename(
        columns={
            'beta_BlackRock': 'blackrock',
            'beta_Vanguard': 'vanguard',
            'beta_StateStreet': 'statestreet'})
# Filter on dates
df = df[(df.quarter > '2000-01-01')].copy()
# Calculate derived columns
df['lcap'] = np.log(df['market_cap'])
# Code the FE first: This speeds things up to avoid type converting 13
# million dates
df['pair_fe'] = df.groupby(['from', 'to']).ngroup()
df['quarter_fe'] = df.groupby(['quarter']).ngroup()
# Regressions!
# We will need to absorb: do that first
# This is comically slow and uses 30+GB
var_list = [
    'kappa',
    'retail_share',
    'lcap',
    'marginsq',
    'normalized_l2',
    'big3',
    'blackrock',
    'vanguard',
    'statestreet']
# Drop any missings
df2 = df[var_list + ['pair_fe', 'quarter_fe']].dropna()
# Absorb pair and quarter fixed effects with pyhdfe, then run OLS on the
# residualized variables (Frisch-Waugh-Lovell).
alg_pa = pyhdfe.create(
    df2[['pair_fe', 'quarter_fe']].values, drop_singletons=False)
resid_pa = alg_pa.residualize(df2[var_list].values)
# Perform Regressions
# no need for fixed effects because we've already residualized everything
# drop rows containing NAs
pd_vars = pd.DataFrame(resid_pa, columns=['kappa', 'retail_share', 'lcap',
                                 'marginsq', 'normalized_l2',
                                 'big3', 'blackrock', 'vanguard', 'statestreet'])
reg1 = smf.ols(
    formula='kappa ~ retail_share + lcap + marginsq + big3',
    data=pd_vars).fit()
reg2 = smf.ols(
    formula='kappa ~ retail_share + lcap + marginsq + normalized_l2',
    data=pd_vars).fit()
reg3 = smf.ols(
formula='kappa ~ retail_share + lcap + marginsq + big3 + normalized_l2',
data=pd_vars).fit()
reg4 = smf.ols(
formula='kappa ~ retail_share + lcap + marginsq + normalized_l2 + blackrock + vanguard + statestreet',
data=pd_vars).fit()
# Adjust R^2 for the FE
def rsq_update(reg):
    """Replace ``reg.rsquared`` with an R^2 that credits the absorbed
    fixed effects, and attach checkmark strings for the FE table rows.

    Mutates ``reg`` in place; reads module-level ``df2`` and ``resid_pa``.
    """
    # Fitted values on the residualized data plus the FE component that
    # was absorbed out of kappa (original outcome minus residualized one).
    fitted_with_fe = reg.predict() + (df2['kappa'].values - resid_pa[:, 0])
    reg.rsquared = np.var(fitted_with_fe) / np.var(df2['kappa'])
    reg.quarterfe = r" \checkmark "
    reg.pairfe = r" \checkmark "
# Patch every regression object with the FE-adjusted R^2 and FE checkmarks.
for r in [reg1, reg2, reg3, reg4]:
    rsq_update(r)
# Print Output
# Extra rows appended below the coefficients; keys are emitted verbatim in
# TeX. The raw string fixes the invalid '\s' escape sequence (formerly a
# DeprecationWarning, a SyntaxWarning on Python 3.12+); the literal value
# is unchanged.
info_dict = {r'R\sq': lambda x: f"{x.rsquared:.4f}",
             'Quarter FE': lambda x: f"{x.quarterfe}",
             'Ordered Pair FE': lambda x: f"{x.pairfe}",
             'N': lambda x: f"{int(x.nobs):d}"
             }
dfoutput = summary_col(results=[reg1, reg2, reg3, reg4],
                      float_format='%0.4f',
                      stars=True,
                      model_names=['(1)',
                                   '(2)',
                                   '(3)',
                                   '(4)'],
                      info_dict=info_dict,
                      regressor_order=['retail_share',
                                       'lcap',
                                       'marginsq',
                                       'big3',
                                       'normalized_l2',
                                       'blackrock',
                                       'vanguard',
                                       'statestreet'
                                       ],
                      drop_omitted=True)
# Clean up the TeX by hand for the table
tab_reg2 = re.sub(r'\*\*\*', '*', dfoutput.as_latex())  # demote *** stars to *
tab_reg3 = re.sub(r'hline', 'toprule', tab_reg2, count=1)
tab_reg4 = re.sub(r'hline', 'bottomrule', tab_reg3, count=1)
tab_reg5 = re.sub(r'retail\\_share', 'Retail Share', tab_reg4)
tab_reg5 = re.sub(r'lcap', 'Log(Market Cap)', tab_reg5)
tab_reg5 = re.sub(r'marginsq', 'Operating Margin', tab_reg5)
tab_reg5 = re.sub(r'big3', 'Big Three Holdings', tab_reg5)
tab_reg5 = re.sub(r'normalized\\_l2', 'Investor Indexing', tab_reg5)
tab_reg5 = re.sub(r'blackrock', 'BlackRock Holdings', tab_reg5)
tab_reg5 = re.sub(r'vanguard', 'Vanguard Holdings', tab_reg5)
tab_reg5 = re.sub(r'statestreet', 'State Street Holdings', tab_reg5)
tab_reg5 = re.sub(r'R\\sq', '$R^2$', tab_reg5)
# NOTE(review): this rewrites *every* capital 'N' left in the TeX, not just
# the stats-row label -- works with the labels above, but fragile.
tab_reg5 = re.sub(r'N', '$N$', tab_reg5)
# Strip the summary_col preamble/postamble lines before writing.
out_tab = '\n'.join(tab_reg5.splitlines()[3:-2])
# Display table and save
print(out_tab)
with open(f_tab4, 'w') as file:
    file.write(out_tab)
| [
"pandas.DataFrame",
"numpy.log",
"pyhdfe.create",
"numpy.var",
"statsmodels.formula.api.ols",
"pandas.read_parquet",
"statsmodels.iolib.summary2.summary_col",
"re.sub"
] | [((1021, 1045), 'numpy.log', 'np.log', (["df['market_cap']"], {}), "(df['market_cap'])\n", (1027, 1045), True, 'import numpy as np\n'), ((1581, 1656), 'pyhdfe.create', 'pyhdfe.create', (["df2[['pair_fe', 'quarter_fe']].values"], {'drop_singletons': '(False)'}), "(df2[['pair_fe', 'quarter_fe']].values, drop_singletons=False)\n", (1594, 1656), False, 'import pyhdfe\n'), ((1848, 1994), 'pandas.DataFrame', 'pd.DataFrame', (['resid_pa'], {'columns': "['kappa', 'retail_share', 'lcap', 'marginsq', 'normalized_l2', 'big3',\n 'blackrock', 'vanguard', 'statestreet']"}), "(resid_pa, columns=['kappa', 'retail_share', 'lcap', 'marginsq',\n 'normalized_l2', 'big3', 'blackrock', 'vanguard', 'statestreet'])\n", (1860, 1994), True, 'import pandas as pd\n'), ((3108, 3406), 'statsmodels.iolib.summary2.summary_col', 'summary_col', ([], {'results': '[reg1, reg2, reg3, reg4]', 'float_format': '"""%0.4f"""', 'stars': '(True)', 'model_names': "['(1)', '(2)', '(3)', '(4)']", 'info_dict': 'info_dict', 'regressor_order': "['retail_share', 'lcap', 'marginsq', 'big3', 'normalized_l2', 'blackrock',\n 'vanguard', 'statestreet']", 'drop_omitted': '(True)'}), "(results=[reg1, reg2, reg3, reg4], float_format='%0.4f', stars=\n True, model_names=['(1)', '(2)', '(3)', '(4)'], info_dict=info_dict,\n regressor_order=['retail_share', 'lcap', 'marginsq', 'big3',\n 'normalized_l2', 'blackrock', 'vanguard', 'statestreet'], drop_omitted=True\n )\n", (3119, 3406), False, 'from statsmodels.iolib.summary2 import summary_col\n'), ((4064, 4109), 're.sub', 're.sub', (['"""hline"""', '"""toprule"""', 'tab_reg2'], {'count': '(1)'}), "('hline', 'toprule', tab_reg2, count=1)\n", (4070, 4109), False, 'import re\n'), ((4122, 4170), 're.sub', 're.sub', (['"""hline"""', '"""bottomrule"""', 'tab_reg3'], {'count': '(1)'}), "('hline', 'bottomrule', tab_reg3, count=1)\n", (4128, 4170), False, 'import re\n'), ((4184, 4236), 're.sub', 're.sub', (['"""retail\\\\\\\\_share"""', '"""Retail Share"""', 'tab_reg4'], {}), 
"('retail\\\\\\\\_share', 'Retail Share', tab_reg4)\n", (4190, 4236), False, 'import re\n'), ((4247, 4290), 're.sub', 're.sub', (['"""lcap"""', '"""Log(Market Cap)"""', 'tab_reg5'], {}), "('lcap', 'Log(Market Cap)', tab_reg5)\n", (4253, 4290), False, 'import re\n'), ((4303, 4351), 're.sub', 're.sub', (['"""marginsq"""', '"""Operating Margin"""', 'tab_reg5'], {}), "('marginsq', 'Operating Margin', tab_reg5)\n", (4309, 4351), False, 'import re\n'), ((4364, 4410), 're.sub', 're.sub', (['"""big3"""', '"""Big Three Holdings"""', 'tab_reg5'], {}), "('big3', 'Big Three Holdings', tab_reg5)\n", (4370, 4410), False, 'import re\n'), ((4423, 4481), 're.sub', 're.sub', (['"""normalized\\\\\\\\_l2"""', '"""Investor Indexing"""', 'tab_reg5'], {}), "('normalized\\\\\\\\_l2', 'Investor Indexing', tab_reg5)\n", (4429, 4481), False, 'import re\n'), ((4492, 4543), 're.sub', 're.sub', (['"""blackrock"""', '"""BlackRock Holdings"""', 'tab_reg5'], {}), "('blackrock', 'BlackRock Holdings', tab_reg5)\n", (4498, 4543), False, 'import re\n'), ((4556, 4605), 're.sub', 're.sub', (['"""vanguard"""', '"""Vanguard Holdings"""', 'tab_reg5'], {}), "('vanguard', 'Vanguard Holdings', tab_reg5)\n", (4562, 4605), False, 'import re\n'), ((4618, 4674), 're.sub', 're.sub', (['"""statestreet"""', '"""State Street Holdings"""', 'tab_reg5'], {}), "('statestreet', 'State Street Holdings', tab_reg5)\n", (4624, 4674), False, 'import re\n'), ((4687, 4723), 're.sub', 're.sub', (['"""R\\\\\\\\sq"""', '"""$R^2$"""', 'tab_reg5'], {}), "('R\\\\\\\\sq', '$R^2$', tab_reg5)\n", (4693, 4723), False, 'import re\n'), ((4734, 4762), 're.sub', 're.sub', (['"""N"""', '"""$N$"""', 'tab_reg5'], {}), "('N', '$N$', tab_reg5)\n", (4740, 4762), False, 'import re\n'), ((705, 748), 'pandas.read_parquet', 'pd.read_parquet', (['f_regression'], {'columns': 'cols'}), '(f_regression, columns=cols)\n', (720, 748), True, 'import pandas as pd\n'), ((2084, 2162), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""kappa ~ 
retail_share + lcap + marginsq + big3"""', 'data': 'pd_vars'}), "(formula='kappa ~ retail_share + lcap + marginsq + big3', data=pd_vars)\n", (2091, 2162), True, 'import statsmodels.formula.api as smf\n'), ((2185, 2276), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""kappa ~ retail_share + lcap + marginsq + normalized_l2"""', 'data': 'pd_vars'}), "(formula='kappa ~ retail_share + lcap + marginsq + normalized_l2',\n data=pd_vars)\n", (2192, 2276), True, 'import statsmodels.formula.api as smf\n'), ((2295, 2399), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""kappa ~ retail_share + lcap + marginsq + big3 + normalized_l2"""', 'data': 'pd_vars'}), "(formula=\n 'kappa ~ retail_share + lcap + marginsq + big3 + normalized_l2', data=\n pd_vars)\n", (2302, 2399), True, 'import statsmodels.formula.api as smf\n'), ((2412, 2546), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""kappa ~ retail_share + lcap + marginsq + normalized_l2 + blackrock + vanguard + statestreet"""', 'data': 'pd_vars'}), "(formula=\n 'kappa ~ retail_share + lcap + marginsq + normalized_l2 + blackrock + vanguard + statestreet'\n , data=pd_vars)\n", (2419, 2546), True, 'import statsmodels.formula.api as smf\n'), ((2693, 2713), 'numpy.var', 'np.var', (["df2['kappa']"], {}), "(df2['kappa'])\n", (2699, 2713), True, 'import numpy as np\n')] |
import os
os.chdir("G:/My Drive/EverythingElseBackup/Python_fun_projects/Wordle_solver")
import pandas as pd
import numpy as np
import scipy as sp
import csv
from numpy import loadtxt
from collections import Counter
from itertools import compress
import pandas as pd
# Word lists are stored as one comma-separated line of quoted words.
pl = open('possible.txt', 'r').read().replace("'", "").split(",") # the 2000 words to solve
possible = sorted(pl)
ml = open('master.txt', 'r').read().replace("'", "").split(",")
master = sorted(ml+possible)
al = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
#Frequency model
# Letters ranked by frequency of occurrence, most common first.
plc = sorted(Counter(''.join(possible)), key=Counter(''.join(possible)).get, reverse=True)
mlc = sorted(Counter(''.join(master)), key=Counter(''.join(master)).get, reverse=True) #totals
mlcw = [i / sum(Counter(''.join(master)).values()) for i in list(Counter(''.join(master)).values())] #weights
#sort by single letter frequency
letter_sorted_master = []
#sort master list by letter frequency in Master
for alpha in range(0,26):
    for i in range(0, len(master)):
        if mlc[alpha] in master[i]:
            letter_sorted_master.append(master[i])
# De-duplicate while preserving first-occurrence (frequency) order.
letter_sorted_master = [i for n, i in enumerate(letter_sorted_master) if i not in letter_sorted_master[:n]]
len(letter_sorted_master)
################## spanning the uncertainty space
rrun = 10000
#rrun = 10
solution_counter = []
word_list = []
guess_words = []
first_mword = []
# second_mword = []
# third_mword = []
# fourth_mword = []
# fifth_mword = []
# sixth_mword = []
# One row per simulated game; columns record each successive guess.
word_saver = np.zeros((rrun, 50), dtype = object)
#alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Monte-Carlo simulation: each trial draws a target `word` and an initial
# guess `mword`, then repeatedly shrinks the candidate pool using
# Wordle-style feedback (green/yellow/grey letters) until solved.
for xxx in range(0,rrun):
    word = possible[np.random.randint(0,len(possible))]
    mword = letter_sorted_master[np.random.randint(0,len(letter_sorted_master))]
    #word = possible[np.random.randint(0,10)]
    #mword = letter_sorted_master[xxx]
    #mword = letter_sorted_master[xxx]
    #word = 'chase'
    #mword = 'stare'
    # first_mword.append(mword)
    word_list.append(word)
    filtered_master = letter_sorted_master
    for ooo in range(0,30):
        word_saver[xxx,ooo] = mword
        if word == mword:
            solution_counter.append(ooo+1)
            guess_words.append(mword)
            print(xxx)
            break
        #removing the words with letters are incorrect positions
        # eperm_index: guess positions that are NOT exact matches.
        eperm_index = []
        for i in range(0,5):
            if mword[i] != word[i]:
                eperm_index.append(i)
        # Drop candidates that place one of those letters at the same
        # (known-wrong) position as the current guess.
        if len(eperm_index) >0:
            esuper_master = []
            esuper_master.append(filtered_master)
            esuper_master.append(list(set(esuper_master[0]) - set((list(compress(esuper_master[0], np.char.find(esuper_master[0], mword[eperm_index[0]]) == eperm_index[0]))))))
        if len(eperm_index) > 1:
            esuper_master.append(list(set(esuper_master[1]) - set((list(compress(esuper_master[1], np.char.find(esuper_master[1], mword[eperm_index[1]]) == eperm_index[1]))))))
        if len(eperm_index) > 2:
            esuper_master.append(list(set(esuper_master[2]) - set((list(compress(esuper_master[2], np.char.find(esuper_master[2], mword[eperm_index[2]]) == eperm_index[2]))))))
        if len(eperm_index) > 3:
            esuper_master.append(list(set(esuper_master[3]) - set((list(compress(esuper_master[3], np.char.find(esuper_master[3], mword[eperm_index[3]]) == eperm_index[3]))))))
        if len(eperm_index) > 4:
            esuper_master.append(list(set(esuper_master[4]) - set((list(compress(esuper_master[4], np.char.find(esuper_master[4], mword[eperm_index[4]]) == eperm_index[4]))))))
        # perm_index: exact ("green") match positions.
        perm_index = []
        for i in range(0,5):
            if mword[i] == word[i]:
                perm_index.append(i)
        #get set of words with letters at exact position, if any
        super_master = []
        if len(perm_index) == 0:
            super_master.append(esuper_master[len(eperm_index)])
        if len(perm_index) > 0:
            super_master.append((list(compress(esuper_master[len(eperm_index)], np.char.find(esuper_master[len(eperm_index)], mword[perm_index[0]], start=perm_index[0]) ==perm_index[0]))))
        if len(perm_index) > 1:
            super_master.append((list(compress(super_master[0], np.char.find(super_master[0], mword[perm_index[1]], start=perm_index[1]) ==perm_index[1]))))
        if len(perm_index) > 2:
            super_master.append((list(compress(super_master[1], np.char.find(super_master[1], mword[perm_index[2]], start=perm_index[2]) ==perm_index[2]))))
        if len(perm_index) > 3:
            super_master.append((list(compress(super_master[2], np.char.find(super_master[2], mword[perm_index[3]], start=perm_index[3]) ==perm_index[3]))))
        if len(perm_index) > 4:
            super_master.append((list(compress(super_master[3], np.char.find(super_master[3], mword[perm_index[4]], start=perm_index[4]) ==perm_index[4]))))
        # aperm_index: guess positions whose letter occurs anywhere in the
        # target; eli_index: positions whose letter is absent ("grey").
        aperm_index = []
        eli_index = []
        for i in range(0,5):
            if mword[i] in word:
                aperm_index.append(i)
            if mword[i] not in word:
                eli_index.append(i)
        #removal of letter/s that dont appear in the word
        if len(eli_index) == 0:
            # NOTE(review): bare comparison is a no-op; presumably meant `pass`.
            super_master == super_master
        if len(eli_index) > 0:
            super_master[len(perm_index)-1] = list(set(super_master[len(perm_index)-1]) - set((list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[eli_index[0]]) >= 0)))))
        if len(eli_index) > 1:
            super_master[len(perm_index)-1] = list(set(super_master[len(perm_index)-1]) - set((list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[eli_index[1]]) >= 0)))))
        if len(eli_index) > 2:
            super_master[len(perm_index)-1] = list(set(super_master[len(perm_index)-1]) - set((list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[eli_index[2]]) >= 0)))))
        if len(eli_index) > 3:
            super_master[len(perm_index)-1] = list(set(super_master[len(perm_index)-1]) - set((list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[eli_index[3]]) >= 0)))))
        if len(eli_index) > 4:
            super_master[len(perm_index)-1] = list(set(super_master[len(perm_index)-1]) - set((list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[eli_index[4]]) >= 0)))))
        # net_index: misplaced ("yellow") letters = in word but not green.
        net_index = [x for x in aperm_index if x not in perm_index]
        #keeping the set only with the letters that appear somewhere in the word
        afiltered_master = []
        if len(net_index) == 0:
            afiltered_master = [super_master[len(perm_index)-1]]
        if len(net_index) > 0:
            afiltered_master.append(list(compress(super_master[len(perm_index)-1], np.char.find(super_master[len(perm_index)-1], mword[net_index[0]]) >=0)))
        if len(net_index) > 1:
            afiltered_master.append(list(compress(afiltered_master[0], np.char.find(afiltered_master[0], mword[net_index[1]]) >=0)))
        if len(net_index) > 2:
            afiltered_master.append(list(compress(afiltered_master[1], np.char.find(afiltered_master[1], mword[net_index[2]]) >=0)))
        if len(net_index) > 3:
            afiltered_master.append(list(compress(afiltered_master[2], np.char.find(afiltered_master[2], mword[net_index[3]]) >=0)))
        if len(net_index) > 4:
            afiltered_master.append(list(compress(afiltered_master[3], np.char.find(afiltered_master[3], mword[net_index[4]]) >=0)))
        if len(afiltered_master) > 1:
            filtered_master = afiltered_master[len(net_index)-1]
        if len(afiltered_master) == 1:
            filtered_master = afiltered_master[0]
        # Next guess: forced when one candidate remains, else random pick.
        if len([filtered_master][0]) == 1:
            mword = filtered_master[0]
        if len([filtered_master][0]) > 1:
            mword = filtered_master[np.random.randint(0,len(filtered_master))]
        if word == mword:
            word_saver[xxx,ooo+1] = mword
            solution_counter.append(ooo+2)
            guess_words.append(mword)
            print(xxx)
            break
        filtered_master.remove(mword)
# Summary statistics and CSV export of the simulation results.
np.mean(solution_counter)
#writer = pd.ExcelWriter('solutioncounter_skill.xlsx', engine='xlsxwriter')
# Replace unused guess slots (still numeric 0) with empty strings.
wrd = pd.DataFrame(word_saver).replace(0, '')
#pd.concat([pd.DataFrame([solution_counter,word_list, guess_words]).T,wrd],axis=1).to_excel(writer, sheet_name = 'Counter', header = ['Tries', 'OrigWord', 'GuessedWord', 'First', 'Sec', 'Third', 'Fourth', 'Fifth', 'Sixth', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third'])
pd.concat([pd.DataFrame([solution_counter,word_list, guess_words]).T,wrd],axis=1).to_csv('testing.csv', header = ['Tries', 'OrigWord', 'GuessedWord', 'First', 'Sec', 'Third', 'Fourth', 'Fifth', 'Sixth', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third', 'Third'])
#writer.save()
# Bare expressions below are leftover REPL-style inspection; no-ops here.
len(solution_counter)
word_list
guess_words
len(guess_words)
# pd.DataFrame(solution_counter).to_excel(writer,sheet_name = 'counter')
# pd.DataFrame(word_list).to_excel(writer, sheet_name = 'Orig list')
# pd.DataFrame(guess_words).to_excel(writer, sheet_name = 'Guessed list')
# pd.DataFrame(first_mword).to_excel(writer, sheet_name = 'First mword')
#pd.DataFrame(second_mword).to_excel(writer, sheet_name = 'Second mword')
| [
"pandas.DataFrame",
"numpy.char.find",
"numpy.zeros",
"numpy.mean",
"os.chdir"
] | [((11, 89), 'os.chdir', 'os.chdir', (['"""G:/My Drive/EverythingElseBackup/Python_fun_projects/Wordle_solver"""'], {}), "('G:/My Drive/EverythingElseBackup/Python_fun_projects/Wordle_solver')\n", (19, 89), False, 'import os\n'), ((1627, 1661), 'numpy.zeros', 'np.zeros', (['(rrun, 50)'], {'dtype': 'object'}), '((rrun, 50), dtype=object)\n', (1635, 1661), True, 'import numpy as np\n'), ((8606, 8631), 'numpy.mean', 'np.mean', (['solution_counter'], {}), '(solution_counter)\n', (8613, 8631), True, 'import numpy as np\n'), ((8718, 8742), 'pandas.DataFrame', 'pd.DataFrame', (['word_saver'], {}), '(word_saver)\n', (8730, 8742), True, 'import pandas as pd\n'), ((9391, 9447), 'pandas.DataFrame', 'pd.DataFrame', (['[solution_counter, word_list, guess_words]'], {}), '([solution_counter, word_list, guess_words])\n', (9403, 9447), True, 'import pandas as pd\n'), ((4477, 4549), 'numpy.char.find', 'np.char.find', (['super_master[0]', 'mword[perm_index[1]]'], {'start': 'perm_index[1]'}), '(super_master[0], mword[perm_index[1]], start=perm_index[1])\n', (4489, 4549), True, 'import numpy as np\n'), ((4668, 4740), 'numpy.char.find', 'np.char.find', (['super_master[1]', 'mword[perm_index[2]]'], {'start': 'perm_index[2]'}), '(super_master[1], mword[perm_index[2]], start=perm_index[2])\n', (4680, 4740), True, 'import numpy as np\n'), ((4859, 4931), 'numpy.char.find', 'np.char.find', (['super_master[2]', 'mword[perm_index[3]]'], {'start': 'perm_index[3]'}), '(super_master[2], mword[perm_index[3]], start=perm_index[3])\n', (4871, 4931), True, 'import numpy as np\n'), ((5050, 5122), 'numpy.char.find', 'np.char.find', (['super_master[3]', 'mword[perm_index[4]]'], {'start': 'perm_index[4]'}), '(super_master[3], mword[perm_index[4]], start=perm_index[4])\n', (5062, 5122), True, 'import numpy as np\n'), ((7387, 7441), 'numpy.char.find', 'np.char.find', (['afiltered_master[0]', 'mword[net_index[1]]'], {}), '(afiltered_master[0], mword[net_index[1]])\n', (7399, 7441), True, 'import numpy as 
np\n'), ((7553, 7607), 'numpy.char.find', 'np.char.find', (['afiltered_master[1]', 'mword[net_index[2]]'], {}), '(afiltered_master[1], mword[net_index[2]])\n', (7565, 7607), True, 'import numpy as np\n'), ((7719, 7773), 'numpy.char.find', 'np.char.find', (['afiltered_master[2]', 'mword[net_index[3]]'], {}), '(afiltered_master[2], mword[net_index[3]])\n', (7731, 7773), True, 'import numpy as np\n'), ((7885, 7939), 'numpy.char.find', 'np.char.find', (['afiltered_master[3]', 'mword[net_index[4]]'], {}), '(afiltered_master[3], mword[net_index[4]])\n', (7897, 7939), True, 'import numpy as np\n'), ((2903, 2956), 'numpy.char.find', 'np.char.find', (['esuper_master[0]', 'mword[eperm_index[0]]'], {}), '(esuper_master[0], mword[eperm_index[0]])\n', (2915, 2956), True, 'import numpy as np\n'), ((3115, 3168), 'numpy.char.find', 'np.char.find', (['esuper_master[1]', 'mword[eperm_index[1]]'], {}), '(esuper_master[1], mword[eperm_index[1]])\n', (3127, 3168), True, 'import numpy as np\n'), ((3327, 3380), 'numpy.char.find', 'np.char.find', (['esuper_master[2]', 'mword[eperm_index[2]]'], {}), '(esuper_master[2], mword[eperm_index[2]])\n', (3339, 3380), True, 'import numpy as np\n'), ((3539, 3592), 'numpy.char.find', 'np.char.find', (['esuper_master[3]', 'mword[eperm_index[3]]'], {}), '(esuper_master[3], mword[eperm_index[3]])\n', (3551, 3592), True, 'import numpy as np\n'), ((3751, 3804), 'numpy.char.find', 'np.char.find', (['esuper_master[4]', 'mword[eperm_index[4]]'], {}), '(esuper_master[4], mword[eperm_index[4]])\n', (3763, 3804), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
def getEqns(N):
    """Print the string representations of the equations to solve for
    N external particles, one per line, in descending index order."""
    for i in range(N - 1, 1, -1):
        a = N - i
        print("r_{} = {}u^{} - {}u^{}".format(i - 1, a + 1, a, a, a + 1))
def mkData(N, npoint=100):
    """Sample the curves r_{i-1}(u) = (N-i+1) u^(N-i) - (N-i) u^(N-i+1)
    on a uniform grid of `npoint` points in [0, 1].

    Returns the grid U and the list of curve arrays, ordered i = N-1 .. 2.
    """
    U = np.linspace(0.0, 1.0, npoint)
    curves = []
    for i in range(N - 1, 1, -1):
        a = N - i
        curves.append((a + 1) * U ** a - a * U ** (a + 1))
    return U, curves
if __name__ == "__main__":
import sys
NP = int(sys.argv[1])
getEqns(NP)
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# U, YY = mkData(NP)
# for num, y in enumerate(YY):
# plt.plot(U, y, label="$r_{}$".format(num+1))
# plt.legend()
# plt.show()
def u2(r1):
"""
Integration variable in the n=3 case
"""
return 1. - np.sqrt(1.-r1)
def u3(r):
kappa = np.cbrt( 2*np.sqrt(r*(r-1) ) -2*r +1 )
return 0.5*(kappa + 1./kappa + 1)
def f(x, a, r):
return a*x**(a-1) - (a-1)*x**a - r
def fp(x, a):
return a*(a-1)*(x**(a-2) - x**(a-1))
def fpp(x, a):
return a*(a-1)*((a-2)*x**(a-3) - (a-1)*x**(a-2))
a=NP-1
r = np.random.rand()
print("Drawn random number r={}".format(r))
from scipy import optimize
if a>2:
res = optimize.newton(lambda x : f(x,a,r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a))
else:
res = optimize.newton(lambda x : f(x,a,r), r, fprime=lambda x: fp(x,a))
# from IPython import embed
# embed()
# X = np.linspace(0,1,100)
# plt.plot(X, f(X,a,r), label="f, r={}".format(r))
# plt.plot(X, fp(X,a), label="fp")
# if a>2:
# plt.plot(X, fpp(X,a), label="fpp")
# plt.axvline(res, label="u={}".format(res))
# plt.legend()
# plt.show()
# Numerical experimentation --- measure distance between r and u
NSAMPLES=int(sys.argv[2])#10000
import time
U = np.empty(NSAMPLES)
R = np.empty(NSAMPLES)
t1=time.time()
# U = [optimize.newton(lambda x : f(x,a,r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a)) for r in R]
# for num, r in enumerate(R):
for num in range(NSAMPLES):
r = np.random.rand()
u = optimize.newton(lambda x : f(x,a,r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a))
R[num] = r
U[num] = u
t2=time.time()
print("Evaluation of {} samples with a={} took {} seconds, residual = {}".format(NSAMPLES, a, t2-t1, sum([f(u,a,r) for u, r in zip(U,R)])))
print("Distance between u and r = {} +/- {}".format(np.mean(R-U), np.std(R-U)))
plt.scatter(R,U, s=0.01)
plt.show()
# plt.hist(U, bins=50, label="u", histtype="step")
# plt.hist(R, bins=50, label="r", histtype="step")
# plt.legend()
# plt.show()
t1=time.time()
UNO = [u2(r) for r in R]
t2=time.time()
print("Evaluation of {} samples with a={} took {} seconds".format(NSAMPLES, a, t2-t1))
# from IPython import embed
# embed()
| [
"matplotlib.pyplot.show",
"numpy.std",
"numpy.empty",
"matplotlib.pyplot.scatter",
"time.time",
"matplotlib.pyplot.style.use",
"numpy.mean",
"numpy.linspace",
"numpy.random.rand",
"numpy.sqrt"
] | [((298, 323), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'npoint'], {}), '(0, 1, npoint)\n', (309, 323), True, 'import numpy as np\n'), ((574, 597), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (587, 597), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1232), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1230, 1232), True, 'import numpy as np\n'), ((1974, 1992), 'numpy.empty', 'np.empty', (['NSAMPLES'], {}), '(NSAMPLES)\n', (1982, 1992), True, 'import numpy as np\n'), ((2001, 2019), 'numpy.empty', 'np.empty', (['NSAMPLES'], {}), '(NSAMPLES)\n', (2009, 2019), True, 'import numpy as np\n'), ((2027, 2038), 'time.time', 'time.time', ([], {}), '()\n', (2036, 2038), False, 'import time\n'), ((2403, 2414), 'time.time', 'time.time', ([], {}), '()\n', (2412, 2414), False, 'import time\n'), ((2651, 2676), 'matplotlib.pyplot.scatter', 'plt.scatter', (['R', 'U'], {'s': '(0.01)'}), '(R, U, s=0.01)\n', (2662, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2688, 2690), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2857), 'time.time', 'time.time', ([], {}), '()\n', (2855, 2857), False, 'import time\n'), ((2894, 2905), 'time.time', 'time.time', ([], {}), '()\n', (2903, 2905), False, 'import time\n'), ((2234, 2250), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2248, 2250), True, 'import numpy as np\n'), ((858, 875), 'numpy.sqrt', 'np.sqrt', (['(1.0 - r1)'], {}), '(1.0 - r1)\n', (865, 875), True, 'import numpy as np\n'), ((2618, 2632), 'numpy.mean', 'np.mean', (['(R - U)'], {}), '(R - U)\n', (2625, 2632), True, 'import numpy as np\n'), ((2632, 2645), 'numpy.std', 'np.std', (['(R - U)'], {}), '(R - U)\n', (2638, 2645), True, 'import numpy as np\n'), ((917, 937), 'numpy.sqrt', 'np.sqrt', (['(r * (r - 1))'], {}), '(r * (r - 1))\n', (924, 937), True, 'import numpy as np\n')] |
# This file create a new drug-protein association matrix based on shortest-path calculations.
import numpy as np
import csv
import networkx as nx
import joblib
from tqdm import tqdm
import pandas as pd
# Each line: "<drug> <protein>" association pair.
with open('./data/DrugsToProteins.txt', "r") as f:
    R_DP = [element.split() for element in f.readlines()]
    f.close()
PROTEINS = list(set([x[1] for x in R_DP]))
DRUGS = list(set([x[0] for x in R_DP]))
drug_set = set(DRUGS)
# Each line: "<drug> <label words...>"; keep only drugs seen above.
with open('./data/DrugsToLabels.txt', "r") as f:
    R_DL_all = [[element.split()[0], " ".join(element.split()[1:])] for element in f.readlines()]
    R_DL = [x for x in R_DL_all if x[0] in drug_set]
    f.close()
LABELS = list(set([x[1] for x in R_DL]))
# Protein-protein edges: first two whitespace-separated fields per line.
with open('./data/ProteinsToProteins.txt', "r") as f:
    R_PP = [element.split()[:2] for element in f.readlines()]
    f.close()
revert_edge = lambda x : [x[1], x[0]]
PROTEINS.sort()
DRUGS.sort()
# Directed graph: protein -> drug edges (reversed drug-protein pairs) plus
# protein-protein edges in both directions, so paths run proteins -> drugs.
G = nx.DiGraph()
G.add_nodes_from(PROTEINS)
G.add_nodes_from(DRUGS)
G.add_edges_from([revert_edge(x) for x in R_DP])
G.add_edges_from(R_PP + [revert_edge(x) for x in R_PP])
n2, n3 = len(DRUGS), len(PROTEINS)
R23_new = np.zeros((n2, n3))
# Shortest-path length protein j -> drug i; 0 where unreachable.
# NOTE(review): has_path + shortest_path_length searches each pair twice;
# a try/except around shortest_path_length alone would halve the work.
for i in tqdm(range(n2)):
    for j in (range(n3)):
        if nx.has_path(G, PROTEINS[j], DRUGS[i]):
            R23_new[i,j] = nx.shortest_path_length(G, source = PROTEINS[j], target = DRUGS[i])
# Keep only associations within 3 hops.
for i in range(n2):
    for j in range(n3):
        if R23_new[i,j] > 3:
            R23_new[i,j] = 0
# Convert hop counts to decaying weights 0.2**(hops-1): 1, 0.2, 0.04.
R23_new_1 = R23_new.astype('float32')
for i in range(n2):
    for j in range(n3):
        if int(R23_new_1[i,j]) != 0:
            R23_new_1[i,j] = 0.2 ** int(R23_new_1[i,j]-1)
# NOTE(review): the *unweighted* R23_new is saved while the weighted
# R23_new_1 is left unused -- confirm which matrix was intended.
np.save('R23_enhanced_matrix.npy', R23_new)
| [
"networkx.shortest_path_length",
"numpy.save",
"numpy.zeros",
"networkx.has_path",
"networkx.DiGraph"
] | [((885, 897), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (895, 897), True, 'import networkx as nx\n'), ((1109, 1127), 'numpy.zeros', 'np.zeros', (['(n2, n3)'], {}), '((n2, n3))\n', (1117, 1127), True, 'import numpy as np\n'), ((1632, 1675), 'numpy.save', 'np.save', (['"""R23_enhanced_matrix.npy"""', 'R23_new'], {}), "('R23_enhanced_matrix.npy', R23_new)\n", (1639, 1675), True, 'import numpy as np\n'), ((1192, 1229), 'networkx.has_path', 'nx.has_path', (['G', 'PROTEINS[j]', 'DRUGS[i]'], {}), '(G, PROTEINS[j], DRUGS[i])\n', (1203, 1229), True, 'import networkx as nx\n'), ((1258, 1321), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['G'], {'source': 'PROTEINS[j]', 'target': 'DRUGS[i]'}), '(G, source=PROTEINS[j], target=DRUGS[i])\n', (1281, 1321), True, 'import networkx as nx\n')] |
from math import *
from prettytable import PrettyTable
import numpy as np
def solutions():
    """Read an interval [x1, x2) and step dx from stdin, then print a
    table of a piecewise curve built around a circle of radius 3."""
    # x1, x2 = -7, 12
    # dx = 1
    x1, x2 = map(float, input('x1, x2: ').split())
    dx = float(input('dx: '))

    table = PrettyTable()
    table.field_names = ["x", "value"]

    rad = pow(3, 2)  # squared radius of the circular segment
    for x in np.arange(x1, x2, dx):
        x = round(x, 5)  # suppress float-step noise before the comparisons
        if x <= -1 * sqrt(rad):
            y = sqrt(rad)
        elif x >= 2 * sqrt(rad):
            y = x - 3 * sqrt(rad)
        elif rad - pow(x, 2) >= 0:
            y = sqrt(rad) - sqrt(rad - pow(x, 2))
        else:
            y = -2 * x + 3 * sqrt(rad)
        table.add_row([x, y])
    print(table)
if __name__ == '__main__':
solutions() | [
"numpy.arange",
"prettytable.PrettyTable"
] | [((222, 235), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (233, 235), False, 'from prettytable import PrettyTable\n'), ((318, 339), 'numpy.arange', 'np.arange', (['x1', 'x2', 'dx'], {}), '(x1, x2, dx)\n', (327, 339), True, 'import numpy as np\n')] |
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monty.collections import AttrDict
from torch.distributions import Bernoulli, LogisticNormal, Normal
from torch_scae import cv_ops, math_ops
from torch_scae import nn_ext
from torch_scae.general_utils import prod
from torch_scae.math_ops import l2_loss
class CapsuleLayer(nn.Module):
    """Implementation of a capsule layer."""

    # number of parameters needed to parametrize linear transformations.
    n_transform_params = 6  # P

    def __init__(self,
                 n_caps,
                 dim_feature,
                 n_votes,
                 dim_caps,
                 hidden_sizes=(128,),
                 caps_dropout_rate=0.0,
                 learn_vote_scale=False,
                 allow_deformations=True,
                 noise_type=None,
                 noise_scale=0.,
                 similarity_transform=True,
                 ):
        """Builds the module.

        Args:
          n_caps: int, number of capsules.
          dim_feature: int, dimensionality of each capsule's input encoding.
          n_votes: int, number of votes generated by each capsule.
          dim_caps: int, number of capsule parameters.
          hidden_sizes: int or sequence of ints, number of hidden units for an MLP
            which predicts capsule params from the input encoding.
          caps_dropout_rate: float in [0, 1].
          learn_vote_scale: bool, learns input-dependent scale for each
            capsules' votes.
          allow_deformations: bool, allows input-dependent deformations of capsule-part
            relationships.
          noise_type: 'uniform', 'logistic' or None; noise type injected into
            presence logits.  (The code dispatches on 'uniform', not 'normal'.)
          noise_scale: float >= 0. scale parameters for the noise.
          similarity_transform: boolean; uses similarity transforms if True.
        """
        super().__init__()
        self.n_caps = n_caps  # O
        self.dim_feature = dim_feature  # F
        self.hidden_sizes = list(hidden_sizes)  # [H_i, ...]
        self.dim_caps = dim_caps  # D
        self.caps_dropout_rate = caps_dropout_rate
        self.n_votes = n_votes
        self.learn_vote_scale = learn_vote_scale
        self.allow_deformations = allow_deformations
        self.noise_type = noise_type
        self.noise_scale = noise_scale

        self.similarity_transform = similarity_transform

        self._build()

    def _build(self):
        """Creates the per-capsule MLPs and the learnable parameters."""
        # Use separate parameters to do predictions for different capsules.
        sizes = [self.dim_feature] + self.hidden_sizes + [self.dim_caps]
        self.mlps = nn.ModuleList([
            nn_ext.MLP(sizes=sizes)
            for _ in range(self.n_caps)
        ])

        self.output_shapes = (
            [self.n_votes, self.n_transform_params],  # OPR-dynamic
            [1, self.n_transform_params],  # OVR
            [1],  # per-object presence
            [self.n_votes],  # per-vote-presence
            [self.n_votes],  # per-vote scale
        )
        self.splits = [prod(i) for i in self.output_shapes]
        self.n_outputs = sum(self.splits)  # A

        # we don't use bias in the output layer in order to separate the static
        # and dynamic parts of the OP
        sizes = [self.dim_caps + 1] + self.hidden_sizes + [self.n_outputs]
        self.caps_mlps = nn.ModuleList([
            nn_ext.MLP(sizes=sizes, bias=False)
            for _ in range(self.n_caps)
        ])

        # Learnable biases for every output head except OPR-dynamic.
        self.caps_bias_list = nn.ParameterList([
            nn.Parameter(torch.zeros(1, self.n_caps, *shape), requires_grad=True)
            for shape in self.output_shapes[1:]
        ])

        # constant object-part relationship matrices, OPR-static
        self.cpr_static = nn.Parameter(
            torch.zeros([1, self.n_caps, self.n_votes, self.n_transform_params]),
            requires_grad=True
        )

    def forward(self, feature, parent_transform=None, parent_presence=None):
        """Predicts votes, presences and scales from capsule encodings.

        Args:
          feature: Tensor of encodings of shape [B, O, F].
          parent_transform: optional pre-computed object-viewer transform
            used in hierarchical setups; replaces the predicted CVR.
          parent_presence: optional per-capsule presence used instead of the
            predicted one.

        Returns:
          AttrDict with vote, scale, vote_presence, presence logits, and the
          deformation regularization loss.
        """
        device = feature.device
        batch_size = feature.shape[0]  # B

        # Predict capsule and additional params from the input encoding.
        # [B, O, D]
        caps_feature_list = feature.unbind(1)  # [(B, F)] * O
        caps_param_list = [self.mlps[i](caps_feature_list[i])
                           for i in range(self.n_caps)]  # [(B, D)] * O
        del feature, caps_feature_list
        raw_caps_param = torch.stack(caps_param_list, 1)  # (B, O, D)
        del caps_param_list

        if self.caps_dropout_rate == 0.0:
            caps_exist = torch.ones(batch_size, self.n_caps, 1)  # (B, O, 1)
        else:
            pmf = Bernoulli(1. - self.caps_dropout_rate)
            caps_exist = pmf.sample((batch_size, self.n_caps, 1))  # (B, O, 1)
        caps_exist = caps_exist.to(device)

        caps_param = torch.cat([raw_caps_param, caps_exist], -1)  # (B, O, D+1)
        # BUG FIX: `caps_exist` must stay alive here -- it is used again below
        # when caps_dropout_rate > 0; the original `del raw_caps_param,
        # caps_exist` made that branch raise a NameError.
        del raw_caps_param

        caps_eparam_list = caps_param.unbind(1)  # [(B, D+1)] * O
        all_param_list = [self.caps_mlps[i](caps_eparam_list[i])
                          for i in range(self.n_caps)]  # [(B, A)] * O
        del caps_eparam_list
        all_param = torch.stack(all_param_list, 1)  # (B, O, A)
        del all_param_list

        all_param_split_list = torch.split(all_param, self.splits, -1)
        result = [t.view(batch_size, self.n_caps, *s)
                  for (t, s) in zip(all_param_split_list, self.output_shapes)]
        del all_param
        del all_param_split_list

        # add up static and dynamic object part relationship
        cpr_dynamic = result[0]  # (B, O, V, P)
        if not self.allow_deformations:
            cpr_dynamic = torch.zeros_like(cpr_dynamic)
        cpr_dynamic_reg_loss = l2_loss(cpr_dynamic) / batch_size
        cpr = self._make_transform(cpr_dynamic + self.cpr_static)  # (B, O, V, 3, 3)
        del cpr_dynamic

        # add bias to all remaining outputs
        # (B, O, 1, P), (B, O, 1), (B, O, V), (B, O, V)
        cvr, presence_logit_per_caps, presence_logit_per_vote, scale_per_vote = [
            t + bias
            for (t, bias) in zip(result[1:], self.caps_bias_list)
        ]
        del result

        # this is for hierarchical
        # (B, O, 1, 3, 3)
        if parent_transform is None:
            cvr = self._make_transform(cvr)
        else:
            cvr = parent_transform

        cvr_per_vote = cvr.repeat(1, 1, self.n_votes, 1, 1)  # (B, O, V, 3, 3)
        # PVR = OVR x OPR
        vote = torch.matmul(cvr_per_vote, cpr)  # (B, O, V, 3, 3)
        del cvr_per_vote, cpr

        if self.caps_dropout_rate > 0.0:
            # Dropped capsules (caps_exist == 0) get -inf presence logits.
            presence_logit_per_caps = presence_logit_per_caps \
                                      + math_ops.log_safe(caps_exist)
        del caps_exist

        def add_noise(tensor):
            """Adds noise to tensors."""
            if self.noise_type == 'uniform':
                noise = (torch.rand_like(tensor) - 0.5) * self.noise_scale
            elif self.noise_type == 'logistic':
                pdf = LogisticNormal(0., self.noise_scale)
                noise = pdf.sample(tensor.shape)
            elif not self.noise_type:
                noise = torch.tensor([0.0])
            else:
                raise ValueError(f'Invalid noise type: {self.noise_type}')
            return tensor + noise.to(device)

        presence_logit_per_caps = add_noise(presence_logit_per_caps)  # (B, O, 1)
        presence_logit_per_vote = add_noise(presence_logit_per_vote)  # (B, O, V)

        if parent_presence is not None:
            presence_per_caps = parent_presence
        else:
            presence_per_caps = torch.sigmoid(presence_logit_per_caps)

        vote_presence = presence_per_caps * torch.sigmoid(presence_logit_per_vote)  # (B, O, V)
        del presence_per_caps

        # (B, O, V)
        if self.learn_vote_scale:
            # for numerical stability
            scale_per_vote = F.softplus(scale_per_vote + .5) + 1e-2
        else:
            scale_per_vote = torch.ones_like(scale_per_vote, device=device)

        return AttrDict(
            vote=vote,  # (B, O, V, 3, 3)
            scale=scale_per_vote,  # (B, O, V)
            vote_presence=vote_presence,  # (B, O, V)
            presence_logit_per_caps=presence_logit_per_caps,  # (B, O, 1)
            presence_logit_per_vote=presence_logit_per_vote,  # (B, O, V)
            cpr_dynamic_reg_loss=cpr_dynamic_reg_loss,
        )

    def _make_transform(self, params):
        # Turn raw 6-dof parameters into 3x3 (similarity) transform matrices.
        return cv_ops.geometric_transform(params, self.similarity_transform,
                                          nonlinear=True, as_matrix=True)
class CapsuleLikelihood:
    """Capsule voting mechanism.

    Given per-capsule votes for part poses, scores how well the actual part
    poses `x` are explained by a mixture of the capsule votes plus one
    low-probability "dummy" component that absorbs unexplained parts.
    """

    def __init__(self, vote, scale, vote_presence, dummy_vote):
        super().__init__()
        self.n_caps = vote.shape[1]  # O
        self.vote = vote  # (B, O, M, P)
        self.scale = scale  # (B, O, M)
        self.vote_presence = vote_presence  # (B, O, M)
        self.dummy_vote = dummy_vote  # (1, 1, M, P)

    def _get_pdf(self, votes, scales):
        # Independent Gaussian per vote dimension, centered at the vote.
        return Normal(votes, scales)

    def __call__(self, x, presence=None):  # (B, M, P), (B, M)
        """Evaluates the mixture likelihood of part poses `x`.

        Returns an AttrDict with the batch log-likelihood, hard and soft
        winning votes per part, and the posterior mixing distributions.
        """
        device = x.device
        batch_size, n_input_points, dim_in = x.shape  # B, M, P

        # since scale is a per-caps scalar and we have one vote per capsule
        vote_component_pdf = self._get_pdf(self.vote,
                                           self.scale.unsqueeze(-1))

        # expand input along caps dimensions
        expanded_x = x.unsqueeze(1)  # (B, 1, M, P)
        vote_log_prob_per_dim = vote_component_pdf.log_prob(expanded_x)  # (B, O, M, P)
        vote_log_prob = vote_log_prob_per_dim.sum(-1)  # (B, O, M)
        del x, expanded_x, vote_log_prob_per_dim

        # (B, 1, M) -- constant log-density of the dummy component.
        dummy_vote_log_prob = torch.zeros(
            batch_size, 1, n_input_points, device=device) + np.log(0.01)

        # p(x_m | k, m)
        vote_log_prob = torch.cat([vote_log_prob, dummy_vote_log_prob], 1)  # (B, O+1, M)
        del dummy_vote_log_prob

        # Fixed mixing logit for the dummy component.
        dummy_logit = torch.full((batch_size, 1, n_input_points),
                                 fill_value=np.log(0.01), device=device)

        mixing_logit = math_ops.log_safe(self.vote_presence)  # (B, O, M)
        mixing_logit = torch.cat([mixing_logit, dummy_logit], 1)  # (B, O+1, M)
        mixing_log_prob = mixing_logit - mixing_logit.logsumexp(1, keepdim=True)  # (B, O+1, M)

        # mask for votes which are better than dummy vote
        vote_presence_binary = (mixing_logit[:, :-1] > mixing_logit[:, -1:]).float()  # (B, O, M)

        # (B, O + 1, M)
        posterior_mixing_logits_per_point = mixing_logit + vote_log_prob
        del vote_log_prob

        # (B, M)
        mixture_log_prob_per_point = posterior_mixing_logits_per_point.logsumexp(1)

        if presence is not None:
            # Zero out the contribution of absent input parts.
            mixture_log_prob_per_point = mixture_log_prob_per_point * presence.float()

        # (B,)
        mixture_log_prob_per_example = mixture_log_prob_per_point.sum(1)
        del mixture_log_prob_per_point

        # scalar
        mixture_log_prob_per_batch = mixture_log_prob_per_example.mean()
        del mixture_log_prob_per_example

        # winner object index per part
        winning_vote_idx = torch.argmax(
            posterior_mixing_logits_per_point[:, :-1], 1)  # (B, M)

        batch_idx = torch.arange(batch_size, device=device).unsqueeze(1)  # (B, 1)
        batch_idx = batch_idx.repeat(1, n_input_points)  # (B, M)

        point_idx = torch.arange(n_input_points, device=device).unsqueeze(0)  # (1, M)
        point_idx = point_idx.repeat(batch_size, 1)  # (B, M)

        idx = torch.stack([batch_idx, winning_vote_idx, point_idx], -1)
        del batch_idx
        del point_idx

        # (B, M, P) -- hard (argmax) winning vote per input part.
        winning_vote = self.vote[idx[:, :, 0], idx[:, :, 1], idx[:, :, 2]]
        assert winning_vote.shape == (batch_size, n_input_points, dim_in)

        # (B, M)
        winning_presence = \
            self.vote_presence[idx[:, :, 0], idx[:, :, 1], idx[:, :, 2]]
        assert winning_presence.shape == (batch_size, n_input_points)
        del idx

        # is winner capsule or dummy
        # NOTE(review): `winning_vote_idx` ranges over capsules (0..O-1), so
        # dividing by n_input_points looks suspicious -- confirm intent
        # against the original SCAE implementation.
        is_from_capsule = winning_vote_idx // n_input_points

        # Soft winner. START
        # (B, O+1, M)
        posterior_mixing_prob = F.softmax(posterior_mixing_logits_per_point, 1)
        del posterior_mixing_logits_per_point

        dummy_vote = self.dummy_vote.repeat(batch_size, 1, 1, 1)  # (B, 1, M, P)
        dummy_presence = torch.zeros([batch_size, 1, n_input_points], device=device)

        votes = torch.cat((self.vote, dummy_vote), 1)  # (B, O+1, M, P)
        vote_presence = torch.cat([self.vote_presence, dummy_presence], 1)  # (B, O+1, M)
        del dummy_vote
        del dummy_presence

        # (B, M, P) -- posterior-weighted average of all votes.
        soft_winner_vote = torch.sum(posterior_mixing_prob.unsqueeze(-1) * votes, 1)
        assert soft_winner_vote.shape == (batch_size, n_input_points, dim_in)

        # (B, M)
        soft_winner_presence = torch.sum(posterior_mixing_prob * vote_presence, 1)
        assert soft_winner_presence.shape == (batch_size, n_input_points)
        # Soft winner. END

        # (B, O, M) -- drop the dummy component before returning.
        posterior_mixing_prob = posterior_mixing_prob[:, :-1]

        return AttrDict(
            log_prob=mixture_log_prob_per_batch,
            vote_presence_binary=vote_presence_binary,
            winner=winning_vote,
            winner_presence=winning_presence,
            soft_winner=soft_winner_vote,
            soft_winner_presence=soft_winner_presence,
            posterior_mixing_prob=posterior_mixing_prob,
            mixing_log_prob=mixing_log_prob,
            mixing_logit=mixing_logit,
            is_from_capsule=is_from_capsule,
        )
class CapsuleObjectDecoder(nn.Module):
    """Decodes object-capsule encodings into part votes and their likelihood."""

    def __init__(self, capsule_layer: CapsuleLayer):
        """
        Args:
          capsule_layer: a capsule layer to predict object parameters
        """
        super().__init__()
        self.capsule_layer = capsule_layer

        # Learnable vote for the dummy mixture component; shape (1, 1, V, P).
        self.dummy_vote = nn.Parameter(
            torch.zeros(1, 1, capsule_layer.n_votes, capsule_layer.n_transform_params),
            requires_grad=True
        )

    @property
    def n_obj_capsules(self):
        # Number of object capsules (O) exposed by the wrapped layer.
        return self.capsule_layer.n_caps

    def forward(self,
                obj_encoding: torch.Tensor,
                part_pose: torch.Tensor,
                part_presence: torch.Tensor = None):
        """
        Args:
          obj_encoding: Tensor of shape [B, O, D].
          part_pose: Tensor of shape [B, M, P]
          part_presence: Tensor of shape [B, M] or None; if it exists, it
            indicates which input parts exist.

        Returns:
          A bunch of stuff.
        """
        batch_size, n_caps = obj_encoding.shape[:2]
        n_votes = part_pose.shape[1]

        res = self.capsule_layer(obj_encoding)

        # remove homogeneous coord row from transformation matrices
        # and flatten last two dimensions
        res.vote = res.vote[..., :-1, :].view(batch_size, n_caps, n_votes, -1)

        # compute capsule presence by maximum part vote
        res.caps_presence = res.vote_presence.max(-1)[0]

        # compute likelihood of object decoding
        likelihood = CapsuleLikelihood(
            vote=res.vote,
            scale=res.scale,
            vote_presence=res.vote_presence,
            dummy_vote=self.dummy_vote
        )
        ll_res = likelihood(part_pose, presence=part_presence)
        res.update(ll_res)
        del likelihood

        return res
# prior sparsity loss
# l2(aggregated_prob - constant)
def capsule_l2_loss(caps_presence,
                    n_classes: int,
                    within_example_constant=None,
                    **unused_kwargs):
    """Computes l2 penalty on capsule activations.

    Args:
      caps_presence: Tensor of shape [B, O] with per-capsule activations.
      n_classes: int, number of target classes (C).
      within_example_constant: optional float target for the per-example sum
        of activations; defaults to O / C.
      **unused_kwargs: ignored; accepted for interface compatibility.

    Returns:
      Tuple (within_example, between_example) of scalar penalty tensors.
    """
    del unused_kwargs

    n_examples, n_capsules = caps_presence.shape  # B, O
    if within_example_constant is None:
        within_example_constant = float(n_capsules) / n_classes  # O / C
    between_example_constant = float(n_examples) / n_classes  # B / C

    def _squared_deviation(values, target):
        # Mean squared distance of aggregated activations from `target`.
        return torch.mean((values - target) ** 2)

    within_example = _squared_deviation(caps_presence.sum(1),
                                        within_example_constant)
    between_example = _squared_deviation(caps_presence.sum(0),
                                         between_example_constant)
    return within_example, between_example
# posterior sparsity loss
def capsule_entropy_loss(caps_presence, k=1, **unused_kwargs):
    """Computes entropy in capsule activations.

    Args:
      caps_presence: Tensor of shape [B, O] with per-capsule activations.
      k: multiplier applied to the target distribution inside the
        cross-entropy (k=1 yields the plain entropy).
      **unused_kwargs: ignored; accepted for interface compatibility.

    Returns:
      Tuple (within_example, -between_example); the between-example entropy
      is negated because it should be *increased* by the optimizer.
    """
    del unused_kwargs

    def _scaled_entropy(prob):
        # Cross-entropy of a distribution against a scaled copy of itself.
        return math_ops.cross_entropy_safe(prob, prob * k)

    # Entropy over capsules within each example; (B, O) -> scalar.
    within_example = _scaled_entropy(math_ops.normalize(caps_presence, 1))

    # Entropy of the activations aggregated over the batch; (O,) -> scalar.
    aggregated_prob = torch.sum(caps_presence, 0)  # (O, )
    between_example = _scaled_entropy(math_ops.normalize(aggregated_prob, 0))

    # negate since we want to increase between example entropy
    return within_example, -between_example
# kl(aggregated_prob||uniform)
def neg_capsule_kl(caps_presence, **unused_kwargs):
    """KL(aggregated_prob || uniform), expressed via the entropy loss with k = O."""
    del unused_kwargs
    num_capsules = int(caps_presence.shape[-1])
    return capsule_entropy_loss(caps_presence, k=num_capsules)
def sparsity_loss(loss_type, *args, **kwargs):
    """Computes capsule sparsity loss according to the specified type.

    Args:
      loss_type: one of 'l2', 'entropy' or 'kl'.
      *args, **kwargs: forwarded unchanged to the selected loss function.

    Returns:
      Whatever the selected loss function returns.

    Raises:
      ValueError: if `loss_type` is not recognized.
    """
    if loss_type == 'l2':
        return capsule_l2_loss(*args, **kwargs)
    if loss_type == 'entropy':
        return capsule_entropy_loss(*args, **kwargs)
    if loss_type == 'kl':
        return neg_capsule_kl(*args, **kwargs)
    raise ValueError(f"Invalid sparsity loss: {loss_type}")
| [
"torch_scae.math_ops.normalize",
"torch.distributions.Bernoulli",
"torch_scae.math_ops.cross_entropy_safe",
"torch.argmax",
"torch_scae.math_ops.log_safe",
"torch.cat",
"torch.rand_like",
"torch.arange",
"torch.ones",
"monty.collections.AttrDict",
"torch_scae.nn_ext.MLP",
"torch.distributions.... | [((17341, 17377), 'torch_scae.math_ops.normalize', 'math_ops.normalize', (['caps_presence', '(1)'], {}), '(caps_presence, 1)\n', (17359, 17377), False, 'from torch_scae import cv_ops, math_ops\n'), ((17409, 17466), 'torch_scae.math_ops.cross_entropy_safe', 'math_ops.cross_entropy_safe', (['within_prob', '(within_prob * k)'], {}), '(within_prob, within_prob * k)\n', (17436, 17466), False, 'from torch_scae import cv_ops, math_ops\n'), ((17549, 17576), 'torch.sum', 'torch.sum', (['caps_presence', '(0)'], {}), '(caps_presence, 0)\n', (17558, 17576), False, 'import torch\n'), ((17605, 17643), 'torch_scae.math_ops.normalize', 'math_ops.normalize', (['total_caps_prob', '(0)'], {}), '(total_caps_prob, 0)\n', (17623, 17643), False, 'from torch_scae import cv_ops, math_ops\n'), ((17675, 17734), 'torch_scae.math_ops.cross_entropy_safe', 'math_ops.cross_entropy_safe', (['between_prob', '(between_prob * k)'], {}), '(between_prob, between_prob * k)\n', (17702, 17734), False, 'from torch_scae import cv_ops, math_ops\n'), ((5222, 5253), 'torch.stack', 'torch.stack', (['caps_param_list', '(1)'], {}), '(caps_param_list, 1)\n', (5233, 5253), False, 'import torch\n'), ((5630, 5673), 'torch.cat', 'torch.cat', (['[raw_caps_param, caps_exist]', '(-1)'], {}), '([raw_caps_param, caps_exist], -1)\n', (5639, 5673), False, 'import torch\n'), ((5980, 6010), 'torch.stack', 'torch.stack', (['all_param_list', '(1)'], {}), '(all_param_list, 1)\n', (5991, 6010), False, 'import torch\n'), ((6082, 6121), 'torch.split', 'torch.split', (['all_param', 'self.splits', '(-1)'], {}), '(all_param, self.splits, -1)\n', (6093, 6121), False, 'import torch\n'), ((7302, 7333), 'torch.matmul', 'torch.matmul', (['cvr_per_vote', 'cpr'], {}), '(cvr_per_vote, cpr)\n', (7314, 7333), False, 'import torch\n'), ((8861, 9085), 'monty.collections.AttrDict', 'AttrDict', ([], {'vote': 'vote', 'scale': 'scale_per_vote', 'vote_presence': 'vote_presence', 'presence_logit_per_caps': 
'presence_logit_per_caps', 'presence_logit_per_vote': 'presence_logit_per_vote', 'cpr_dynamic_reg_loss': 'cpr_dynamic_reg_loss'}), '(vote=vote, scale=scale_per_vote, vote_presence=vote_presence,\n presence_logit_per_caps=presence_logit_per_caps,\n presence_logit_per_vote=presence_logit_per_vote, cpr_dynamic_reg_loss=\n cpr_dynamic_reg_loss)\n', (8869, 9085), False, 'from monty.collections import AttrDict\n'), ((9282, 9380), 'torch_scae.cv_ops.geometric_transform', 'cv_ops.geometric_transform', (['params', 'self.similarity_transform'], {'nonlinear': '(True)', 'as_matrix': '(True)'}), '(params, self.similarity_transform, nonlinear=\n True, as_matrix=True)\n', (9308, 9380), False, 'from torch_scae import cv_ops, math_ops\n'), ((9859, 9880), 'torch.distributions.Normal', 'Normal', (['votes', 'scales'], {}), '(votes, scales)\n', (9865, 9880), False, 'from torch.distributions import Bernoulli, LogisticNormal, Normal\n'), ((10724, 10774), 'torch.cat', 'torch.cat', (['[vote_log_prob, dummy_vote_log_prob]', '(1)'], {}), '([vote_log_prob, dummy_vote_log_prob], 1)\n', (10733, 10774), False, 'import torch\n'), ((10996, 11033), 'torch_scae.math_ops.log_safe', 'math_ops.log_safe', (['self.vote_presence'], {}), '(self.vote_presence)\n', (11013, 11033), False, 'from torch_scae import cv_ops, math_ops\n'), ((11070, 11111), 'torch.cat', 'torch.cat', (['[mixing_logit, dummy_logit]', '(1)'], {}), '([mixing_logit, dummy_logit], 1)\n', (11079, 11111), False, 'import torch\n'), ((12054, 12112), 'torch.argmax', 'torch.argmax', (['posterior_mixing_logits_per_point[:, :-1]', '(1)'], {}), '(posterior_mixing_logits_per_point[:, :-1], 1)\n', (12066, 12112), False, 'import torch\n'), ((12451, 12508), 'torch.stack', 'torch.stack', (['[batch_idx, winning_vote_idx, point_idx]', '(-1)'], {}), '([batch_idx, winning_vote_idx, point_idx], -1)\n', (12462, 12508), False, 'import torch\n'), ((13112, 13159), 'torch.nn.functional.softmax', 'F.softmax', (['posterior_mixing_logits_per_point', '(1)'], {}), 
'(posterior_mixing_logits_per_point, 1)\n', (13121, 13159), True, 'import torch.nn.functional as F\n'), ((13313, 13372), 'torch.zeros', 'torch.zeros', (['[batch_size, 1, n_input_points]'], {'device': 'device'}), '([batch_size, 1, n_input_points], device=device)\n', (13324, 13372), False, 'import torch\n'), ((13390, 13427), 'torch.cat', 'torch.cat', (['(self.vote, dummy_vote)', '(1)'], {}), '((self.vote, dummy_vote), 1)\n', (13399, 13427), False, 'import torch\n'), ((13470, 13520), 'torch.cat', 'torch.cat', (['[self.vote_presence, dummy_presence]', '(1)'], {}), '([self.vote_presence, dummy_presence], 1)\n', (13479, 13520), False, 'import torch\n'), ((13819, 13870), 'torch.sum', 'torch.sum', (['(posterior_mixing_prob * vote_presence)', '(1)'], {}), '(posterior_mixing_prob * vote_presence, 1)\n', (13828, 13870), False, 'import torch\n'), ((14071, 14448), 'monty.collections.AttrDict', 'AttrDict', ([], {'log_prob': 'mixture_log_prob_per_batch', 'vote_presence_binary': 'vote_presence_binary', 'winner': 'winning_vote', 'winner_presence': 'winning_presence', 'soft_winner': 'soft_winner_vote', 'soft_winner_presence': 'soft_winner_presence', 'posterior_mixing_prob': 'posterior_mixing_prob', 'mixing_log_prob': 'mixing_log_prob', 'mixing_logit': 'mixing_logit', 'is_from_capsule': 'is_from_capsule'}), '(log_prob=mixture_log_prob_per_batch, vote_presence_binary=\n vote_presence_binary, winner=winning_vote, winner_presence=\n winning_presence, soft_winner=soft_winner_vote, soft_winner_presence=\n soft_winner_presence, posterior_mixing_prob=posterior_mixing_prob,\n mixing_log_prob=mixing_log_prob, mixing_logit=mixing_logit,\n is_from_capsule=is_from_capsule)\n', (14079, 14448), False, 'from monty.collections import AttrDict\n'), ((3645, 3652), 'torch_scae.general_utils.prod', 'prod', (['i'], {}), '(i)\n', (3649, 3652), False, 'from torch_scae.general_utils import prod\n'), ((4372, 4440), 'torch.zeros', 'torch.zeros', (['[1, self.n_caps, self.n_votes, self.n_transform_params]'], 
{}), '([1, self.n_caps, self.n_votes, self.n_transform_params])\n', (4383, 4440), False, 'import torch\n'), ((5363, 5401), 'torch.ones', 'torch.ones', (['batch_size', 'self.n_caps', '(1)'], {}), '(batch_size, self.n_caps, 1)\n', (5373, 5401), False, 'import torch\n'), ((5447, 5486), 'torch.distributions.Bernoulli', 'Bernoulli', (['(1.0 - self.caps_dropout_rate)'], {}), '(1.0 - self.caps_dropout_rate)\n', (5456, 5486), False, 'from torch.distributions import Bernoulli, LogisticNormal, Normal\n'), ((6486, 6515), 'torch.zeros_like', 'torch.zeros_like', (['cpr_dynamic'], {}), '(cpr_dynamic)\n', (6502, 6515), False, 'import torch\n'), ((6547, 6567), 'torch_scae.math_ops.l2_loss', 'l2_loss', (['cpr_dynamic'], {}), '(cpr_dynamic)\n', (6554, 6567), False, 'from torch_scae.math_ops import l2_loss\n'), ((8428, 8466), 'torch.sigmoid', 'torch.sigmoid', (['presence_logit_per_caps'], {}), '(presence_logit_per_caps)\n', (8441, 8466), False, 'import torch\n'), ((8512, 8550), 'torch.sigmoid', 'torch.sigmoid', (['presence_logit_per_vote'], {}), '(presence_logit_per_vote)\n', (8525, 8550), False, 'import torch\n'), ((8798, 8844), 'torch.ones_like', 'torch.ones_like', (['scale_per_vote'], {'device': 'device'}), '(scale_per_vote, device=device)\n', (8813, 8844), False, 'import torch\n'), ((10589, 10646), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)', 'n_input_points'], {'device': 'device'}), '(batch_size, 1, n_input_points, device=device)\n', (10600, 10646), False, 'import torch\n'), ((10662, 10674), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (10668, 10674), True, 'import numpy as np\n'), ((14882, 14956), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'capsule_layer.n_votes', 'capsule_layer.n_transform_params'], {}), '(1, 1, capsule_layer.n_votes, capsule_layer.n_transform_params)\n', (14893, 14956), False, 'import torch\n'), ((3253, 3276), 'torch_scae.nn_ext.MLP', 'nn_ext.MLP', ([], {'sizes': 'sizes'}), '(sizes=sizes)\n', (3263, 3276), False, 'from torch_scae import 
nn_ext\n'), ((3976, 4011), 'torch_scae.nn_ext.MLP', 'nn_ext.MLP', ([], {'sizes': 'sizes', 'bias': '(False)'}), '(sizes=sizes, bias=False)\n', (3986, 4011), False, 'from torch_scae import nn_ext\n'), ((7529, 7558), 'torch_scae.math_ops.log_safe', 'math_ops.log_safe', (['caps_exist'], {}), '(caps_exist)\n', (7546, 7558), False, 'from torch_scae import cv_ops, math_ops\n'), ((8716, 8748), 'torch.nn.functional.softplus', 'F.softplus', (['(scale_per_vote + 0.5)'], {}), '(scale_per_vote + 0.5)\n', (8726, 8748), True, 'import torch.nn.functional as F\n'), ((10943, 10955), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (10949, 10955), True, 'import numpy as np\n'), ((12157, 12196), 'torch.arange', 'torch.arange', (['batch_size'], {'device': 'device'}), '(batch_size, device=device)\n', (12169, 12196), False, 'import torch\n'), ((12307, 12350), 'torch.arange', 'torch.arange', (['n_input_points'], {'device': 'device'}), '(n_input_points, device=device)\n', (12319, 12350), False, 'import torch\n'), ((4138, 4173), 'torch.zeros', 'torch.zeros', (['(1)', 'self.n_caps', '*shape'], {}), '(1, self.n_caps, *shape)\n', (4149, 4173), False, 'import torch\n'), ((7822, 7859), 'torch.distributions.LogisticNormal', 'LogisticNormal', (['(0.0)', 'self.noise_scale'], {}), '(0.0, self.noise_scale)\n', (7836, 7859), False, 'from torch.distributions import Bernoulli, LogisticNormal, Normal\n'), ((7702, 7725), 'torch.rand_like', 'torch.rand_like', (['tensor'], {}), '(tensor)\n', (7717, 7725), False, 'import torch\n'), ((7970, 7989), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (7982, 7989), False, 'import torch\n')] |
# 0.7908 ± 0.0024
import argparse
import torch
from tqdm import tqdm
import torch.nn.functional as F
from torch_geometric.data import GraphSAINTRandomWalkSampler, NeighborSampler
from torch_geometric.nn import SAGEConv
from torch_geometric.utils import subgraph
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
import numpy as np
import sys
sys.path.insert(0,'../..')
from attacks import *
class SAGE(torch.nn.Module):
    """Multi-layer GraphSAGE with ReLU + dropout and log-softmax output."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(SAGE, self).__init__()

        # First layer maps inputs to hidden size; middle layers keep it;
        # the last layer maps to the number of classes.
        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, out_channels))

        self.dropout = dropout

    def reset_parameters(self):
        # Re-initializes every convolution (used between independent runs).
        for conv in self.convs:
            conv.reset_parameters()

    def forward(self, x, edge_index, edge_weight=None):
        """Full-graph forward pass; returns per-node log-probabilities."""
        for conv in self.convs[:-1]:
            x = conv(x, edge_index, edge_weight)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[-1](x, edge_index, edge_weight)
        return torch.log_softmax(x, dim=-1)

    def inference(self, x_all, subgraph_loader, device):
        """Layer-wise mini-batch inference over all nodes.

        Evaluates one conv layer at a time across the whole graph via
        `subgraph_loader`, keeping intermediate features on CPU.
        """
        pbar = tqdm(total=x_all.size(0) * len(self.convs))
        pbar.set_description('Evaluating')

        for i, conv in enumerate(self.convs):
            xs = []
            for batch_size, n_id, adj in subgraph_loader:
                edge_index, _, size = adj.to(device)
                x = x_all[n_id].to(device)
                # Target nodes come first in n_id; size[1] is their count.
                x_target = x[:size[1]]
                x = conv((x, x_target), edge_index)
                if i != len(self.convs) - 1:
                    x = F.relu(x)
                xs.append(x.cpu())

                pbar.update(batch_size)

            x_all = torch.cat(xs, dim=0)

        pbar.close()

        return x_all
def train(model, loader, optimizer, device):
    """Runs one training epoch over `loader`; returns the mean NLL loss per batch."""
    model.train()

    running_loss = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()

        predictions = model(batch.x, batch.edge_index)
        labels = batch.y.squeeze(1)
        mask = batch.train_mask

        batch_loss = F.nll_loss(predictions[mask], labels[mask])
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()

    return running_loss / len(loader)
def train_flag(model, loader, optimizer, device, args):
    """One epoch of FLAG adversarial training; returns the mean loss per batch."""
    model.train()

    loss_sum = 0.0
    for batch in loader:
        batch = batch.to(device)
        targets = batch.y.squeeze(1)[batch.train_mask]

        def forward(perturb, batch=batch):
            # FLAG perturbs the node features before the model call.
            return model(batch.x + perturb, batch.edge_index)[batch.train_mask]

        loss, _ = flag_biased((model, forward), batch.x.shape, targets, args,
                             optimizer, device, F.nll_loss, batch.train_mask)
        loss_sum += loss.item()

    return loss_sum / len(loader)
@torch.no_grad()
def test(model, data, evaluator, subgraph_loader, device):
    """Evaluates the model; returns (train_acc, valid_acc, test_acc)."""
    model.eval()

    logits = model.inference(data.x, subgraph_loader, device)
    labels = data.y
    predictions = logits.argmax(dim=-1, keepdim=True)

    def _accuracy(mask):
        # OGB evaluator expects the labels/predictions restricted to a split.
        return evaluator.eval({
            'y_true': labels[mask],
            'y_pred': predictions[mask]
        })['acc']

    return (_accuracy(data.train_mask),
            _accuracy(data.valid_mask),
            _accuracy(data.test_mask))
def to_inductive(data):
    """Restricts `data` (in place) to its training nodes for inductive learning."""
    train_mask = data.train_mask

    # Keep only the features/labels/masks of the training nodes.
    data.x = data.x[train_mask]
    data.y = data.y[train_mask]
    data.train_mask = data.train_mask[train_mask]
    data.test_mask = None

    # Drop edges touching non-training nodes and relabel the survivors.
    data.edge_index, _ = subgraph(train_mask, data.edge_index, None,
                                  relabel_nodes=True,
                                  num_nodes=data.num_nodes)
    data.num_nodes = train_mask.sum().item()
    return data
def main():
    """Trains GraphSAGE on ogbn-products with GraphSAINT sampling and FLAG."""
    parser = argparse.ArgumentParser(description='OGBN-Products (GraphSAINT)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--inductive', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--batch_size', type=int, default=20000)
    parser.add_argument('--walk_length', type=int, default=3)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--num_steps', type=int, default=30)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--runs', type=int, default=10)
    # FLAG hyperparameters: ascent step size and number of ascent steps.
    parser.add_argument('--step-size', type=float, default=8e-3)
    parser.add_argument('-m', type=int, default=3)
    parser.add_argument('--test-freq', type=int, default=2)
    parser.add_argument('--attack', type=str, default='flag')
    parser.add_argument('--amp', type=float, default=2)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    # Convert split indices to boolean masks and add them to `data`.
    for key, idx in split_idx.items():
        mask = torch.zeros(data.num_nodes, dtype=torch.bool)
        mask[idx] = True
        data[f'{key}_mask'] = mask

    # We omit normalization factors here since those are only defined for the
    # inductive learning setup.
    sampler_data = data
    if args.inductive:
        sampler_data = to_inductive(data)

    loader = GraphSAINTRandomWalkSampler(sampler_data,
                                         batch_size=args.batch_size,
                                         walk_length=args.walk_length,
                                         num_steps=args.num_steps,
                                         sample_coverage=0,
                                         save_dir=dataset.processed_dir)

    model = SAGE(data.x.size(-1), args.hidden_channels, dataset.num_classes,
                 args.num_layers, args.dropout).to(device)

    # Full-neighborhood sampler used only for layer-wise evaluation.
    subgraph_loader = NeighborSampler(data.edge_index, sizes=[-1],
                                     batch_size=4096, shuffle=False,
                                     num_workers=12)

    evaluator = Evaluator(name='ogbn-products')

    vals, tests = [], []
    for run in range(args.runs):
        best_val, final_test = 0, 0

        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, args.epochs + 1):
            loss = train_flag(model, loader, optimizer, device, args)
            # Evaluate only late in training and at intervals -- inference
            # over the full graph is expensive.
            if epoch > args.epochs / 2 and epoch % args.test_freq == 0 or epoch == args.epochs:
                result = test(model, data, evaluator, subgraph_loader, device)
                train, val, tst = result
                if val > best_val:
                    best_val = val
                    final_test = tst

        print(f'Run{run} val:{best_val}, test:{final_test}')
        vals.append(best_val)
        tests.append(final_test)

    print('')
    print(f"Average val accuracy: {np.mean(vals)} ± {np.std(vals)}")
    print(f"Average test accuracy: {np.mean(tests)} ± {np.std(tests)}")


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"torch.nn.functional.dropout",
"ogb.nodeproppred.PygNodePropPredDataset",
"torch_geometric.utils.subgraph",
"torch.cat",
"numpy.mean",
"torch.device",
"torch.no_grad",
"numpy.std",
"torch_geometric.data.NeighborSampler",
"torch.nn.functional.nll_loss",
"torch.nn.func... | [((360, 387), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../.."""'], {}), "(0, '../..')\n", (375, 387), False, 'import sys\n'), ((3011, 3026), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3024, 3026), False, 'import torch\n'), ((3867, 3955), 'torch_geometric.utils.subgraph', 'subgraph', (['mask', 'data.edge_index', 'None'], {'relabel_nodes': '(True)', 'num_nodes': 'data.num_nodes'}), '(mask, data.edge_index, None, relabel_nodes=True, num_nodes=data.\n num_nodes)\n', (3875, 3955), False, 'from torch_geometric.utils import subgraph\n'), ((4067, 4132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OGBN-Products (GraphSAINT)"""'}), "(description='OGBN-Products (GraphSAINT)')\n", (4090, 4132), False, 'import argparse\n'), ((5216, 5236), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (5228, 5236), False, 'import torch\n'), ((5252, 5296), 'ogb.nodeproppred.PygNodePropPredDataset', 'PygNodePropPredDataset', ([], {'name': '"""ogbn-products"""'}), "(name='ogbn-products')\n", (5274, 5296), False, 'from ogb.nodeproppred import PygNodePropPredDataset, Evaluator\n'), ((5803, 5988), 'torch_geometric.data.GraphSAINTRandomWalkSampler', 'GraphSAINTRandomWalkSampler', (['sampler_data'], {'batch_size': 'args.batch_size', 'walk_length': 'args.walk_length', 'num_steps': 'args.num_steps', 'sample_coverage': '(0)', 'save_dir': 'dataset.processed_dir'}), '(sampler_data, batch_size=args.batch_size,\n walk_length=args.walk_length, num_steps=args.num_steps, sample_coverage\n =0, save_dir=dataset.processed_dir)\n', (5830, 5988), False, 'from torch_geometric.data import GraphSAINTRandomWalkSampler, NeighborSampler\n'), ((6345, 6441), 'torch_geometric.data.NeighborSampler', 'NeighborSampler', (['data.edge_index'], {'sizes': '[-1]', 'batch_size': '(4096)', 'shuffle': '(False)', 'num_workers': '(12)'}), '(data.edge_index, sizes=[-1], batch_size=4096, shuffle=False,\n num_workers=12)\n', (6360, 6441), False, 'from 
torch_geometric.data import GraphSAINTRandomWalkSampler, NeighborSampler\n'), ((6531, 6562), 'ogb.nodeproppred.Evaluator', 'Evaluator', ([], {'name': '"""ogbn-products"""'}), "(name='ogbn-products')\n", (6540, 6562), False, 'from ogb.nodeproppred import PygNodePropPredDataset, Evaluator\n'), ((604, 625), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (623, 625), False, 'import torch\n'), ((1314, 1342), 'torch.log_softmax', 'torch.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (1331, 1342), False, 'import torch\n'), ((2316, 2368), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['out[data.train_mask]', 'y[data.train_mask]'], {}), '(out[data.train_mask], y[data.train_mask])\n', (2326, 2368), True, 'import torch.nn.functional as F\n'), ((5166, 5191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5189, 5191), False, 'import torch\n'), ((5483, 5528), 'torch.zeros', 'torch.zeros', (['data.num_nodes'], {'dtype': 'torch.bool'}), '(data.num_nodes, dtype=torch.bool)\n', (5494, 5528), False, 'import torch\n'), ((652, 690), 'torch_geometric.nn.SAGEConv', 'SAGEConv', (['in_channels', 'hidden_channels'], {}), '(in_channels, hidden_channels)\n', (660, 690), False, 'from torch_geometric.nn import SAGEConv\n'), ((832, 871), 'torch_geometric.nn.SAGEConv', 'SAGEConv', (['hidden_channels', 'out_channels'], {}), '(hidden_channels, out_channels)\n', (840, 871), False, 'from torch_geometric.nn import SAGEConv\n'), ((1165, 1174), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1171, 1174), True, 'import torch.nn.functional as F\n'), ((1191, 1243), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (1200, 1243), True, 'import torch.nn.functional as F\n'), ((1991, 2011), 'torch.cat', 'torch.cat', (['xs'], {'dim': '(0)'}), '(xs, dim=0)\n', (2000, 2011), False, 'import torch\n'), ((762, 804), 'torch_geometric.nn.SAGEConv', 
'SAGEConv', (['hidden_channels', 'hidden_channels'], {}), '(hidden_channels, hidden_channels)\n', (770, 804), False, 'from torch_geometric.nn import SAGEConv\n'), ((7378, 7391), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (7385, 7391), True, 'import numpy as np\n'), ((7397, 7409), 'numpy.std', 'np.std', (['vals'], {}), '(vals)\n', (7403, 7409), True, 'import numpy as np\n'), ((7448, 7462), 'numpy.mean', 'np.mean', (['tests'], {}), '(tests)\n', (7455, 7462), True, 'import numpy as np\n'), ((7468, 7481), 'numpy.std', 'np.std', (['tests'], {}), '(tests)\n', (7474, 7481), True, 'import numpy as np\n'), ((1884, 1893), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1890, 1893), True, 'import torch.nn.functional as F\n')] |
"""Unit tests for soundings.py."""
import copy
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import temperature_conversions
from gewittergefahr.gg_utils import moisture_conversions
TOLERANCE = 1e-6
TOLERANCE_FOR_CONVERTED_VALUES = 1e-3
# The following constants are used to test _get_nwp_fields_for_sounding.
MINIMUM_PRESSURE_MB = 950.
MODEL_NAME = nwp_model_utils.RAP_MODEL_NAME
SURFACE_HEIGHT_NAME, SURFACE_HEIGHT_NAME_GRIB1 = (
nwp_model_utils.get_lowest_height_name(MODEL_NAME)
)
SURFACE_TEMP_NAME, SURFACE_TEMP_NAME_GRIB1 = (
nwp_model_utils.get_lowest_temperature_name(MODEL_NAME)
)
SURFACE_HUMIDITY_NAME, SURFACE_HUMIDITY_NAME_GRIB1 = (
nwp_model_utils.get_lowest_humidity_name(MODEL_NAME)
)
SURFACE_U_WIND_NAME, SURFACE_U_WIND_NAME_GRIB1 = (
nwp_model_utils.get_lowest_u_wind_name(MODEL_NAME)
)
SURFACE_V_WIND_NAME, SURFACE_V_WIND_NAME_GRIB1 = (
nwp_model_utils.get_lowest_v_wind_name(MODEL_NAME)
)
SURFACE_PRESSURE_NAME, SURFACE_PRESSURE_NAME_GRIB1 = (
nwp_model_utils.get_lowest_pressure_name(MODEL_NAME)
)
FIELD_NAMES_WITH_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb', SURFACE_HEIGHT_NAME,
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb', SURFACE_TEMP_NAME,
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb', SURFACE_HUMIDITY_NAME,
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb',
SURFACE_U_WIND_NAME,
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb',
SURFACE_V_WIND_NAME,
SURFACE_PRESSURE_NAME
]
FIELD_NAMES_NO_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb',
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb',
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb',
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb',
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb'
]
FIELD_NAMES_WITH_SURFACE_GRIB1 = [
'HGT:950 mb', 'HGT:975 mb', 'HGT:1000 mb', SURFACE_HEIGHT_NAME_GRIB1,
'TMP:950 mb', 'TMP:975 mb', 'TMP:1000 mb', SURFACE_TEMP_NAME_GRIB1,
'RH:950 mb', 'RH:975 mb', 'RH:1000 mb', SURFACE_HUMIDITY_NAME_GRIB1,
'UGRD:950 mb', 'UGRD:975 mb', 'UGRD:1000 mb', SURFACE_U_WIND_NAME_GRIB1,
'VGRD:950 mb', 'VGRD:975 mb', 'VGRD:1000 mb', SURFACE_V_WIND_NAME_GRIB1,
SURFACE_PRESSURE_NAME_GRIB1
]
FIELD_NAMES_NO_SURFACE_GRIB1 = [
'HGT:950 mb', 'HGT:975 mb', 'HGT:1000 mb',
'TMP:950 mb', 'TMP:975 mb', 'TMP:1000 mb',
'RH:950 mb', 'RH:975 mb', 'RH:1000 mb',
'UGRD:950 mb', 'UGRD:975 mb', 'UGRD:1000 mb',
'VGRD:950 mb', 'VGRD:975 mb', 'VGRD:1000 mb'
]
HEIGHT_NAMES_NO_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb'
]
TEMPERATURE_NAMES_NO_SURFACE = [
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb'
]
HUMIDITY_NAMES_NO_SURFACE = [
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb'
]
U_WIND_NAMES_NO_SURFACE = [
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb'
]
V_WIND_NAMES_NO_SURFACE = [
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb'
]
PRESSURE_LEVELS_NO_SURFACE_MB = numpy.array([950, 975, 1000], dtype=float)
THIS_DICT = {
soundings.PRESSURE_LEVEL_KEY: numpy.concatenate((
PRESSURE_LEVELS_NO_SURFACE_MB, numpy.array([numpy.nan])
)),
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS:
HEIGHT_NAMES_NO_SURFACE + [SURFACE_HEIGHT_NAME],
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS:
TEMPERATURE_NAMES_NO_SURFACE + [SURFACE_TEMP_NAME],
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS:
HUMIDITY_NAMES_NO_SURFACE + [SURFACE_HUMIDITY_NAME],
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS:
U_WIND_NAMES_NO_SURFACE + [SURFACE_U_WIND_NAME],
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS:
V_WIND_NAMES_NO_SURFACE + [SURFACE_V_WIND_NAME]
}
FIELD_NAME_TABLE_WITH_SURFACE = pandas.DataFrame.from_dict(THIS_DICT)
THIS_DICT = {
soundings.PRESSURE_LEVEL_KEY: PRESSURE_LEVELS_NO_SURFACE_MB,
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS: HEIGHT_NAMES_NO_SURFACE,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS:
TEMPERATURE_NAMES_NO_SURFACE,
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS: HUMIDITY_NAMES_NO_SURFACE,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS: U_WIND_NAMES_NO_SURFACE,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS: V_WIND_NAMES_NO_SURFACE
}
FIELD_NAME_TABLE_NO_SURFACE = pandas.DataFrame.from_dict(THIS_DICT)
# The following constants are used to test _create_target_points_for_interp.
THESE_FULL_ID_STRINGS = ['A', 'B', 'C', 'A', 'B', 'C']
THESE_TIMES_UNIX_SEC = numpy.array([0, 0, 0, 1, 1, 1], dtype=int)
THESE_LATITUDES_DEG = numpy.array([50, 55, 60, 51, 56, 61], dtype=float)
THESE_LONGITUDES_DEG = numpy.array([250, 260, 270, 251, 261, 271], dtype=float)
THESE_EAST_VELOCITIES_M_S01 = numpy.full(6, 10000, dtype=float)
THESE_NORTH_VELOCITIES_M_S01 = numpy.full(6, 10000, dtype=float)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
tracking_utils.VALID_TIME_COLUMN: THESE_TIMES_UNIX_SEC,
tracking_utils.CENTROID_LATITUDE_COLUMN: THESE_LATITUDES_DEG,
tracking_utils.CENTROID_LONGITUDE_COLUMN: THESE_LONGITUDES_DEG,
tracking_utils.EAST_VELOCITY_COLUMN: THESE_EAST_VELOCITIES_M_S01,
tracking_utils.NORTH_VELOCITY_COLUMN: THESE_NORTH_VELOCITIES_M_S01
}
DUMMY_STORM_OBJECT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
UNIQUE_LEAD_TIMES_SECONDS = numpy.array([0, 1], dtype=int)
THESE_FULL_ID_STRINGS = [
'A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C'
]
THESE_INIT_TIMES_UNIX_SEC = numpy.array(
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=int
)
THESE_LATITUDES_DEG = numpy.array([
50, 55, 60, 51, 56, 61, 50.08981978, 55.08972691, 60.08963404, 51.08980123,
56.08970834, 61.08961544
], dtype=float)
THESE_LONGITUDES_DEG = numpy.array([
250, 260, 270, 251, 261, 271, 250.13973873, 260.15661394, 270.17969769,
251.14273048, 261.16064721, 271.18533962
], dtype=float)
THESE_VALID_TIMES_UNIX_SEC = numpy.array(
[0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2], dtype=int
)
THESE_LEAD_TIMES_SECONDS = numpy.array(
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int
)
THESE_EAST_VELOCITIES_M_S01 = numpy.full(12, 10000, dtype=float)
THESE_NORTH_VELOCITIES_M_S01 = numpy.full(12, 10000, dtype=float)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIME_COLUMN: THESE_INIT_TIMES_UNIX_SEC,
tracking_utils.CENTROID_LATITUDE_COLUMN: THESE_LATITUDES_DEG,
tracking_utils.CENTROID_LONGITUDE_COLUMN: THESE_LONGITUDES_DEG,
soundings.FORECAST_TIME_COLUMN: THESE_VALID_TIMES_UNIX_SEC,
soundings.LEAD_TIME_KEY: THESE_LEAD_TIMES_SECONDS,
tracking_utils.EAST_VELOCITY_COLUMN: THESE_EAST_VELOCITIES_M_S01,
tracking_utils.NORTH_VELOCITY_COLUMN: THESE_NORTH_VELOCITIES_M_S01
}
DUMMY_TARGET_POINT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
# The following constants are used to test _convert_interp_table_to_soundings.
THIS_MATRIX = numpy.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
], dtype=float)
INTERP_TABLE_NO_SURFACE = pandas.DataFrame(THIS_MATRIX)
THESE_FULL_ID_STRINGS = ['a', 'b']
THESE_INIT_TIMES_UNIX_SEC = numpy.array([10, 10], dtype=int)
THESE_LEAD_TIMES_SECONDS = numpy.array([5, 5], dtype=int)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIME_COLUMN: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIME_KEY: THESE_LEAD_TIMES_SECONDS
}
TARGET_POINT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
for k in range(len(FIELD_NAMES_NO_SURFACE)):
INTERP_TABLE_NO_SURFACE.rename(
columns={k: FIELD_NAMES_NO_SURFACE[k]}, inplace=True
)
THIS_FIRST_MATRIX = numpy.array([
[0, 6, 3, 9, 12],
[1, 7, 4, 10, 13],
[2, 8, 5, 11, 14]
], dtype=int)
THIS_SECOND_MATRIX = numpy.array([
[2, 14, 8, 20, 26],
[4, 16, 10, 22, 28],
[6, 18, 12, 24, 30]
], dtype=int)
THIS_SOUNDING_MATRIX = numpy.stack(
(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0
)
THESE_PRESSURE_LEVELS_MB = numpy.array([950, 975, 1000])
THESE_FIELD_NAMES = [
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS
]
SOUNDING_DICT_P_COORDS_NO_SURFACE = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.SURFACE_PRESSURES_KEY: None,
soundings.PRESSURE_LEVELS_WITH_SFC_KEY: THESE_PRESSURE_LEVELS_MB,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
THIS_MATRIX = numpy.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
2000],
[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40,
4200]
], dtype=float)
INTERP_TABLE_WITH_SURFACE = pandas.DataFrame(THIS_MATRIX)
for k in range(len(FIELD_NAMES_WITH_SURFACE)):
INTERP_TABLE_WITH_SURFACE.rename(
columns={k: FIELD_NAMES_WITH_SURFACE[k]}, inplace=True
)
THIS_FIRST_MATRIX = numpy.array([
[0, 8, 4, 12, 16],
[1, 9, 5, 13, 17],
[2, 10, 6, 14, 18],
[3, 11, 7, 15, 19]
], dtype=int)
THIS_SECOND_MATRIX = numpy.array([
[2, 18, 10, 26, 34],
[4, 20, 12, 28, 36],
[6, 22, 14, 30, 38],
[8, 24, 16, 32, 40]
], dtype=int)
THIS_SOUNDING_MATRIX = numpy.stack(
(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0
)
THESE_PRESSURE_LEVELS_MB = numpy.array([950, 975, 1000, numpy.nan])
THESE_SURFACE_PRESSURES_MB = numpy.array([20, 42])
SOUNDING_DICT_P_COORDS_WITH_SURFACE = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.SURFACE_PRESSURES_KEY: THESE_SURFACE_PRESSURES_MB,
soundings.PRESSURE_LEVELS_WITH_SFC_KEY: THESE_PRESSURE_LEVELS_MB,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
# The following constants are used to test _get_pressures.
PRESSURE_MATRIX_NO_SURFACE_PASCALS = numpy.array([
[95000, 97500, 100000],
[95000, 97500, 100000]
])
PRESSURE_MATRIX_WITH_SURFACE_PASCALS = numpy.array([
[95000, 97500, 100000, 2000],
[95000, 97500, 100000, 4200]
])
# The following constants are used to test _relative_to_specific_humidity.
THESE_HEIGHTS_METRES = numpy.array([400, 300, 200, 100, 0])
THESE_TEMPERATURES_KELVINS = numpy.array([
273.15, 278.15, 283.15, 288.15, 298.15
])
THESE_U_WINDS_M_S01 = numpy.array([-10, -5, 0, 5, 10])
THESE_V_WINDS_M_S01 = numpy.array([20, 30, -40, 15, 7.2])
THESE_SPEC_HUMIDITIES_KG_KG01 = 0.001 * numpy.array([0.1, 1., 5., 10., 20.])
THESE_PRESSURES_PASCALS = numpy.array([99000, 100000, 101000, 102000, 103000])
PRESSURE_MATRIX_PASCALS = numpy.reshape(THESE_PRESSURES_PASCALS, (1, 5))
THESE_DEWPOINTS_KELVINS = moisture_conversions.specific_humidity_to_dewpoint(
specific_humidities_kg_kg01=THESE_SPEC_HUMIDITIES_KG_KG01,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
DEWPOINT_MATRIX_KELVINS = numpy.reshape(THESE_DEWPOINTS_KELVINS, (1, 5))
THESE_RELATIVE_HUMIDITIES = moisture_conversions.dewpoint_to_relative_humidity(
dewpoints_kelvins=THESE_DEWPOINTS_KELVINS,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
THESE_RELATIVE_HUMIDITIES_PERCENT = 100 * THESE_RELATIVE_HUMIDITIES
THESE_FIELD_NAMES = [
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS
]
THIS_SOUNDING_MATRIX = numpy.full((1, 5, 5), numpy.nan)
THIS_SOUNDING_MATRIX[0, :, 0] = THESE_HEIGHTS_METRES
THIS_SOUNDING_MATRIX[0, :, 1] = THESE_RELATIVE_HUMIDITIES_PERCENT
THIS_SOUNDING_MATRIX[0, :, 2] = THESE_TEMPERATURES_KELVINS
THIS_SOUNDING_MATRIX[0, :, 3] = THESE_U_WINDS_M_S01
THIS_SOUNDING_MATRIX[0, :, 4] = THESE_V_WINDS_M_S01
SOUNDING_DICT_P_COORDS_NO_SPFH = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
THIS_NEW_MATRIX = numpy.reshape(THESE_SPEC_HUMIDITIES_KG_KG01, (1, 5, 1))
THIS_SOUNDING_MATRIX = numpy.concatenate(
(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1
)
THIS_SOUNDING_MATRIX[..., 1] = THIS_SOUNDING_MATRIX[..., 1] / 100
THESE_FIELD_NAMES = [
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS,
soundings.RELATIVE_HUMIDITY_NAME,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.SPFH_COLUMN_FOR_SOUNDINGS
]
SOUNDING_DICT_P_COORDS_WITH_SPFH = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
# The following constants are used to test _specific_to_relative_humidity.
THESE_FIELD_NAMES = [
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.SPFH_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS
]
THIS_SOUNDING_MATRIX = numpy.full((1, 5, 5), numpy.nan)
THIS_SOUNDING_MATRIX[0, :, 0] = THESE_HEIGHTS_METRES
THIS_SOUNDING_MATRIX[0, :, 1] = THESE_SPEC_HUMIDITIES_KG_KG01
THIS_SOUNDING_MATRIX[0, :, 2] = THESE_TEMPERATURES_KELVINS
THIS_SOUNDING_MATRIX[0, :, 3] = THESE_U_WINDS_M_S01
THIS_SOUNDING_MATRIX[0, :, 4] = THESE_V_WINDS_M_S01
SOUNDING_DICT_P_COORDS_NO_RH = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
THIS_NEW_MATRIX = numpy.reshape(THESE_RELATIVE_HUMIDITIES, (1, 5, 1))
THIS_SOUNDING_MATRIX = numpy.concatenate(
(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1
)
NEW_FIELD_NAMES = THESE_FIELD_NAMES + [soundings.RELATIVE_HUMIDITY_NAME]
SOUNDING_DICT_P_COORDS_WITH_RH = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: NEW_FIELD_NAMES
}
SOUNDING_DICT_P_COORDS_NO_THETA_V = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: NEW_FIELD_NAMES
}
# The following constants are used to test _get_virtual_potential_temperatures.
THESE_VAPOUR_PRESSURES_PASCALS = (
moisture_conversions.dewpoint_to_vapour_pressure(
dewpoints_kelvins=THESE_DEWPOINTS_KELVINS,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
)
THESE_VIRTUAL_TEMPS_KELVINS = (
moisture_conversions.temperature_to_virtual_temperature(
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS,
vapour_pressures_pascals=THESE_VAPOUR_PRESSURES_PASCALS
)
)
THESE_THETA_V_KELVINS = (
temperature_conversions.temperatures_to_potential_temperatures(
temperatures_kelvins=THESE_VIRTUAL_TEMPS_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS)
)
THIS_NEW_MATRIX = numpy.reshape(THESE_THETA_V_KELVINS, (1, 5, 1))
THIS_SOUNDING_MATRIX = numpy.concatenate(
(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1
)
NEW_FIELD_NAMES = THESE_FIELD_NAMES + [
soundings.RELATIVE_HUMIDITY_NAME,
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME
]
SOUNDING_DICT_P_COORDS_WITH_THETA_V = {
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.FIELD_NAMES_KEY: NEW_FIELD_NAMES
}
# The following constants are used to test _fill_nans_in_soundings.
THESE_PRESSURE_LEVELS_MB = numpy.array([
500, 600, 700, 850, 925, 1000, numpy.nan
])
THESE_SURFACE_PRESSURES_MB = numpy.array([965, 1000])
PRESSURE_MATRIX_FOR_NAN_FILL_PASCALS = numpy.array([
[50000, 60000, 70000, 85000, 92500, 100000, 96500],
[50000, 60000, 70000, 85000, 92500, 100000, 100000]
])
THESE_HEIGHTS_METRES = numpy.array([5700, numpy.nan, 3090, 1500, 770, 0, 385])
THESE_U_WINDS_M_S01 = numpy.array([numpy.nan, 30, 20, 10, 0, -10, numpy.nan])
THESE_V_WINDS_M_S01 = numpy.array([30, 25, 20, numpy.nan, 10, 5, 0])
THESE_TEMPERATURES_KELVINS = numpy.array([
numpy.nan, numpy.nan, 275, numpy.nan, 290, 300, 301
])
THESE_SPEC_HUMIDITIES_KG_KG01 = numpy.array([
0.001, 0.002, numpy.nan, numpy.nan, 0.005, numpy.nan, numpy.nan
])
THESE_FIELD_NAMES = [
soundings.GEOPOTENTIAL_HEIGHT_NAME, soundings.U_WIND_NAME,
soundings.V_WIND_NAME, soundings.TEMPERATURE_NAME,
soundings.SPECIFIC_HUMIDITY_NAME
]
THIS_FIRST_MATRIX = numpy.transpose(numpy.vstack(
(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,
THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)
))
THESE_HEIGHTS_METRES = numpy.array([
numpy.nan, numpy.nan, numpy.nan, 1600, numpy.nan, numpy.nan, numpy.nan
])
THIS_SECOND_MATRIX = numpy.transpose(numpy.vstack(
(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,
THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)
))
THIS_SOUNDING_MATRIX = numpy.stack(
(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0
)
THESE_FULL_ID_STRINGS = ['c', 'd']
THESE_INIT_TIMES_UNIX_SEC = numpy.array([6, 6], dtype=int)
THESE_LEAD_TIMES_SECONDS = numpy.array([1, 1], dtype=int)
SOUNDING_DICT_P_COORDS_WITH_NANS = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.PRESSURE_LEVELS_WITH_SFC_KEY: THESE_PRESSURE_LEVELS_MB,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES,
soundings.SURFACE_PRESSURES_KEY: THESE_SURFACE_PRESSURES_MB
}
THESE_HEIGHTS_METRES = numpy.array([
5700, 4285.73988745, 3090, 1500, 770, 0, 385
])
THESE_U_WINDS_M_S01 = numpy.array([41.82748964, 30, 20, 10, 0, -10, -5])
THESE_V_WINDS_M_S01 = numpy.array([30, 25, 20, 13.14655172, 10, 5, 0])
THESE_TEMPERATURES_KELVINS = numpy.array([
258.125, 267.26892314, 275, 285.28017241, 290, 300, 301
])
THESE_SPEC_HUMIDITIES_KG_KG01 = numpy.array([
0.001, 0.002, 0.00302033, 0.00437709, 0.005, 0.00565705, 0.00532852
])
THIS_SOUNDING_MATRIX = numpy.transpose(numpy.vstack(
(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,
THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)
))
THIS_SOUNDING_MATRIX = numpy.expand_dims(THIS_SOUNDING_MATRIX, 0)
THESE_FULL_ID_STRINGS = ['c']
THESE_INIT_TIMES_UNIX_SEC = numpy.array([6], dtype=int)
THESE_LEAD_TIMES_SECONDS = numpy.array([1], dtype=int)
THESE_SURFACE_PRESSURES_MB = numpy.array([965])
SOUNDING_DICT_P_COORDS_NO_NANS = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.PRESSURE_LEVELS_WITH_SFC_KEY: THESE_PRESSURE_LEVELS_MB,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES,
soundings.SURFACE_PRESSURES_KEY: THESE_SURFACE_PRESSURES_MB
}
# The following constants are used to test _pressure_to_height_coords.
THESE_STORM_ELEVATIONS_M_ASL = numpy.array([385])
SOUNDING_DICT_PRESSURE_COORDS = copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_NANS)
SOUNDING_DICT_PRESSURE_COORDS.update({
soundings.STORM_ELEVATIONS_KEY: THESE_STORM_ELEVATIONS_M_ASL
})
HEIGHT_LEVELS_M_AGL = numpy.array([0, 385, 1115, 1910, 2705])
THESE_PRESSURES_PASCALS = numpy.array([96500, 92500, 85000, 77136.2431, 70000])
THESE_U_WINDS_M_S01 = numpy.array([-5, 0, 10, 15.2425046, 20])
THESE_V_WINDS_M_S01 = numpy.array([0, 10, 13.14655172, 16.7394751, 20])
THESE_TEMPERATURES_KELVINS = numpy.array([
301, 290, 285.28017241, 279.890787, 275
])
THESE_SPEC_HUMIDITIES_KG_KG01 = numpy.array([
0.00532852, 0.005, 0.00437709, 0.00366580795, 0.00302033
])
THESE_DEWPOINTS_KELVINS = (
moisture_conversions.specific_humidity_to_dewpoint(
specific_humidities_kg_kg01=THESE_SPEC_HUMIDITIES_KG_KG01,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
)
THESE_RELATIVE_HUMIDITIES = (
moisture_conversions.dewpoint_to_relative_humidity(
dewpoints_kelvins=THESE_DEWPOINTS_KELVINS,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
)
THESE_VAPOUR_PRESSURES_PASCALS = (
moisture_conversions.dewpoint_to_vapour_pressure(
dewpoints_kelvins=THESE_DEWPOINTS_KELVINS,
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS
)
)
THESE_VIRTUAL_TEMPS_KELVINS = (
moisture_conversions.temperature_to_virtual_temperature(
temperatures_kelvins=THESE_TEMPERATURES_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS,
vapour_pressures_pascals=THESE_VAPOUR_PRESSURES_PASCALS
)
)
THESE_THETA_V_KELVINS = (
temperature_conversions.temperatures_to_potential_temperatures(
temperatures_kelvins=THESE_VIRTUAL_TEMPS_KELVINS,
total_pressures_pascals=THESE_PRESSURES_PASCALS)
)
THESE_FIELD_NAMES = [
soundings.PRESSURE_NAME, soundings.U_WIND_NAME, soundings.V_WIND_NAME,
soundings.TEMPERATURE_NAME, soundings.SPECIFIC_HUMIDITY_NAME,
soundings.RELATIVE_HUMIDITY_NAME,
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME
]
THIS_SOUNDING_MATRIX = numpy.transpose(numpy.vstack(
(THESE_PRESSURES_PASCALS, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,
THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01,
THESE_RELATIVE_HUMIDITIES, THESE_THETA_V_KELVINS)
))
THIS_SOUNDING_MATRIX = numpy.expand_dims(THIS_SOUNDING_MATRIX, 0)
SOUNDING_DICT_HEIGHT_COORDS = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.STORM_ELEVATIONS_KEY: THESE_STORM_ELEVATIONS_M_ASL,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.HEIGHT_LEVELS_KEY: HEIGHT_LEVELS_M_AGL,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
# The following constants are used to test field_name_to_verbose.
NON_VERBOSE_FIELD_NAMES = [
'pressure_pascals', 'virtual_potential_temperature_kelvins',
'relative_humidity_unitless', 'specific_humidity',
'geopotential_height_metres', 'v_wind_m_s01', 'u_wind_m_s01',
'temperature_kelvins'
]
VERBOSE_FIELD_NAMES_WITH_UNITS = [
'Pressure (Pa)', 'Virtual potential temperature (K)',
'Relative humidity', r'Specific humidity (kg kg$^{-1}$)',
'Geopotential height (m)', r'$v$-wind (m s$^{-1}$)',
r'$u$-wind (m s$^{-1}$)', 'Temperature (K)'
]
VERBOSE_FIELD_NAMES_NO_UNITS = [
'Pressure', 'Virtual potential temperature',
'Relative humidity', 'Specific humidity',
'Geopotential height', r'$v$-wind',
r'$u$-wind', 'Temperature'
]
# The following constants are used to test find_sounding_file.
TOP_DIRECTORY_NAME = 'storm_soundings'
SPC_DATE_STRING = '20180618'
LEAD_TIME_IN_FILES_SEC = 3600
LAG_TIME_IN_FILES_SEC = 1800
FILE_TIME_UNIX_SEC = 1529374673
SOUNDING_FILE_NAME_ONE_TIME = (
'storm_soundings/2018/20180618/'
'storm_soundings_2018-06-19-021753_lead-time-03600sec_lag-time-1800sec.nc'
)
SOUNDING_FILE_NAME_ONE_SPC_DATE = (
'storm_soundings/2018/storm_soundings_20180618_lead-time-03600sec'
'_lag-time-1800sec.nc'
)
def _compare_target_point_tables(
        first_target_point_table, second_target_point_table):
    """Determines equality of two target-point tables.

    :param first_target_point_table: First table.
    :param second_target_point_table: Second table.
    :return: are_tables_equal: Boolean flag.
    """

    column_names = list(first_target_point_table)
    other_column_names = list(second_target_point_table)

    # Tables with different column sets can never be equal.
    if set(column_names) != set(other_column_names):
        return False

    def _columns_match(column_name):
        """Compares one column between the two tables."""
        first_values = first_target_point_table[column_name].values
        second_values = second_target_point_table[column_name].values

        # Storm IDs are strings, so they need an exact comparison; all other
        # columns are numeric and get a tolerance-based comparison.
        if column_name == tracking_utils.FULL_ID_COLUMN:
            return numpy.array_equal(first_values, second_values)

        return numpy.allclose(first_values, second_values, atol=TOLERANCE)

    return all(_columns_match(c) for c in column_names)
def _compare_sounding_dictionaries(first_sounding_dict, second_sounding_dict):
    """Determines equality of two sounding dictionaries.

    Either both dictionaries must be in pressure coords, or both must be in
    ground-relative height coords.

    This comparator does not modify the input dictionaries (fields are sorted
    on internal copies) and prints no debug output.

    :param first_sounding_dict: Dictionary with keys listed in
        `_convert_interp_table_to_soundings` or `_pressure_to_height_coords`.
    :param second_sounding_dict: Same.
    :return: are_dicts_equal: Boolean flag.
    """

    if set(first_sounding_dict.keys()) != set(second_sounding_dict.keys()):
        return False

    def _sort_fields(sounding_dict):
        """Returns a shallow copy of `sounding_dict` with fields sorted.

        Sorting both dictionaries the same way makes the comparison
        independent of field order.
        """
        sounding_dict = dict(sounding_dict)

        sort_indices = numpy.argsort(numpy.array(
            sounding_dict[soundings.FIELD_NAMES_KEY]
        ))
        sounding_dict[soundings.FIELD_NAMES_KEY] = [
            sounding_dict[soundings.FIELD_NAMES_KEY][k] for k in sort_indices
        ]
        sounding_dict[soundings.SOUNDING_MATRIX_KEY] = (
            sounding_dict[soundings.SOUNDING_MATRIX_KEY][..., sort_indices]
        )

        return sounding_dict

    first_sounding_dict = _sort_fields(first_sounding_dict)
    second_sounding_dict = _sort_fields(second_sounding_dict)

    for this_key in first_sounding_dict:
        first_value = first_sounding_dict[this_key]
        second_value = second_sounding_dict[this_key]

        if this_key in [soundings.FIELD_NAMES_KEY, soundings.FULL_IDS_KEY]:
            # Lists of strings: compare exactly.
            if first_value != second_value:
                return False

        elif first_value is None or second_value is None:
            # Surface pressures may be None (soundings without surface data).
            # Handle None symmetrically: None is equal only to None.  (The
            # original only handled None on the first dictionary's side.)
            if first_value is not second_value:
                return False

        else:
            if not numpy.allclose(
                    first_value, second_value, atol=TOLERANCE,
                    equal_nan=True):
                return False

    return True
class SoundingsTests(unittest.TestCase):
"""Each method is a unit test for soundings.py."""
def test_get_nwp_fields_for_sounding_no_table_no_surface(self):
"""Ensures correct output from _get_nwp_fields_for_sounding.
In this case, return_table = False and include_surface = False.
"""
these_field_names, these_field_names_grib1 = (
soundings._get_nwp_fields_for_sounding(
model_name=MODEL_NAME, return_table=False,
minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=False
)[:2]
)
self.assertTrue(
set(these_field_names) == set(FIELD_NAMES_NO_SURFACE)
)
self.assertTrue(
set(these_field_names_grib1) == set(FIELD_NAMES_NO_SURFACE_GRIB1)
)
def test_get_nwp_fields_for_sounding_no_table_yes_surface(self):
"""Ensures correct output from _get_nwp_fields_for_sounding.
In this case, return_table = False and include_surface = True.
"""
these_field_names, these_field_names_grib1 = (
soundings._get_nwp_fields_for_sounding(
model_name=MODEL_NAME, return_table=False,
minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=True
)[:2]
)
self.assertTrue(
set(these_field_names) == set(FIELD_NAMES_WITH_SURFACE)
)
self.assertTrue(
set(these_field_names_grib1) == set(FIELD_NAMES_WITH_SURFACE_GRIB1)
)
    def test_get_nwp_fields_for_sounding_yes_table_no_surface(self):
        """Ensures correct output from _get_nwp_fields_for_sounding.
        In this case, return_table = True and include_surface = False.
        """
        # `[-1]` selects the last return value, which is the field-name table.
        this_field_name_table = soundings._get_nwp_fields_for_sounding(
            model_name=MODEL_NAME, return_table=True,
            minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=False
        )[-1]
        # Column order is not pinned down, so compare columns as sets, then
        # compare table contents with both tables in the same column order.
        actual_columns = list(this_field_name_table)
        expected_columns = list(FIELD_NAME_TABLE_NO_SURFACE)
        self.assertTrue(set(actual_columns) == set(expected_columns))
        self.assertTrue(this_field_name_table[actual_columns].equals(
            FIELD_NAME_TABLE_NO_SURFACE[actual_columns]
        ))
    def test_get_nwp_fields_for_sounding_yes_table_yes_surface(self):
        """Ensures correct output from _get_nwp_fields_for_sounding.
        In this case, return_table = True and include_surface = True.
        """
        # `[-1]` selects the last return value, which is the field-name table.
        this_field_name_table = soundings._get_nwp_fields_for_sounding(
            model_name=MODEL_NAME, return_table=True,
            minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=True
        )[-1]
        # Column order is not pinned down, so compare columns as sets, then
        # compare table contents with both tables in the same column order.
        actual_columns = list(this_field_name_table)
        expected_columns = list(FIELD_NAME_TABLE_WITH_SURFACE)
        self.assertTrue(set(actual_columns) == set(expected_columns))
        self.assertTrue(this_field_name_table[actual_columns].equals(
            FIELD_NAME_TABLE_WITH_SURFACE[actual_columns]
        ))
def test_create_target_points_for_interp(self):
"""Ensures correct output from _create_target_points_for_interp."""
this_target_point_table = soundings._create_target_points_for_interp(
storm_object_table=DUMMY_STORM_OBJECT_TABLE,
lead_times_seconds=UNIQUE_LEAD_TIMES_SECONDS)
self.assertTrue(_compare_target_point_tables(
this_target_point_table, DUMMY_TARGET_POINT_TABLE
))
    def test_convert_interp_table_to_soundings_no_surface(self):
        """Ensures correct output from _convert_interp_table_to_soundings.
        In this case, include_surface = False.
        """
        this_sounding_dict = soundings._convert_interp_table_to_soundings(
            interp_table=INTERP_TABLE_NO_SURFACE,
            target_point_table=TARGET_POINT_TABLE, model_name=MODEL_NAME,
            include_surface=False, minimum_pressure_mb=MINIMUM_PRESSURE_MB)
        # deepcopy protects the shared module-level fixture from any in-place
        # modification by the comparator.
        self.assertTrue(_compare_sounding_dictionaries(
            this_sounding_dict, copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_SURFACE)
        ))
    def test_convert_interp_table_to_soundings_with_surface(self):
        """Ensures correct output from _convert_interp_table_to_soundings.
        In this case, include_surface = True.
        """
        this_sounding_dict = soundings._convert_interp_table_to_soundings(
            interp_table=INTERP_TABLE_WITH_SURFACE,
            target_point_table=TARGET_POINT_TABLE, model_name=MODEL_NAME,
            include_surface=True, minimum_pressure_mb=MINIMUM_PRESSURE_MB)
        # deepcopy protects the shared module-level fixture from any in-place
        # modification by the comparator.
        self.assertTrue(_compare_sounding_dictionaries(
            this_sounding_dict,
            copy.deepcopy(SOUNDING_DICT_P_COORDS_WITH_SURFACE)
        ))
def test_get_pressures_no_surface(self):
"""Ensures correct output from _get_pressures.
In this case, soundings do *not* include surface.
"""
this_pressure_matrix_pascals = soundings._get_pressures(
SOUNDING_DICT_P_COORDS_NO_SURFACE)
self.assertTrue(numpy.allclose(
this_pressure_matrix_pascals, PRESSURE_MATRIX_NO_SURFACE_PASCALS,
atol=TOLERANCE
))
def test_get_pressures_with_surface(self):
"""Ensures correct output from _get_pressures.
In this case, soundings include surface.
"""
this_pressure_matrix_pascals = soundings._get_pressures(
SOUNDING_DICT_P_COORDS_WITH_SURFACE)
self.assertTrue(numpy.allclose(
this_pressure_matrix_pascals, PRESSURE_MATRIX_WITH_SURFACE_PASCALS,
atol=TOLERANCE
))
    def test_relative_to_specific_humidity(self):
        """Ensures correct output from _relative_to_specific_humidity."""
        # deepcopy keeps the shared module-level fixture intact in case the
        # function modifies its input dictionary.
        this_sounding_dict, this_dewpoint_matrix_kelvins = (
            soundings._relative_to_specific_humidity(
                sounding_dict=copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_SPFH),
                pressure_matrix_pascals=PRESSURE_MATRIX_PASCALS)
        )
        # Unit-converted values get a looser tolerance than raw values.
        self.assertTrue(numpy.allclose(
            this_dewpoint_matrix_kelvins, DEWPOINT_MATRIX_KELVINS,
            atol=TOLERANCE_FOR_CONVERTED_VALUES
        ))
        self.assertTrue(_compare_sounding_dictionaries(
            this_sounding_dict, copy.deepcopy(SOUNDING_DICT_P_COORDS_WITH_SPFH)
        ))
    def test_specific_to_relative_humidity(self):
        """Ensures correct output from _specific_to_relative_humidity."""
        # deepcopy keeps the shared module-level fixture intact in case the
        # function modifies its input dictionary.
        this_sounding_dict, this_dewpoint_matrix_kelvins = (
            soundings._specific_to_relative_humidity(
                sounding_dict=copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_RH),
                pressure_matrix_pascals=PRESSURE_MATRIX_PASCALS)
        )
        # Unit-converted values get a looser tolerance than raw values.
        self.assertTrue(numpy.allclose(
            this_dewpoint_matrix_kelvins, DEWPOINT_MATRIX_KELVINS,
            atol=TOLERANCE_FOR_CONVERTED_VALUES
        ))
        self.assertTrue(_compare_sounding_dictionaries(
            this_sounding_dict, copy.deepcopy(SOUNDING_DICT_P_COORDS_WITH_RH)
        ))
    def test_get_virtual_potential_temperatures(self):
        """Ensures correct output from _get_virtual_potential_temperatures."""
        # deepcopy keeps the shared module-level fixtures intact in case the
        # function or the comparator modifies its input dictionary.
        this_sounding_dict = soundings._get_virtual_potential_temperatures(
            sounding_dict=copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_THETA_V),
            pressure_matrix_pascals=PRESSURE_MATRIX_PASCALS,
            dewpoint_matrix_kelvins=DEWPOINT_MATRIX_KELVINS)
        self.assertTrue(_compare_sounding_dictionaries(
            this_sounding_dict,
            copy.deepcopy(SOUNDING_DICT_P_COORDS_WITH_THETA_V)
        ))
def test_fill_nans_in_soundings(self):
    """Ensures correct output from _fill_nans_in_soundings."""

    # Deep-copy the NaN-containing fixture so the fill cannot mutate it.
    input_dict = copy.deepcopy(SOUNDING_DICT_P_COORDS_WITH_NANS)

    new_sounding_dict = soundings._fill_nans_in_soundings(
        sounding_dict_pressure_coords=input_dict,
        pressure_matrix_pascals=PRESSURE_MATRIX_FOR_NAN_FILL_PASCALS,
        min_num_pressure_levels_without_nan=2)

    expected_dict = copy.deepcopy(SOUNDING_DICT_P_COORDS_NO_NANS)
    self.assertTrue(
        _compare_sounding_dictionaries(new_sounding_dict, expected_dict))
def test_pressure_to_height_coords(self):
    """Ensures correct output from _pressure_to_height_coords."""

    # Deep-copy the input fixture so the coordinate change cannot mutate it.
    input_dict = copy.deepcopy(SOUNDING_DICT_PRESSURE_COORDS)

    new_sounding_dict = soundings._pressure_to_height_coords(
        sounding_dict_pressure_coords=input_dict,
        height_levels_m_agl=HEIGHT_LEVELS_M_AGL)

    expected_dict = copy.deepcopy(SOUNDING_DICT_HEIGHT_COORDS)
    self.assertTrue(
        _compare_sounding_dictionaries(new_sounding_dict, expected_dict))
def test_check_field_name_valid(self):
    """Ensures correct output from check_field_name.

    In this case, field name is valid.
    """

    valid_field_name = soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME

    # A recognized field name should pass validation without raising.
    soundings.check_field_name(valid_field_name)
def test_check_field_name_invalid(self):
    """Ensures correct output from check_field_name.

    In this case, field name is *not* valid.
    """

    # STORM_ELEVATIONS_KEY is a metadata key, not a sounding field,
    # so validation must reject it.
    bad_field_name = soundings.STORM_ELEVATIONS_KEY

    with self.assertRaises(ValueError):
        soundings.check_field_name(bad_field_name)
def test_field_name_to_verbose_with_units(self):
    """Ensures correct output from field_name_to_verbose.

    In this case, verbose field names should have units.
    """

    these_verbose_names = []
    for this_field_name in NON_VERBOSE_FIELD_NAMES:
        these_verbose_names.append(soundings.field_name_to_verbose(
            field_name=this_field_name, include_units=True))

    self.assertTrue(these_verbose_names == VERBOSE_FIELD_NAMES_WITH_UNITS)
def test_field_name_to_verbose_no_units(self):
    """Ensures correct output from field_name_to_verbose.

    In this case, verbose field names should have no units.
    """

    these_verbose_names = []
    for this_field_name in NON_VERBOSE_FIELD_NAMES:
        these_verbose_names.append(soundings.field_name_to_verbose(
            field_name=this_field_name, include_units=False))

    self.assertTrue(these_verbose_names == VERBOSE_FIELD_NAMES_NO_UNITS)
def test_find_sounding_file_one_time(self):
    """Ensures correct output from find_sounding_file.

    In this case, the file contains soundings for one time step.
    """

    # Passing a specific init time selects the single-time file path.
    these_kwargs = {
        'top_directory_name': TOP_DIRECTORY_NAME,
        'spc_date_string': SPC_DATE_STRING,
        'lead_time_seconds': LEAD_TIME_IN_FILES_SEC,
        'lag_time_for_convective_contamination_sec': LAG_TIME_IN_FILES_SEC,
        'init_time_unix_sec': FILE_TIME_UNIX_SEC,
        'raise_error_if_missing': False
    }

    this_file_name = soundings.find_sounding_file(**these_kwargs)
    self.assertTrue(this_file_name == SOUNDING_FILE_NAME_ONE_TIME)
def test_find_sounding_file_one_spc_date(self):
    """Ensures correct output from find_sounding_file.

    In this case, the file contains soundings for one SPC date.
    """

    # Passing init_time_unix_sec=None selects the whole-SPC-date file path.
    these_kwargs = {
        'top_directory_name': TOP_DIRECTORY_NAME,
        'spc_date_string': SPC_DATE_STRING,
        'lead_time_seconds': LEAD_TIME_IN_FILES_SEC,
        'lag_time_for_convective_contamination_sec': LAG_TIME_IN_FILES_SEC,
        'init_time_unix_sec': None,
        'raise_error_if_missing': False
    }

    this_file_name = soundings.find_sounding_file(**these_kwargs)
    self.assertTrue(this_file_name == SOUNDING_FILE_NAME_ONE_SPC_DATE)
if __name__ == '__main__':
    # Discover and run all unit tests in this module when executed directly.
    unittest.main()
| [
"gewittergefahr.gg_utils.nwp_model_utils.get_lowest_height_name",
"gewittergefahr.gg_utils.soundings._get_nwp_fields_for_sounding",
"numpy.allclose",
"gewittergefahr.gg_utils.moisture_conversions.specific_humidity_to_dewpoint",
"numpy.full",
"pandas.DataFrame",
"unittest.main",
"gewittergefahr.gg_util... | [((638, 688), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_height_name', 'nwp_model_utils.get_lowest_height_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (676, 688), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((742, 797), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_temperature_name', 'nwp_model_utils.get_lowest_temperature_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (785, 797), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((859, 911), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_humidity_name', 'nwp_model_utils.get_lowest_humidity_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (899, 911), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((969, 1019), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_u_wind_name', 'nwp_model_utils.get_lowest_u_wind_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1007, 1019), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((1077, 1127), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_v_wind_name', 'nwp_model_utils.get_lowest_v_wind_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1115, 1127), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((1189, 1241), 'gewittergefahr.gg_utils.nwp_model_utils.get_lowest_pressure_name', 'nwp_model_utils.get_lowest_pressure_name', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1229, 1241), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((3752, 3794), 'numpy.array', 'numpy.array', (['[950, 975, 1000]'], {'dtype': 'float'}), '([950, 975, 1000], dtype=float)\n', (3763, 3794), False, 'import numpy\n'), ((4508, 4545), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (4534, 4545), False, 'import pandas\n'), ((5044, 5081), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (5070, 5081), False, 'import pandas\n'), ((5238, 5280), 'numpy.array', 
'numpy.array', (['[0, 0, 0, 1, 1, 1]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1], dtype=int)\n', (5249, 5280), False, 'import numpy\n'), ((5303, 5353), 'numpy.array', 'numpy.array', (['[50, 55, 60, 51, 56, 61]'], {'dtype': 'float'}), '([50, 55, 60, 51, 56, 61], dtype=float)\n', (5314, 5353), False, 'import numpy\n'), ((5377, 5433), 'numpy.array', 'numpy.array', (['[250, 260, 270, 251, 261, 271]'], {'dtype': 'float'}), '([250, 260, 270, 251, 261, 271], dtype=float)\n', (5388, 5433), False, 'import numpy\n'), ((5464, 5497), 'numpy.full', 'numpy.full', (['(6)', '(10000)'], {'dtype': 'float'}), '(6, 10000, dtype=float)\n', (5474, 5497), False, 'import numpy\n'), ((5529, 5562), 'numpy.full', 'numpy.full', (['(6)', '(10000)'], {'dtype': 'float'}), '(6, 10000, dtype=float)\n', (5539, 5562), False, 'import numpy\n'), ((6000, 6037), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (6026, 6037), False, 'import pandas\n'), ((6067, 6097), 'numpy.array', 'numpy.array', (['[0, 1]'], {'dtype': 'int'}), '([0, 1], dtype=int)\n', (6078, 6097), False, 'import numpy\n'), ((6218, 6278), 'numpy.array', 'numpy.array', (['[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=int)\n', (6229, 6278), False, 'import numpy\n'), ((6307, 6439), 'numpy.array', 'numpy.array', (['[50, 55, 60, 51, 56, 61, 50.08981978, 55.08972691, 60.08963404, 51.08980123,\n 56.08970834, 61.08961544]'], {'dtype': 'float'}), '([50, 55, 60, 51, 56, 61, 50.08981978, 55.08972691, 60.08963404,\n 51.08980123, 56.08970834, 61.08961544], dtype=float)\n', (6318, 6439), False, 'import numpy\n'), ((6469, 6614), 'numpy.array', 'numpy.array', (['[250, 260, 270, 251, 261, 271, 250.13973873, 260.15661394, 270.17969769, \n 251.14273048, 261.16064721, 271.18533962]'], {'dtype': 'float'}), '([250, 260, 270, 251, 261, 271, 250.13973873, 260.15661394, \n 270.17969769, 251.14273048, 261.16064721, 271.18533962], dtype=float)\n', (6480, 
6614), False, 'import numpy\n'), ((6649, 6709), 'numpy.array', 'numpy.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2], dtype=int)\n', (6660, 6709), False, 'import numpy\n'), ((6743, 6803), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]'], {'dtype': 'int'}), '([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int)\n', (6754, 6803), False, 'import numpy\n'), ((6840, 6874), 'numpy.full', 'numpy.full', (['(12)', '(10000)'], {'dtype': 'float'}), '(12, 10000, dtype=float)\n', (6850, 6874), False, 'import numpy\n'), ((6906, 6940), 'numpy.full', 'numpy.full', (['(12)', '(10000)'], {'dtype': 'float'}), '(12, 10000, dtype=float)\n', (6916, 6940), False, 'import numpy\n'), ((7499, 7536), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (7525, 7536), False, 'import pandas\n'), ((7631, 7772), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [2, 4, 6, 8, 10, 12, \n 14, 16, 18, 20, 22, 24, 26, 28, 30]]'], {'dtype': 'float'}), '([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [2, 4, 6, \n 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]], dtype=float)\n', (7642, 7772), False, 'import numpy\n'), ((7805, 7834), 'pandas.DataFrame', 'pandas.DataFrame', (['THIS_MATRIX'], {}), '(THIS_MATRIX)\n', (7821, 7834), False, 'import pandas\n'), ((7899, 7931), 'numpy.array', 'numpy.array', (['[10, 10]'], {'dtype': 'int'}), '([10, 10], dtype=int)\n', (7910, 7931), False, 'import numpy\n'), ((7959, 7989), 'numpy.array', 'numpy.array', (['[5, 5]'], {'dtype': 'int'}), '([5, 5], dtype=int)\n', (7970, 7989), False, 'import numpy\n'), ((8202, 8239), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (8228, 8239), False, 'import pandas\n'), ((8410, 8495), 'numpy.array', 'numpy.array', (['[[0, 6, 3, 9, 12], [1, 7, 4, 10, 13], [2, 8, 5, 11, 14]]'], {'dtype': 'int'}), '([[0, 6, 3, 9, 12], [1, 7, 
4, 10, 13], [2, 8, 5, 11, 14]], dtype=int\n )\n', (8421, 8495), False, 'import numpy\n'), ((8527, 8617), 'numpy.array', 'numpy.array', (['[[2, 14, 8, 20, 26], [4, 16, 10, 22, 28], [6, 18, 12, 24, 30]]'], {'dtype': 'int'}), '([[2, 14, 8, 20, 26], [4, 16, 10, 22, 28], [6, 18, 12, 24, 30]],\n dtype=int)\n', (8538, 8617), False, 'import numpy\n'), ((8652, 8712), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (8663, 8712), False, 'import numpy\n'), ((8747, 8776), 'numpy.array', 'numpy.array', (['[950, 975, 1000]'], {}), '([950, 975, 1000])\n', (8758, 8776), False, 'import numpy\n'), ((9488, 9684), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \n 2000], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34,\n 36, 38, 40, 4200]]'], {'dtype': 'float'}), '([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n 18, 19, 2000], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,\n 32, 34, 36, 38, 40, 4200]], dtype=float)\n', (9499, 9684), False, 'import numpy\n'), ((9726, 9755), 'pandas.DataFrame', 'pandas.DataFrame', (['THIS_MATRIX'], {}), '(THIS_MATRIX)\n', (9742, 9755), False, 'import pandas\n'), ((9932, 10039), 'numpy.array', 'numpy.array', (['[[0, 8, 4, 12, 16], [1, 9, 5, 13, 17], [2, 10, 6, 14, 18], [3, 11, 7, 15, 19]]'], {'dtype': 'int'}), '([[0, 8, 4, 12, 16], [1, 9, 5, 13, 17], [2, 10, 6, 14, 18], [3, \n 11, 7, 15, 19]], dtype=int)\n', (9943, 10039), False, 'import numpy\n'), ((10075, 10187), 'numpy.array', 'numpy.array', (['[[2, 18, 10, 26, 34], [4, 20, 12, 28, 36], [6, 22, 14, 30, 38], [8, 24, 16,\n 32, 40]]'], {'dtype': 'int'}), '([[2, 18, 10, 26, 34], [4, 20, 12, 28, 36], [6, 22, 14, 30, 38],\n [8, 24, 16, 32, 40]], dtype=int)\n', (10086, 10187), False, 'import numpy\n'), ((10226, 10286), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), 
'((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (10237, 10286), False, 'import numpy\n'), ((10321, 10361), 'numpy.array', 'numpy.array', (['[950, 975, 1000, numpy.nan]'], {}), '([950, 975, 1000, numpy.nan])\n', (10332, 10361), False, 'import numpy\n'), ((10391, 10412), 'numpy.array', 'numpy.array', (['[20, 42]'], {}), '([20, 42])\n', (10402, 10412), False, 'import numpy\n'), ((10961, 11022), 'numpy.array', 'numpy.array', (['[[95000, 97500, 100000], [95000, 97500, 100000]]'], {}), '([[95000, 97500, 100000], [95000, 97500, 100000]])\n', (10972, 11022), False, 'import numpy\n'), ((11073, 11146), 'numpy.array', 'numpy.array', (['[[95000, 97500, 100000, 2000], [95000, 97500, 100000, 4200]]'], {}), '([[95000, 97500, 100000, 2000], [95000, 97500, 100000, 4200]])\n', (11084, 11146), False, 'import numpy\n'), ((11256, 11292), 'numpy.array', 'numpy.array', (['[400, 300, 200, 100, 0]'], {}), '([400, 300, 200, 100, 0])\n', (11267, 11292), False, 'import numpy\n'), ((11322, 11375), 'numpy.array', 'numpy.array', (['[273.15, 278.15, 283.15, 288.15, 298.15]'], {}), '([273.15, 278.15, 283.15, 288.15, 298.15])\n', (11333, 11375), False, 'import numpy\n'), ((11404, 11436), 'numpy.array', 'numpy.array', (['[-10, -5, 0, 5, 10]'], {}), '([-10, -5, 0, 5, 10])\n', (11415, 11436), False, 'import numpy\n'), ((11459, 11494), 'numpy.array', 'numpy.array', (['[20, 30, -40, 15, 7.2]'], {}), '([20, 30, -40, 15, 7.2])\n', (11470, 11494), False, 'import numpy\n'), ((11599, 11651), 'numpy.array', 'numpy.array', (['[99000, 100000, 101000, 102000, 103000]'], {}), '([99000, 100000, 101000, 102000, 103000])\n', (11610, 11651), False, 'import numpy\n'), ((11678, 11724), 'numpy.reshape', 'numpy.reshape', (['THESE_PRESSURES_PASCALS', '(1, 5)'], {}), '(THESE_PRESSURES_PASCALS, (1, 5))\n', (11691, 11724), False, 'import numpy\n'), ((11752, 11974), 'gewittergefahr.gg_utils.moisture_conversions.specific_humidity_to_dewpoint', 'moisture_conversions.specific_humidity_to_dewpoint', ([], 
{'specific_humidities_kg_kg01': 'THESE_SPEC_HUMIDITIES_KG_KG01', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(specific_humidities_kg_kg01\n =THESE_SPEC_HUMIDITIES_KG_KG01, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (11802, 11974), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((12000, 12046), 'numpy.reshape', 'numpy.reshape', (['THESE_DEWPOINTS_KELVINS', '(1, 5)'], {}), '(THESE_DEWPOINTS_KELVINS, (1, 5))\n', (12013, 12046), False, 'import numpy\n'), ((12076, 12282), 'gewittergefahr.gg_utils.moisture_conversions.dewpoint_to_relative_humidity', 'moisture_conversions.dewpoint_to_relative_humidity', ([], {'dewpoints_kelvins': 'THESE_DEWPOINTS_KELVINS', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(dewpoints_kelvins=\n THESE_DEWPOINTS_KELVINS, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (12126, 12282), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((12644, 12676), 'numpy.full', 'numpy.full', (['(1, 5, 5)', 'numpy.nan'], {}), '((1, 5, 5), numpy.nan)\n', (12654, 12676), False, 'import numpy\n'), ((13122, 13177), 'numpy.reshape', 'numpy.reshape', (['THESE_SPEC_HUMIDITIES_KG_KG01', '(1, 5, 1)'], {}), '(THESE_SPEC_HUMIDITIES_KG_KG01, (1, 5, 1))\n', (13135, 13177), False, 'import numpy\n'), ((13201, 13268), 'numpy.concatenate', 'numpy.concatenate', (['(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX)'], {'axis': '(-1)'}), '((THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1)\n', (13218, 13268), False, 'import numpy\n'), ((14168, 14200), 'numpy.full', 'numpy.full', (['(1, 5, 5)', 'numpy.nan'], {}), '((1, 5, 5), numpy.nan)\n', (14178, 14200), False, 'import numpy\n'), ((14640, 14691), 'numpy.reshape', 'numpy.reshape', (['THESE_RELATIVE_HUMIDITIES', '(1, 5, 1)'], {}), 
'(THESE_RELATIVE_HUMIDITIES, (1, 5, 1))\n', (14653, 14691), False, 'import numpy\n'), ((14715, 14782), 'numpy.concatenate', 'numpy.concatenate', (['(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX)'], {'axis': '(-1)'}), '((THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1)\n', (14732, 14782), False, 'import numpy\n'), ((15269, 15473), 'gewittergefahr.gg_utils.moisture_conversions.dewpoint_to_vapour_pressure', 'moisture_conversions.dewpoint_to_vapour_pressure', ([], {'dewpoints_kelvins': 'THESE_DEWPOINTS_KELVINS', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(dewpoints_kelvins=\n THESE_DEWPOINTS_KELVINS, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (15317, 15473), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((15528, 15753), 'gewittergefahr.gg_utils.moisture_conversions.temperature_to_virtual_temperature', 'moisture_conversions.temperature_to_virtual_temperature', ([], {'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS', 'vapour_pressures_pascals': 'THESE_VAPOUR_PRESSURES_PASCALS'}), '(temperatures_kelvins\n =THESE_TEMPERATURES_KELVINS, total_pressures_pascals=\n THESE_PRESSURES_PASCALS, vapour_pressures_pascals=\n THESE_VAPOUR_PRESSURES_PASCALS)\n', (15583, 15753), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((15802, 15972), 'gewittergefahr.gg_utils.temperature_conversions.temperatures_to_potential_temperatures', 'temperature_conversions.temperatures_to_potential_temperatures', ([], {'temperatures_kelvins': 'THESE_VIRTUAL_TEMPS_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(\n temperatures_kelvins=THESE_VIRTUAL_TEMPS_KELVINS,\n total_pressures_pascals=THESE_PRESSURES_PASCALS)\n', (15864, 15972), False, 'from gewittergefahr.gg_utils import temperature_conversions\n'), ((16002, 16049), 'numpy.reshape', 'numpy.reshape', 
(['THESE_THETA_V_KELVINS', '(1, 5, 1)'], {}), '(THESE_THETA_V_KELVINS, (1, 5, 1))\n', (16015, 16049), False, 'import numpy\n'), ((16073, 16140), 'numpy.concatenate', 'numpy.concatenate', (['(THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX)'], {'axis': '(-1)'}), '((THIS_SOUNDING_MATRIX, THIS_NEW_MATRIX), axis=-1)\n', (16090, 16140), False, 'import numpy\n'), ((16520, 16575), 'numpy.array', 'numpy.array', (['[500, 600, 700, 850, 925, 1000, numpy.nan]'], {}), '([500, 600, 700, 850, 925, 1000, numpy.nan])\n', (16531, 16575), False, 'import numpy\n'), ((16611, 16635), 'numpy.array', 'numpy.array', (['[965, 1000]'], {}), '([965, 1000])\n', (16622, 16635), False, 'import numpy\n'), ((16675, 16798), 'numpy.array', 'numpy.array', (['[[50000, 60000, 70000, 85000, 92500, 100000, 96500], [50000, 60000, 70000, \n 85000, 92500, 100000, 100000]]'], {}), '([[50000, 60000, 70000, 85000, 92500, 100000, 96500], [50000, \n 60000, 70000, 85000, 92500, 100000, 100000]])\n', (16686, 16798), False, 'import numpy\n'), ((16828, 16883), 'numpy.array', 'numpy.array', (['[5700, numpy.nan, 3090, 1500, 770, 0, 385]'], {}), '([5700, numpy.nan, 3090, 1500, 770, 0, 385])\n', (16839, 16883), False, 'import numpy\n'), ((16906, 16961), 'numpy.array', 'numpy.array', (['[numpy.nan, 30, 20, 10, 0, -10, numpy.nan]'], {}), '([numpy.nan, 30, 20, 10, 0, -10, numpy.nan])\n', (16917, 16961), False, 'import numpy\n'), ((16984, 17030), 'numpy.array', 'numpy.array', (['[30, 25, 20, numpy.nan, 10, 5, 0]'], {}), '([30, 25, 20, numpy.nan, 10, 5, 0])\n', (16995, 17030), False, 'import numpy\n'), ((17060, 17126), 'numpy.array', 'numpy.array', (['[numpy.nan, numpy.nan, 275, numpy.nan, 290, 300, 301]'], {}), '([numpy.nan, numpy.nan, 275, numpy.nan, 290, 300, 301])\n', (17071, 17126), False, 'import numpy\n'), ((17165, 17243), 'numpy.array', 'numpy.array', (['[0.001, 0.002, numpy.nan, numpy.nan, 0.005, numpy.nan, numpy.nan]'], {}), '([0.001, 0.002, numpy.nan, numpy.nan, 0.005, numpy.nan, numpy.nan])\n', (17176, 17243), False, 
'import numpy\n'), ((17641, 17730), 'numpy.array', 'numpy.array', (['[numpy.nan, numpy.nan, numpy.nan, 1600, numpy.nan, numpy.nan, numpy.nan]'], {}), '([numpy.nan, numpy.nan, numpy.nan, 1600, numpy.nan, numpy.nan,\n numpy.nan])\n', (17652, 17730), False, 'import numpy\n'), ((17944, 18004), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (17955, 18004), False, 'import numpy\n'), ((18075, 18105), 'numpy.array', 'numpy.array', (['[6, 6]'], {'dtype': 'int'}), '([6, 6], dtype=int)\n', (18086, 18105), False, 'import numpy\n'), ((18133, 18163), 'numpy.array', 'numpy.array', (['[1, 1]'], {'dtype': 'int'}), '([1, 1], dtype=int)\n', (18144, 18163), False, 'import numpy\n'), ((18636, 18695), 'numpy.array', 'numpy.array', (['[5700, 4285.73988745, 3090, 1500, 770, 0, 385]'], {}), '([5700, 4285.73988745, 3090, 1500, 770, 0, 385])\n', (18647, 18695), False, 'import numpy\n'), ((18724, 18774), 'numpy.array', 'numpy.array', (['[41.82748964, 30, 20, 10, 0, -10, -5]'], {}), '([41.82748964, 30, 20, 10, 0, -10, -5])\n', (18735, 18774), False, 'import numpy\n'), ((18797, 18845), 'numpy.array', 'numpy.array', (['[30, 25, 20, 13.14655172, 10, 5, 0]'], {}), '([30, 25, 20, 13.14655172, 10, 5, 0])\n', (18808, 18845), False, 'import numpy\n'), ((18875, 18945), 'numpy.array', 'numpy.array', (['[258.125, 267.26892314, 275, 285.28017241, 290, 300, 301]'], {}), '([258.125, 267.26892314, 275, 285.28017241, 290, 300, 301])\n', (18886, 18945), False, 'import numpy\n'), ((18984, 19071), 'numpy.array', 'numpy.array', (['[0.001, 0.002, 0.00302033, 0.00437709, 0.005, 0.00565705, 0.00532852]'], {}), '([0.001, 0.002, 0.00302033, 0.00437709, 0.005, 0.00565705, \n 0.00532852])\n', (18995, 19071), False, 'import numpy\n'), ((19286, 19328), 'numpy.expand_dims', 'numpy.expand_dims', (['THIS_SOUNDING_MATRIX', '(0)'], {}), '(THIS_SOUNDING_MATRIX, 0)\n', (19303, 19328), False, 'import numpy\n'), ((19388, 19415), 
'numpy.array', 'numpy.array', (['[6]'], {'dtype': 'int'}), '([6], dtype=int)\n', (19399, 19415), False, 'import numpy\n'), ((19443, 19470), 'numpy.array', 'numpy.array', (['[1]'], {'dtype': 'int'}), '([1], dtype=int)\n', (19454, 19470), False, 'import numpy\n'), ((19500, 19518), 'numpy.array', 'numpy.array', (['[965]'], {}), '([965])\n', (19511, 19518), False, 'import numpy\n'), ((20068, 20086), 'numpy.array', 'numpy.array', (['[385]'], {}), '([385])\n', (20079, 20086), False, 'import numpy\n'), ((20119, 20164), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_NANS'], {}), '(SOUNDING_DICT_P_COORDS_NO_NANS)\n', (20132, 20164), False, 'import copy\n'), ((20295, 20334), 'numpy.array', 'numpy.array', (['[0, 385, 1115, 1910, 2705]'], {}), '([0, 385, 1115, 1910, 2705])\n', (20306, 20334), False, 'import numpy\n'), ((20361, 20414), 'numpy.array', 'numpy.array', (['[96500, 92500, 85000, 77136.2431, 70000]'], {}), '([96500, 92500, 85000, 77136.2431, 70000])\n', (20372, 20414), False, 'import numpy\n'), ((20437, 20477), 'numpy.array', 'numpy.array', (['[-5, 0, 10, 15.2425046, 20]'], {}), '([-5, 0, 10, 15.2425046, 20])\n', (20448, 20477), False, 'import numpy\n'), ((20500, 20549), 'numpy.array', 'numpy.array', (['[0, 10, 13.14655172, 16.7394751, 20]'], {}), '([0, 10, 13.14655172, 16.7394751, 20])\n', (20511, 20549), False, 'import numpy\n'), ((20579, 20633), 'numpy.array', 'numpy.array', (['[301, 290, 285.28017241, 279.890787, 275]'], {}), '([301, 290, 285.28017241, 279.890787, 275])\n', (20590, 20633), False, 'import numpy\n'), ((20672, 20743), 'numpy.array', 'numpy.array', (['[0.00532852, 0.005, 0.00437709, 0.00366580795, 0.00302033]'], {}), '([0.00532852, 0.005, 0.00437709, 0.00366580795, 0.00302033])\n', (20683, 20743), False, 'import numpy\n'), ((20783, 21005), 'gewittergefahr.gg_utils.moisture_conversions.specific_humidity_to_dewpoint', 'moisture_conversions.specific_humidity_to_dewpoint', ([], {'specific_humidities_kg_kg01': 
'THESE_SPEC_HUMIDITIES_KG_KG01', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(specific_humidities_kg_kg01\n =THESE_SPEC_HUMIDITIES_KG_KG01, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (20833, 21005), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((21058, 21264), 'gewittergefahr.gg_utils.moisture_conversions.dewpoint_to_relative_humidity', 'moisture_conversions.dewpoint_to_relative_humidity', ([], {'dewpoints_kelvins': 'THESE_DEWPOINTS_KELVINS', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(dewpoints_kelvins=\n THESE_DEWPOINTS_KELVINS, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (21108, 21264), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((21322, 21526), 'gewittergefahr.gg_utils.moisture_conversions.dewpoint_to_vapour_pressure', 'moisture_conversions.dewpoint_to_vapour_pressure', ([], {'dewpoints_kelvins': 'THESE_DEWPOINTS_KELVINS', 'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(dewpoints_kelvins=\n THESE_DEWPOINTS_KELVINS, temperatures_kelvins=\n THESE_TEMPERATURES_KELVINS, total_pressures_pascals=THESE_PRESSURES_PASCALS\n )\n', (21370, 21526), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((21581, 21806), 'gewittergefahr.gg_utils.moisture_conversions.temperature_to_virtual_temperature', 'moisture_conversions.temperature_to_virtual_temperature', ([], {'temperatures_kelvins': 'THESE_TEMPERATURES_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS', 'vapour_pressures_pascals': 'THESE_VAPOUR_PRESSURES_PASCALS'}), '(temperatures_kelvins\n =THESE_TEMPERATURES_KELVINS, total_pressures_pascals=\n THESE_PRESSURES_PASCALS, vapour_pressures_pascals=\n 
THESE_VAPOUR_PRESSURES_PASCALS)\n', (21636, 21806), False, 'from gewittergefahr.gg_utils import moisture_conversions\n'), ((21855, 22025), 'gewittergefahr.gg_utils.temperature_conversions.temperatures_to_potential_temperatures', 'temperature_conversions.temperatures_to_potential_temperatures', ([], {'temperatures_kelvins': 'THESE_VIRTUAL_TEMPS_KELVINS', 'total_pressures_pascals': 'THESE_PRESSURES_PASCALS'}), '(\n temperatures_kelvins=THESE_VIRTUAL_TEMPS_KELVINS,\n total_pressures_pascals=THESE_PRESSURES_PASCALS)\n', (21917, 22025), False, 'from gewittergefahr.gg_utils import temperature_conversions\n'), ((22561, 22603), 'numpy.expand_dims', 'numpy.expand_dims', (['THIS_SOUNDING_MATRIX', '(0)'], {}), '(THIS_SOUNDING_MATRIX, 0)\n', (22578, 22603), False, 'import numpy\n'), ((11535, 11575), 'numpy.array', 'numpy.array', (['[0.1, 1.0, 5.0, 10.0, 20.0]'], {}), '([0.1, 1.0, 5.0, 10.0, 20.0])\n', (11546, 11575), False, 'import numpy\n'), ((17467, 17612), 'numpy.vstack', 'numpy.vstack', (['(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,\n THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)'], {}), '((THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01,\n THESE_V_WINDS_M_S01, THESE_TEMPERATURES_KELVINS,\n THESE_SPEC_HUMIDITIES_KG_KG01))\n', (17479, 17612), False, 'import numpy\n'), ((17770, 17915), 'numpy.vstack', 'numpy.vstack', (['(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,\n THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)'], {}), '((THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01,\n THESE_V_WINDS_M_S01, THESE_TEMPERATURES_KELVINS,\n THESE_SPEC_HUMIDITIES_KG_KG01))\n', (17782, 17915), False, 'import numpy\n'), ((19113, 19258), 'numpy.vstack', 'numpy.vstack', (['(THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,\n THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01)'], {}), '((THESE_HEIGHTS_METRES, THESE_U_WINDS_M_S01,\n THESE_V_WINDS_M_S01, THESE_TEMPERATURES_KELVINS,\n THESE_SPEC_HUMIDITIES_KG_KG01))\n', (19125, 
19258), False, 'import numpy\n'), ((22329, 22531), 'numpy.vstack', 'numpy.vstack', (['(THESE_PRESSURES_PASCALS, THESE_U_WINDS_M_S01, THESE_V_WINDS_M_S01,\n THESE_TEMPERATURES_KELVINS, THESE_SPEC_HUMIDITIES_KG_KG01,\n THESE_RELATIVE_HUMIDITIES, THESE_THETA_V_KELVINS)'], {}), '((THESE_PRESSURES_PASCALS, THESE_U_WINDS_M_S01,\n THESE_V_WINDS_M_S01, THESE_TEMPERATURES_KELVINS,\n THESE_SPEC_HUMIDITIES_KG_KG01, THESE_RELATIVE_HUMIDITIES,\n THESE_THETA_V_KELVINS))\n', (22341, 22531), False, 'import numpy\n'), ((39219, 39234), 'unittest.main', 'unittest.main', ([], {}), '()\n', (39232, 39234), False, 'import unittest\n'), ((26109, 26168), 'numpy.array', 'numpy.array', (['first_sounding_dict[soundings.FIELD_NAMES_KEY]'], {}), '(first_sounding_dict[soundings.FIELD_NAMES_KEY])\n', (26120, 26168), False, 'import numpy\n'), ((26503, 26563), 'numpy.array', 'numpy.array', (['second_sounding_dict[soundings.FIELD_NAMES_KEY]'], {}), '(second_sounding_dict[soundings.FIELD_NAMES_KEY])\n', (26514, 26563), False, 'import numpy\n'), ((31019, 31157), 'gewittergefahr.gg_utils.soundings._create_target_points_for_interp', 'soundings._create_target_points_for_interp', ([], {'storm_object_table': 'DUMMY_STORM_OBJECT_TABLE', 'lead_times_seconds': 'UNIQUE_LEAD_TIMES_SECONDS'}), '(storm_object_table=\n DUMMY_STORM_OBJECT_TABLE, lead_times_seconds=UNIQUE_LEAD_TIMES_SECONDS)\n', (31061, 31157), False, 'from gewittergefahr.gg_utils import soundings\n'), ((31537, 31759), 'gewittergefahr.gg_utils.soundings._convert_interp_table_to_soundings', 'soundings._convert_interp_table_to_soundings', ([], {'interp_table': 'INTERP_TABLE_NO_SURFACE', 'target_point_table': 'TARGET_POINT_TABLE', 'model_name': 'MODEL_NAME', 'include_surface': '(False)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB'}), '(interp_table=\n INTERP_TABLE_NO_SURFACE, target_point_table=TARGET_POINT_TABLE,\n model_name=MODEL_NAME, include_surface=False, minimum_pressure_mb=\n MINIMUM_PRESSURE_MB)\n', (31581, 31759), False, 'from 
gewittergefahr.gg_utils import soundings\n'), ((32164, 32387), 'gewittergefahr.gg_utils.soundings._convert_interp_table_to_soundings', 'soundings._convert_interp_table_to_soundings', ([], {'interp_table': 'INTERP_TABLE_WITH_SURFACE', 'target_point_table': 'TARGET_POINT_TABLE', 'model_name': 'MODEL_NAME', 'include_surface': '(True)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB'}), '(interp_table=\n INTERP_TABLE_WITH_SURFACE, target_point_table=TARGET_POINT_TABLE,\n model_name=MODEL_NAME, include_surface=True, minimum_pressure_mb=\n MINIMUM_PRESSURE_MB)\n', (32208, 32387), False, 'from gewittergefahr.gg_utils import soundings\n'), ((32786, 32845), 'gewittergefahr.gg_utils.soundings._get_pressures', 'soundings._get_pressures', (['SOUNDING_DICT_P_COORDS_NO_SURFACE'], {}), '(SOUNDING_DICT_P_COORDS_NO_SURFACE)\n', (32810, 32845), False, 'from gewittergefahr.gg_utils import soundings\n'), ((33221, 33282), 'gewittergefahr.gg_utils.soundings._get_pressures', 'soundings._get_pressures', (['SOUNDING_DICT_P_COORDS_WITH_SURFACE'], {}), '(SOUNDING_DICT_P_COORDS_WITH_SURFACE)\n', (33245, 33282), False, 'from gewittergefahr.gg_utils import soundings\n'), ((36647, 36719), 'gewittergefahr.gg_utils.soundings.check_field_name', 'soundings.check_field_name', (['soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME'], {}), '(soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME)\n', (36673, 36719), False, 'from gewittergefahr.gg_utils import soundings\n'), ((38157, 38451), 'gewittergefahr.gg_utils.soundings.find_sounding_file', 'soundings.find_sounding_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'spc_date_string': 'SPC_DATE_STRING', 'lead_time_seconds': 'LEAD_TIME_IN_FILES_SEC', 'lag_time_for_convective_contamination_sec': 'LAG_TIME_IN_FILES_SEC', 'init_time_unix_sec': 'FILE_TIME_UNIX_SEC', 'raise_error_if_missing': '(False)'}), '(top_directory_name=TOP_DIRECTORY_NAME,\n spc_date_string=SPC_DATE_STRING, lead_time_seconds=\n LEAD_TIME_IN_FILES_SEC, lag_time_for_convective_contamination_sec=\n 
LAG_TIME_IN_FILES_SEC, init_time_unix_sec=FILE_TIME_UNIX_SEC,\n raise_error_if_missing=False)\n', (38185, 38451), False, 'from gewittergefahr.gg_utils import soundings\n'), ((38786, 39067), 'gewittergefahr.gg_utils.soundings.find_sounding_file', 'soundings.find_sounding_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'spc_date_string': 'SPC_DATE_STRING', 'lead_time_seconds': 'LEAD_TIME_IN_FILES_SEC', 'lag_time_for_convective_contamination_sec': 'LAG_TIME_IN_FILES_SEC', 'init_time_unix_sec': 'None', 'raise_error_if_missing': '(False)'}), '(top_directory_name=TOP_DIRECTORY_NAME,\n spc_date_string=SPC_DATE_STRING, lead_time_seconds=\n LEAD_TIME_IN_FILES_SEC, lag_time_for_convective_contamination_sec=\n LAG_TIME_IN_FILES_SEC, init_time_unix_sec=None, raise_error_if_missing=\n False)\n', (38814, 39067), False, 'from gewittergefahr.gg_utils import soundings\n'), ((3903, 3927), 'numpy.array', 'numpy.array', (['[numpy.nan]'], {}), '([numpy.nan])\n', (3914, 3927), False, 'import numpy\n'), ((24964, 25088), 'numpy.array_equal', 'numpy.array_equal', (['first_target_point_table[this_column_name].values', 'second_target_point_table[this_column_name].values'], {}), '(first_target_point_table[this_column_name].values,\n second_target_point_table[this_column_name].values)\n', (24981, 25088), False, 'import numpy\n'), ((25164, 25301), 'numpy.allclose', 'numpy.allclose', (['first_target_point_table[this_column_name].values', 'second_target_point_table[this_column_name].values'], {'atol': 'TOLERANCE'}), '(first_target_point_table[this_column_name].values,\n second_target_point_table[this_column_name].values, atol=TOLERANCE)\n', (25178, 25301), False, 'import numpy\n'), ((28192, 28342), 'gewittergefahr.gg_utils.soundings._get_nwp_fields_for_sounding', 'soundings._get_nwp_fields_for_sounding', ([], {'model_name': 'MODEL_NAME', 'return_table': '(False)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB', 'include_surface': '(False)'}), '(model_name=MODEL_NAME, return_table=\n False, 
minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=False)\n', (28230, 28342), False, 'from gewittergefahr.gg_utils import soundings\n'), ((28904, 29053), 'gewittergefahr.gg_utils.soundings._get_nwp_fields_for_sounding', 'soundings._get_nwp_fields_for_sounding', ([], {'model_name': 'MODEL_NAME', 'return_table': '(False)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB', 'include_surface': '(True)'}), '(model_name=MODEL_NAME, return_table=\n False, minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=True)\n', (28942, 29053), False, 'from gewittergefahr.gg_utils import soundings\n'), ((29584, 29733), 'gewittergefahr.gg_utils.soundings._get_nwp_fields_for_sounding', 'soundings._get_nwp_fields_for_sounding', ([], {'model_name': 'MODEL_NAME', 'return_table': '(True)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB', 'include_surface': '(False)'}), '(model_name=MODEL_NAME, return_table=\n True, minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=False)\n', (29622, 29733), False, 'from gewittergefahr.gg_utils import soundings\n'), ((30346, 30494), 'gewittergefahr.gg_utils.soundings._get_nwp_fields_for_sounding', 'soundings._get_nwp_fields_for_sounding', ([], {'model_name': 'MODEL_NAME', 'return_table': '(True)', 'minimum_pressure_mb': 'MINIMUM_PRESSURE_MB', 'include_surface': '(True)'}), '(model_name=MODEL_NAME, return_table=\n True, minimum_pressure_mb=MINIMUM_PRESSURE_MB, include_surface=True)\n', (30384, 30494), False, 'from gewittergefahr.gg_utils import soundings\n'), ((32884, 32984), 'numpy.allclose', 'numpy.allclose', (['this_pressure_matrix_pascals', 'PRESSURE_MATRIX_NO_SURFACE_PASCALS'], {'atol': 'TOLERANCE'}), '(this_pressure_matrix_pascals,\n PRESSURE_MATRIX_NO_SURFACE_PASCALS, atol=TOLERANCE)\n', (32898, 32984), False, 'import numpy\n'), ((33321, 33423), 'numpy.allclose', 'numpy.allclose', (['this_pressure_matrix_pascals', 'PRESSURE_MATRIX_WITH_SURFACE_PASCALS'], {'atol': 'TOLERANCE'}), '(this_pressure_matrix_pascals,\n 
PRESSURE_MATRIX_WITH_SURFACE_PASCALS, atol=TOLERANCE)\n', (33335, 33423), False, 'import numpy\n'), ((33873, 33984), 'numpy.allclose', 'numpy.allclose', (['this_dewpoint_matrix_kelvins', 'DEWPOINT_MATRIX_KELVINS'], {'atol': 'TOLERANCE_FOR_CONVERTED_VALUES'}), '(this_dewpoint_matrix_kelvins, DEWPOINT_MATRIX_KELVINS, atol=\n TOLERANCE_FOR_CONVERTED_VALUES)\n', (33887, 33984), False, 'import numpy\n'), ((34579, 34690), 'numpy.allclose', 'numpy.allclose', (['this_dewpoint_matrix_kelvins', 'DEWPOINT_MATRIX_KELVINS'], {'atol': 'TOLERANCE_FOR_CONVERTED_VALUES'}), '(this_dewpoint_matrix_kelvins, DEWPOINT_MATRIX_KELVINS, atol=\n TOLERANCE_FOR_CONVERTED_VALUES)\n', (34593, 34690), False, 'import numpy\n'), ((36942, 37000), 'gewittergefahr.gg_utils.soundings.check_field_name', 'soundings.check_field_name', (['soundings.STORM_ELEVATIONS_KEY'], {}), '(soundings.STORM_ELEVATIONS_KEY)\n', (36968, 37000), False, 'from gewittergefahr.gg_utils import soundings\n'), ((37242, 37307), 'gewittergefahr.gg_utils.soundings.field_name_to_verbose', 'soundings.field_name_to_verbose', ([], {'field_name': 'f', 'include_units': '(True)'}), '(field_name=f, include_units=True)\n', (37273, 37307), False, 'from gewittergefahr.gg_utils import soundings\n'), ((37713, 37779), 'gewittergefahr.gg_utils.soundings.field_name_to_verbose', 'soundings.field_name_to_verbose', ([], {'field_name': 'f', 'include_units': '(False)'}), '(field_name=f, include_units=False)\n', (37744, 37779), False, 'from gewittergefahr.gg_utils import soundings\n'), ((31872, 31920), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_SURFACE'], {}), '(SOUNDING_DICT_P_COORDS_NO_SURFACE)\n', (31885, 31920), False, 'import copy\n'), ((32512, 32562), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_WITH_SURFACE'], {}), '(SOUNDING_DICT_P_COORDS_WITH_SURFACE)\n', (32525, 32562), False, 'import copy\n'), ((33726, 33771), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_SPFH'], {}), 
'(SOUNDING_DICT_P_COORDS_NO_SPFH)\n', (33739, 33771), False, 'import copy\n'), ((34104, 34151), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_WITH_SPFH'], {}), '(SOUNDING_DICT_P_COORDS_WITH_SPFH)\n', (34117, 34151), False, 'import copy\n'), ((34434, 34477), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_RH'], {}), '(SOUNDING_DICT_P_COORDS_NO_RH)\n', (34447, 34477), False, 'import copy\n'), ((34810, 34855), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_WITH_RH'], {}), '(SOUNDING_DICT_P_COORDS_WITH_RH)\n', (34823, 34855), False, 'import copy\n'), ((35105, 35153), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_THETA_V'], {}), '(SOUNDING_DICT_P_COORDS_NO_THETA_V)\n', (35118, 35153), False, 'import copy\n'), ((35378, 35428), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_WITH_THETA_V'], {}), '(SOUNDING_DICT_P_COORDS_WITH_THETA_V)\n', (35391, 35428), False, 'import copy\n'), ((35658, 35705), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_WITH_NANS'], {}), '(SOUNDING_DICT_P_COORDS_WITH_NANS)\n', (35671, 35705), False, 'import copy\n'), ((35938, 35983), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_P_COORDS_NO_NANS'], {}), '(SOUNDING_DICT_P_COORDS_NO_NANS)\n', (35951, 35983), False, 'import copy\n'), ((36222, 36266), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_PRESSURE_COORDS'], {}), '(SOUNDING_DICT_PRESSURE_COORDS)\n', (36235, 36266), False, 'import copy\n'), ((36427, 36469), 'copy.deepcopy', 'copy.deepcopy', (['SOUNDING_DICT_HEIGHT_COORDS'], {}), '(SOUNDING_DICT_HEIGHT_COORDS)\n', (36440, 36469), False, 'import copy\n'), ((27407, 27521), 'numpy.allclose', 'numpy.allclose', (['first_sounding_dict[this_key]', 'second_sounding_dict[this_key]'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(first_sounding_dict[this_key], second_sounding_dict[this_key\n ], atol=TOLERANCE, equal_nan=True)\n', (27421, 27521), False, 'import numpy\n')] |
import numpy as np
from hips import LPStatus, HeuristicStatus
from hips.heuristics._heuristic import Heuristic
from hips.models import MIPModel, Variable, HIPSArray
class SimpleRounding(Heuristic):
    """
    Rounding heuristic: solves the LP relaxation once and rounds every
    binary/integer variable of the relaxed solution to the nearest integer.
    """

    def __init__(self, mip_model: MIPModel):
        super().__init__(mip_model)
        # Maps each discrete variable to its rounded solution value.
        self._x = {}

    def compute(self, max_iter=None):
        """Solve the LP relaxation and round all discrete variables.

        :param max_iter: unused; kept for interface compatibility with
            other heuristics.
        :raises Exception: if the LP relaxation has no optimal solution.
        """
        self.relaxation.optimize()
        if self.relaxation.get_status() != LPStatus.OPTIMAL:
            raise Exception("LP relaxation does not have an optimal solution.")
        for variable in self.binary + self.integer:
            relaxed_value = self.relaxation.variable_solution(variable).to_numpy()
            # np.rint rounds to the nearest integer (half to even).
            self._x[variable] = HIPSArray(np.rint(relaxed_value))
        message = (
            "SimpleRounding found an integer feasible solution."
            if self.mip_model.is_feasible(self._x)
            else "SimpleRounding did not find an integer feasible solution."
        )
        self.logger.info(message)

    def variable_solution(self, var: Variable):
        """Return the rounded value computed for ``var``."""
        return self._x[var]

    def get_objective_value(self) -> float:
        """Evaluate the relaxation objective at the rounded point."""
        return self.relaxation.objective.eval(self._x).reshape(-1).to_numpy()[0]

    def get_status(self):
        """Translate the LP status plus feasibility into a HeuristicStatus."""
        lp_status = self.relaxation.get_status()
        if lp_status == LPStatus.ERROR:
            return HeuristicStatus.ERROR
        if lp_status != LPStatus.OPTIMAL:
            return HeuristicStatus.NO_SOL_FOUND
        return (HeuristicStatus.SOL_FOUND
                if self.mip_model.is_feasible(self._x)
                else HeuristicStatus.NO_SOL_FOUND)
| [
"numpy.rint"
] | [((826, 844), 'numpy.rint', 'np.rint', (['var_value'], {}), '(var_value)\n', (833, 844), True, 'import numpy as np\n')] |
import numpy as np
import torch as t
def bbox_iou(bbox1,bbox2):
    """Compute the IoU of two boxes bbox1=(x1,y1,x2,y2) and bbox2=(x3,y3,x4,y4)."""
    # Corners of the intersection rectangle.
    ix1 = max(bbox1[0], bbox2[0])
    iy1 = max(bbox1[1], bbox2[1])
    ix2 = min(bbox1[2], bbox2[2])
    iy2 = min(bbox1[3], bbox2[3])
    # Disjoint or zero-area overlap -> IoU is defined as 0 here.
    if ix2 <= ix1 or iy2 <= iy1:
        return 0
    area_intersect = (ix2 - ix1) * (iy2 - iy1)
    area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])  # area of bbox1
    area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])  # area of bbox2
    # IoU = intersection / union
    return area_intersect / (area1 + area2 - area_intersect)
def kms_result_anchor(device):
    """Return the k-means anchor priors as a (3, 2) float32 tensor of
    (height, width) pairs in pixels, placed on ``device``."""
    priors = [[101.30232 , 65.9907  ],
              [ 84.436264, 106.35694],
              [ 48.126507, 66.313255]]
    return t.tensor(priors, dtype=t.float32, device=device)
def xywh2xxyy(bbox):
    """Convert a (cx, cy, w, h) box tensor to (x1, y1, x2, y2), keeping
    the input's dtype and device."""
    cx, cy = bbox[0], bbox[1]
    half_w, half_h = bbox[2] / 2, bbox[3] / 2
    return t.tensor([cx - half_w, cy - half_h, cx + half_w, cy + half_h],
                    dtype=bbox.dtype, device=bbox.device)
def anchor_generate(kms_anchor,hh = 4,ww = 4,h = 128,w = 128):
dtype = kms_anchor.dtype
device = kms_anchor.device
assert h == w , 'input image is not a square'
# 归一化anchor的长宽
kms_anchor = kms_anchor/h
shifts_x = ((t.arange(0, ww)+0.5) / ww).to(dtype=dtype,device=device)
shifts_y = ((t.arange(0, hh) + 0.5) / hh).to(dtype=dtype,device=device)
shifts_x,shifts_y = t.meshgrid(shifts_x,shifts_y)
shifts_x = shifts_x.reshape(-1,1)
shifts_y = shifts_y.reshape(-1,1)
anchor_list = []
for i in range(kms_anchor.shape[0]):
ah,aw = kms_anchor[i][0],kms_anchor[i][1]
anchor = t.cat((shifts_x,shifts_y,t.ones(shifts_x.shape,dtype=dtype,device=device)*aw,
t.ones(shifts_x.shape,dtype=dtype,device=device)*ah),dim=1)
anchor_list.append(anchor)
# print(shifts_x,shifts_y)
return t.cat(anchor_list,dim=0).view(3,16,4).permute(0,2,1).contiguous().view(3,4,4,4)
def anchor_encode(anchors,bbox,anchor_num_per_grid,feature_map_size,img_size,dtype,device): # bbox: (B,4)
    """Encode ground-truth boxes into per-anchor training targets.

    Parameters
    ----------
    anchors : (anchor_num_per_grid, 4, hh, ww) normalized (cx, cy, w, h) grid
    bbox    : (B, 4) ground-truth boxes as (x1, y1, x2, y2) in pixels
    feature_map_size, img_size : (hh, ww) and (h, w); image must be square

    Returns
    -------
    gt : (B, anchor_num_per_grid, 5, hh, ww) tensor; channel 0 is the
        objectness flag (1 at the best-IoU anchor of the cell containing the
        box center), channels 1:5 are the regression offsets.
    """
    hh,ww = feature_map_size
    h,w = img_size
    assert h == w, 'input image is not a square'
    bbox_h = bbox[:, 3] - bbox[:, 1]
    bbox_w = bbox[:, 2] - bbox[:, 0]
    bbox_cx = bbox_w / 2 + bbox[:, 0]
    bbox_cy = bbox_h / 2 + bbox[:, 1]
    bbox_xywh = t.cat((bbox_cx.reshape(-1,1),bbox_cy.reshape(-1,1),
                       bbox_w.reshape(-1,1),bbox_h.reshape(-1,1)),dim=1)
    gt = t.zeros((bbox.size()[0],anchor_num_per_grid,5,hh,ww),dtype=dtype,device=device)
    # lower edges of the grid cells in image coordinates
    grid_y = np.arange(0, h, h // hh)
    grid_x = np.arange(0, w, w // ww)
    for i in range(bbox.size()[0]):
        # BUG FIX: the original reused the names iy/ix for both the edge
        # arrays and the selected scalar index, clobbering the arrays and
        # breaking every batch element after the first.
        iy = np.where(grid_y <= float(bbox_cy[i]))[0][-1]
        ix = np.where(grid_x <= float(bbox_cx[i]))[0][-1]
        best = 0.
        best_indice = -1
        # pick the anchor of this cell with the highest IoU against the gt box
        for ia in range(anchor_num_per_grid):
            iou = bbox_iou(xywh2xxyy(anchors[ia,:,iy,ix]),xywh2xxyy(bbox_xywh[i]/h))
            if iou > best:
                best = iou
                best_indice = ia
        # NOTE(review): if no anchor overlaps, best_indice stays -1 and the
        # last anchor slot is written (original behavior, kept).
        gt[i,best_indice, 0, iy,ix] = 1.
        gt[i,best_indice,1:5, iy,ix] = xywh2offset(anchors[best_indice,:,iy,ix],bbox_xywh[i]/h)
    return gt
def xywh2offset(anchor,bbox):
    """Encode a ground-truth box relative to an anchor.

    Both arguments are (cx, cy, w, h) tensors; the result is
    ((gcx-acx)/acx, (gcy-acy)/acy, log(gw/aw), log(gh/ah)).
    """
    acx, acy, aw, ah = anchor[0], anchor[1], anchor[2], anchor[3]
    gcx, gcy, gw, gh = bbox[0], bbox[1], bbox[2], bbox[3]
    return t.tensor([(gcx - acx) / acx,
                     (gcy - acy) / acy,
                     t.log(gw / aw),
                     t.log(gh / ah)])
def offset2xywh(anchor,offset):
    """Decode (dx, dy, log dw, log dh) offsets against an anchor into an
    (x1, y1, x2, y2) box tensor (inverse of xywh2offset, but returning
    corner coordinates)."""
    acx, acy, aw, ah = anchor[0], anchor[1], anchor[2], anchor[3]
    cx = offset[0] * acx + acx
    cy = offset[1] * acy + acy
    half_w = t.exp(offset[2]) * aw / 2
    half_h = t.exp(offset[3]) * ah / 2
    return t.tensor([cx - half_w, cy - half_h, cx + half_w, cy + half_h])
def anchor_decode(objects,scores,offsets,anchors):# offsets:[anchor_num_per_grid,4,hh,ww]
    # objects:[anchor_num_per_grid,hh,ww],score:[class_num]
    """Pick the box at the most confident anchor position plus the top class.

    Parameters
    ----------
    objects : objectness scores, shape [anchor_num_per_grid, hh, ww]
    scores  : class scores; argmax is taken over dim 0 (assumed 1-D here --
        TODO confirm against the caller)
    offsets : regression offsets, shape [anchor_num_per_grid, 4, hh, ww]
    anchors : anchor grid, shape [anchor_num_per_grid, 4, hh, ww]

    Returns
    -------
    (loc, score) : the decoded (x1, y1, x2, y2) box at the highest-objectness
        anchor position, and the argmax class index.
    """
    anchor_num_per_gird,_,hh,ww = offsets.shape
    # flatten objectness so a single argmax selects one (anchor, cell) slot
    objects = objects.view(anchor_num_per_gird*hh*ww)
    score = t.argmax(scores,dim=0)
    predicts = t.zeros(offsets.shape,dtype=offsets.dtype,device=offsets.device)
    # decode every anchor cell back to corner coordinates (Python triple loop)
    for ia in range(anchor_num_per_gird):
        for ih in range(hh):
            for iw in range(ww):
                predicts[ia,:,ih,iw] = offset2xywh(anchors[ia,:,ih,iw],offsets[ia,:,ih,iw])
    # reorder to [anchor*hh*ww, 4] so rows line up with the flattened objectness
    predicts = predicts.permute(0, 2, 3, 1).contiguous().view(anchor_num_per_gird * hh * ww, 4)
    # index of the most confident location
    o_max = t.max(objects,dim=0)[1]
    loc = predicts[o_max,:]
    return loc,score
if __name__ == '__main__':
    # Smoke test: requires CUDA and the project-local `dataset` module.
    anchors = anchor_generate(kms_anchor=kms_result_anchor(t.device('cuda')))
    print(anchors)
    print(anchors.shape)
    from dataset import tiny_dataset
    dataset = tiny_dataset()
    # print(len(dataset))
    from torch.utils.data import DataLoader
    dataloader = DataLoader(dataset, batch_size=2, shuffle=False)
    # take only the first batch of ground-truth boxes
    for each in dataloader:
        bbox = each['bbox'].to('cuda')
        break
    # encode the batch against the default 3-anchor, 4x4 grid, 128x128 setup
    gt = anchor_encode(anchors,bbox,3, (4, 4), (128, 128),t.float32,t.device('cuda'))
    print(gt[:,:,0])
    print(gt.shape)
    batch_size = bbox.size()[0]
    ww = 4
    hh = 4
    # for i in range(batch_size):
    #     for ix in range(ww):
    #         for iy in range(hh):
    #             if gt[i, 0, iy, ix] == 1.:
# print(gt[i, 1:5, iy, ix]) | [
"dataset.tiny_dataset",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.argmax",
"torch.cat",
"torch.meshgrid",
"torch.exp",
"numpy.arange",
"torch.max",
"torch.arange",
"torch.device",
"torch.zeros",
"torch.log",
"torch.tensor"
] | [((986, 1103), 'torch.tensor', 't.tensor', (['[[101.30232, 65.9907], [84.436264, 106.35694], [48.126507, 66.313255]]'], {'dtype': 't.float32', 'device': 'device'}), '([[101.30232, 65.9907], [84.436264, 106.35694], [48.126507, \n 66.313255]], dtype=t.float32, device=device)\n', (994, 1103), True, 'import torch as t\n'), ((1202, 1302), 'torch.tensor', 't.tensor', (['[cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]'], {'dtype': 'bbox.dtype', 'device': 'bbox.device'}), '([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dtype=bbox.dtype,\n device=bbox.device)\n', (1210, 1302), True, 'import torch as t\n'), ((1677, 1707), 'torch.meshgrid', 't.meshgrid', (['shifts_x', 'shifts_y'], {}), '(shifts_x, shifts_y)\n', (1687, 1707), True, 'import torch as t\n'), ((2854, 2878), 'numpy.arange', 'np.arange', (['(0)', 'h', '(h // hh)'], {}), '(0, h, h // hh)\n', (2863, 2878), True, 'import numpy as np\n'), ((2888, 2912), 'numpy.arange', 'np.arange', (['(0)', 'w', '(w // ww)'], {}), '(0, w, w // ww)\n', (2897, 2912), True, 'import numpy as np\n'), ((3693, 3707), 'torch.log', 't.log', (['(gw / aw)'], {}), '(gw / aw)\n', (3698, 3707), True, 'import torch as t\n'), ((3720, 3734), 'torch.log', 't.log', (['(gh / ah)'], {}), '(gh / ah)\n', (3725, 3734), True, 'import torch as t\n'), ((3745, 3791), 'torch.tensor', 't.tensor', (['[offset0, offset1, offset2, offset3]'], {}), '([offset0, offset1, offset2, offset3])\n', (3753, 3791), True, 'import torch as t\n'), ((4091, 4117), 'torch.tensor', 't.tensor', (['[x1, y1, x2, y2]'], {}), '([x1, y1, x2, y2])\n', (4099, 4117), True, 'import torch as t\n'), ((4381, 4404), 'torch.argmax', 't.argmax', (['scores'], {'dim': '(0)'}), '(scores, dim=0)\n', (4389, 4404), True, 'import torch as t\n'), ((4420, 4486), 'torch.zeros', 't.zeros', (['offsets.shape'], {'dtype': 'offsets.dtype', 'device': 'offsets.device'}), '(offsets.shape, dtype=offsets.dtype, device=offsets.device)\n', (4427, 4486), True, 'import torch as t\n'), ((5069, 5083), 
'dataset.tiny_dataset', 'tiny_dataset', ([], {}), '()\n', (5081, 5083), False, 'from dataset import tiny_dataset\n'), ((5172, 5220), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(2)', 'shuffle': '(False)'}), '(dataset, batch_size=2, shuffle=False)\n', (5182, 5220), False, 'from torch.utils.data import DataLoader\n'), ((3954, 3970), 'torch.exp', 't.exp', (['offset[2]'], {}), '(offset[2])\n', (3959, 3970), True, 'import torch as t\n'), ((3984, 4000), 'torch.exp', 't.exp', (['offset[3]'], {}), '(offset[3])\n', (3989, 4000), True, 'import torch as t\n'), ((4791, 4812), 'torch.max', 't.max', (['objects'], {'dim': '(0)'}), '(objects, dim=0)\n', (4796, 4812), True, 'import torch as t\n'), ((5371, 5387), 'torch.device', 't.device', (['"""cuda"""'], {}), "('cuda')\n", (5379, 5387), True, 'import torch as t\n'), ((4953, 4969), 'torch.device', 't.device', (['"""cuda"""'], {}), "('cuda')\n", (4961, 4969), True, 'import torch as t\n'), ((1520, 1535), 'torch.arange', 't.arange', (['(0)', 'ww'], {}), '(0, ww)\n', (1528, 1535), True, 'import torch as t\n'), ((1594, 1609), 'torch.arange', 't.arange', (['(0)', 'hh'], {}), '(0, hh)\n', (1602, 1609), True, 'import torch as t\n'), ((1938, 1988), 'torch.ones', 't.ones', (['shifts_x.shape'], {'dtype': 'dtype', 'device': 'device'}), '(shifts_x.shape, dtype=dtype, device=device)\n', (1944, 1988), True, 'import torch as t\n'), ((2015, 2065), 'torch.ones', 't.ones', (['shifts_x.shape'], {'dtype': 'dtype', 'device': 'device'}), '(shifts_x.shape, dtype=dtype, device=device)\n', (2021, 2065), True, 'import torch as t\n'), ((2154, 2179), 'torch.cat', 't.cat', (['anchor_list'], {'dim': '(0)'}), '(anchor_list, dim=0)\n', (2159, 2179), True, 'import torch as t\n')] |
import os
import tensorflow
if tensorflow.__version__ >= '2':
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D
from tensorflow.keras.utils import plot_model
else:
from keras.initializers import RandomNormal
from keras.models import Sequential, Model
from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D
from keras.utils import plot_model
import numpy as np
import matplotlib as mpl
if os.getenv('DISPLAY') is None: mpl.use('Agg')
import matplotlib.pyplot as py
import matplotlib.image as mpimg
from tempfile import NamedTemporaryFile
#----------------------------------------------------------------------------------------
def apetnet(n_ch = 2,
            n_ind_layers = 1,
            n_common_layers = 7,
            n_kernels_ind = 15,
            n_kernels_common = 30,
            kernel_shape = (3,3,3),
            res_channels = (0,),
            add_final_relu = False,
            add_batchnorm = True,
            disp = False):
    """
    Create CNN model for multiple inputs and one voxel-wise prediction channel

     |----input_0     input_1    ...  input_n
     |       |           |       ...     |
     |   conv+prelu  conv+prelu  ...  conv+prelu
     |       |           |       ...     |
     |   conv+prelu  conv+prelu  ...  conv+prelu
     |       |           |       ...     |
     |       ---------concatenate ... -------
     |                   |
     |               conv+prelu
     |                   |
     |               conv+prelu
     |                   |
     |               conv+prelu
     |                   |
     |                   V
     ------------------>add
                         |
                       (relu)
                         |
                       output

    keyword arguments
    -----------------

    n_ch             ... (int) number of input channels
    n_ind_layers     ... (int) number of individual layers
    n_common_layers  ... (int) number of common layers
    n_kernels_ind    ... (int) number of kernels for individual layers
    n_kernels_common ... (int) number of kernels for common layers
    kernel_shape     ... (tuple) shape of kernels
    res_channels     ... (iterable) of input channels to add to the output of
                         the common layers, or None to skip the residual add.
                         Default (0,) -- a tuple is used to avoid a mutable
                         default argument.
    add_batchnorm    ... (bool) add batch normalization layers between the conv
                         and the PRELU layers
    add_final_relu   ... (bool) add a final ReLU layer before output to make
                         sure that output is non-negative
    disp             ... (bool) show the model

    returns
    -------
    a keras ``Model`` mapping the n_ch inputs to one voxel-wise output channel
    """
    inputs = [Input(shape = (None, None, None,1), name = 'input_' + str(x)) for x in range(n_ch)]

    # individual (per-channel) convolution paths
    if n_ind_layers > 0:
        x1_list = [i for i in inputs]
        for i in range(n_ind_layers):
            for j in range(n_ch):
                x1_list[j] = Conv3D(n_kernels_ind, kernel_shape, padding = 'same',
                                    kernel_initializer = 'glorot_uniform',
                                    name = 'conv3d_ind_' + str(i) + '_' + str(j))(x1_list[j])
                if add_batchnorm:
                    x1_list[j] = BatchNormalization(name = 'batchnorm_ind_' + str(i) + '_' + str(j))(x1_list[j])
                x1_list[j] = PReLU(shared_axes=[1,2,3], name = 'prelu_ind_' + str(i) + '_' + str(j))(x1_list[j])
        # concatenate the individual paths
        x1 = Concatenate(name = 'concat_0')(x1_list)
    else:
        # no individual layers: concatenate the raw inputs
        x1 = Concatenate(name = 'concat_0')(inputs)

    # common path
    for i in range(n_common_layers):
        x1 = Conv3D(n_kernels_common, kernel_shape, padding = 'same',
                    kernel_initializer = 'glorot_uniform',
                    name = 'conv3d_' + str(i))(x1)
        if add_batchnorm:
            x1 = BatchNormalization(name = 'batchnorm_' + str(i))(x1)
        x1 = PReLU(shared_axes=[1,2,3], name = 'prelu_' + str(i))(x1)

    # 1x1x1 convolution that collapses all features into a single channel
    x1 = Conv3D(1, (1,1,1), padding='same', name = 'conv_111',
                kernel_initializer = RandomNormal(mean = 0.0, stddev = np.sqrt(2)))(x1)

    # residual connection: add the selected input channels to the prediction
    if res_channels is not None:
        x1 = Add(name = 'add_0')([x1] + [inputs[i] for i in res_channels])

    if add_final_relu:
        x1 = ReLU(name = 'final_relu')(x1)

    model = Model(inputs = inputs, outputs = x1)

    if disp:
        # render the model graph into a temporary png and display it
        tmp_file = NamedTemporaryFile(prefix = 'model', suffix = '.png')
        plot_model(model, to_file = tmp_file.name)
        img = mpimg.imread(tmp_file)
        fig, ax = py.subplots()
        # draw on the created axes (previously py.imshow clobbered the ax name)
        ax.imshow(img)
        py.draw()

    return model
#------------------------------------------------------------------------------------------
def apetnet_vv5_onnx(input_tensor = None,
                     n_ind_layers = 1,
                     n_common_layers = 7,
                     n_kernels_ind = 15,
                     n_kernels_common = 30,
                     kernel_shape = (3,3,3),
                     add_final_relu = False,
                     debug = False):
    """ Stacked single channel version of apetnet.

    The PET and MRI volumes arrive stacked along the first spatial axis of a
    single input tensor; they are split apart with Cropping3D layers, run
    through the same individual/common conv paths as apetnet, and the PET
    image is added back to the prediction.

    For a description of the other input parameters see apetnet.
    The input_tensor argument is only used to determine the input shape.
    If None, the input shape is set to (32, 16, 16, 1).
    """

    # define input (stacked PET and MRI image)
    if input_tensor is not None:
        ipt = Input(input_tensor.shape[1:5], name = 'input')
    else:
        ipt = Input(shape = (32, 16, 16, 1), name = 'input')

    # extract pet and mri image
    # - first image in order is pet (pet occupies the first half of axis 1,
    #   mri the second half)
    ipt_dim_crop = int(ipt.shape[1] // 2)
    mri_image = Cropping3D(cropping=((ipt_dim_crop, 0), (0, 0), (0, 0)), name = 'extract_mri')(ipt)
    pet_image = Cropping3D(cropping=(( 0, ipt_dim_crop), (0, 0), (0, 0)), name = 'extract_pet')(ipt)

    # create the full model
    if not debug:
        # individual paths (shared initializer for both modalities)
        if n_ind_layers > 0:
            init_val_ind = RandomNormal(mean = 0.0, stddev = np.sqrt(2/(np.prod(kernel_shape)*n_kernels_ind)))
            pet_image_ind = pet_image
            mri_image_ind = mri_image
            for i in range(n_ind_layers):
                pet_image_ind = Conv3D(n_kernels_ind, kernel_shape, padding = 'same', name = 'conv3d_pet_ind_' + str(i), kernel_initializer = init_val_ind)(pet_image_ind)
                pet_image_ind = PReLU(shared_axes=[1,2,3], name = 'prelu_pet_ind_' + str(i))(pet_image_ind)
                mri_image_ind = Conv3D(n_kernels_ind, kernel_shape, padding = 'same', name = 'conv3d_mri_ind_' + str(i), kernel_initializer = init_val_ind)(mri_image_ind)
                mri_image_ind = PReLU(shared_axes=[1,2,3], name = 'prelu_mri_ind_' + str(i))(mri_image_ind)
            # concatenate inputs
            net = Concatenate(name = 'concat_0')([pet_image_ind, mri_image_ind])
        else:
            # concatenate inputs
            net = Concatenate(name = 'concat_0')([pet_image, mri_image])

        # common path
        init_val_common = RandomNormal(mean = 0.0, stddev = np.sqrt(2/(np.prod(kernel_shape)*n_kernels_common)))
        for i in range(n_common_layers):
            net = Conv3D(n_kernels_common, kernel_shape, padding = 'same', name = 'conv3d_' + str(i), kernel_initializer = init_val_common)(net)
            net = PReLU(shared_axes=[1,2,3], name = 'prelu_' + str(i))(net)

        # layers that adds all features
        net = Conv3D(1, (1,1,1), padding='valid', name = 'conv_final', kernel_initializer = RandomNormal(mean = 0.0, stddev = np.sqrt(2)))(net)

        # add pet_image to prediction (residual connection)
        net = Add(name = 'add_0')([net, pet_image])

        # ensure that output is non-negative
        if add_final_relu:
            net = ReLU(name = 'final_relu')(net)
    # in debug mode only add up pet and mri image
    else:
        net = Concatenate(name = 'add_0')([pet_image, mri_image])

    # create model
    model = Model(inputs=ipt, outputs=net)

    # return the model
    return model
| [
"tempfile.NamedTemporaryFile",
"matplotlib.image.imread",
"matplotlib.pyplot.imshow",
"keras.layers.Cropping3D",
"keras.models.Model",
"matplotlib.pyplot.draw",
"keras.layers.Concatenate",
"keras.utils.plot_model",
"matplotlib.use",
"keras.layers.Add",
"keras.layers.ReLU",
"keras.layers.Input"... | [((664, 684), 'os.getenv', 'os.getenv', (['"""DISPLAY"""'], {}), "('DISPLAY')\n", (673, 684), False, 'import os\n'), ((694, 708), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (701, 708), True, 'import matplotlib as mpl\n'), ((4770, 4802), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'x1'}), '(inputs=inputs, outputs=x1)\n', (4775, 4802), False, 'from keras.models import Sequential, Model\n'), ((8444, 8474), 'keras.models.Model', 'Model', ([], {'inputs': 'ipt', 'outputs': 'net'}), '(inputs=ipt, outputs=net)\n', (8449, 8474), False, 'from keras.models import Sequential, Model\n'), ((4834, 4883), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'prefix': '"""model"""', 'suffix': '""".png"""'}), "(prefix='model', suffix='.png')\n", (4852, 4883), False, 'from tempfile import NamedTemporaryFile\n'), ((4892, 4932), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': 'tmp_file.name'}), '(model, to_file=tmp_file.name)\n', (4902, 4932), False, 'from keras.utils import plot_model\n'), ((4944, 4966), 'matplotlib.image.imread', 'mpimg.imread', (['tmp_file'], {}), '(tmp_file)\n', (4956, 4966), True, 'import matplotlib.image as mpimg\n'), ((4981, 4994), 'matplotlib.pyplot.subplots', 'py.subplots', ([], {}), '()\n', (4992, 4994), True, 'import matplotlib.pyplot as py\n'), ((5004, 5018), 'matplotlib.pyplot.imshow', 'py.imshow', (['img'], {}), '(img)\n', (5013, 5018), True, 'import matplotlib.pyplot as py\n'), ((5023, 5032), 'matplotlib.pyplot.draw', 'py.draw', ([], {}), '()\n', (5030, 5032), True, 'import matplotlib.pyplot as py\n'), ((5855, 5899), 'keras.layers.Input', 'Input', (['input_tensor.shape[1:5]'], {'name': '"""input"""'}), "(input_tensor.shape[1:5], name='input')\n", (5860, 5899), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((5927, 5969), 'keras.layers.Input', 'Input', ([], {'shape': '(32, 16, 16, 1)', 'name': 
'"""input"""'}), "(shape=(32, 16, 16, 1), name='input')\n", (5932, 5969), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((6101, 6177), 'keras.layers.Cropping3D', 'Cropping3D', ([], {'cropping': '((ipt_dim_crop, 0), (0, 0), (0, 0))', 'name': '"""extract_mri"""'}), "(cropping=((ipt_dim_crop, 0), (0, 0), (0, 0)), name='extract_mri')\n", (6111, 6177), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((6206, 6282), 'keras.layers.Cropping3D', 'Cropping3D', ([], {'cropping': '((0, ipt_dim_crop), (0, 0), (0, 0))', 'name': '"""extract_pet"""'}), "(cropping=((0, ipt_dim_crop), (0, 0), (0, 0)), name='extract_pet')\n", (6216, 6282), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((3807, 3835), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""concat_0"""'}), "(name='concat_0')\n", (3818, 3835), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((3890, 3918), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""concat_0"""'}), "(name='concat_0')\n", (3901, 3918), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((4633, 4650), 'keras.layers.Add', 'Add', ([], {'name': '"""add_0"""'}), "(name='add_0')\n", (4636, 4650), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((4726, 4749), 'keras.layers.ReLU', 'ReLU', ([], {'name': '"""final_relu"""'}), "(name='final_relu')\n", (4730, 4749), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((8101, 8118), 'keras.layers.Add', 'Add', ([], {'name': '"""add_0"""'}), "(name='add_0')\n", (8104, 8118), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, 
BatchNormalization, Add, Concatenate, Cropping3D\n'), ((8354, 8379), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""add_0"""'}), "(name='add_0')\n", (8365, 8379), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((7258, 7286), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""concat_0"""'}), "(name='concat_0')\n", (7269, 7286), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((7387, 7415), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""concat_0"""'}), "(name='concat_0')\n", (7398, 7415), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((8236, 8259), 'keras.layers.ReLU', 'ReLU', ([], {'name': '"""final_relu"""'}), "(name='final_relu')\n", (8240, 8259), False, 'from keras.layers import Input, Conv3D, ReLU, PReLU, BatchNormalization, Add, Concatenate, Cropping3D\n'), ((4573, 4583), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4580, 4583), True, 'import numpy as np\n'), ((7536, 7557), 'numpy.prod', 'np.prod', (['kernel_shape'], {}), '(kernel_shape)\n', (7543, 7557), True, 'import numpy as np\n'), ((8024, 8034), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8031, 8034), True, 'import numpy as np\n'), ((6467, 6488), 'numpy.prod', 'np.prod', (['kernel_shape'], {}), '(kernel_shape)\n', (6474, 6488), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.