code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
rig_hardware.py
Hardware Interface and Mock Layers for Hydration project Rig subsystem.
"""
from abc import ABC, abstractmethod
import configparser
import time, threading
import numpy, serial
import re
from pymodbus.client.sync import ModbusSerialClient
from pymodbus.payload import BinaryPayloadDecoder
from . import hardware
# Load rig configuration from config.ini in the current working directory.
config = configparser.ConfigParser()
config.read('config.ini')
# HydrationServo talks to the real servo controllers.
# NOTE(review): the `not` means the servo module is imported everywhere
# EXCEPT the core-sensors Pi -- confirm this polarity is intended.
if not config.getboolean('Operating System', 'RunningInCoreSensorsRPi'):
    import HydrationServo
# GPIO / PWM libraries exist only on a Raspberry Pi.
if config.getboolean('Operating System', 'RunningInRPi'):
    from gpiozero import PWMLED
    from gpiozero import CPUTemperature
    import RPi.GPIO as GPIO
# Per-axis calibration factors converting raw servo counts to metres.
Z1Cal = config.getfloat('Rig', 'Z1Cal')
Z2Cal = config.getfloat('Rig', 'Z2Cal')
XCal = config.getfloat('Rig', 'XCal')
YCal = config.getfloat('Rig', 'YCal')
# Tolerance (m) within which an axis counts as "at home".
HomingError =config.getfloat('Rig', 'HomingError')
# Motor channel indices per axis; -1 marks an axis with no motor on the
# real rig (X is only driven in the mock configuration).
if config.getboolean('Mocks', 'MockRig'):
    iZ1 = 0
    iZ2 = 1
    iX = 2
    iY = 3
else:
    iZ1 = 0
    iZ2 = 1
    iX = -1
    iY = 2
# MotorMap maps each of the NMotors physical motors to its slot in the
# 4-element position vector (used by FileWriterThread logging).
MotorMap = [0, 1, 3]
NMotors = 3
# these indices are used for the current position variables
kZ1 = 0
kZ2 = 1
kX = 2
kY = 3
class AbstractRigHardware(ABC):
    """Common interface for rig hardware backends (real and mock).

    Positions are 4-vectors indexed by kZ1/kZ2/kX/kY, in metres.  Concrete
    subclasses supply the motion primitives; this base class derives the
    "am I home" checks and relative-move helpers from them.
    """

    def isHomeZ1(self):
        """True when Z1 has stopped within HomingError of zero."""
        current_pos = self.getPosition()
        return (not self.isZ1Moving()) \
            and (numpy.abs(current_pos[kZ1]) < HomingError)

    def isHomeY(self):
        """True when Y has stopped within HomingError of zero."""
        current_pos = self.getPosition()
        return (not self.isYMoving()) \
            and (numpy.abs(current_pos[kY]) < HomingError)

    def isHomeZ2(self):
        """True when Z2 has stopped within HomingError of zero.

        Bug fix: this previously consulted isYMoving(), so Z2 could be
        reported "home" while still travelling.  It now checks Z2's own
        motion flag (both concrete subclasses implement isZ2Moving).
        """
        current_pos = self.getPosition()
        return (not self.isZ2Moving()) \
            and (numpy.abs(current_pos[kZ2]) < HomingError)

    def movePositionZ1(self, delta, vel):
        """Move Z1 by delta metres relative to its current position."""
        cur_pos = self.getPosition().copy()
        new_z1 = cur_pos[kZ1] + delta
        return self.gotoPositionZ1(new_z1, vel)

    def movePositionZ2(self, delta, vel):
        """Move Z2 by delta metres relative to its current position."""
        cur_pos = self.getPosition().copy()
        new_z2 = cur_pos[kZ2] + delta
        return self.gotoPositionZ2(new_z2, vel)

    def movePositionY(self, delta, vel):
        """Move Y by delta metres relative to its current position."""
        cur_pos = self.getPosition().copy()
        new_y = cur_pos[kY] + delta
        return self.gotoPositionY(new_y, vel)

    @abstractmethod
    def motorStatus(self):
        """Return per-motor status codes."""

    @abstractmethod
    def clearAlert(self):
        """Clear any motor alert condition."""

    @abstractmethod
    def getPosition(self):
        """Return the current [z1, z2, x, y] position vector (m)."""

    @abstractmethod
    def homeX(self):
        """Home the X axis."""

    @abstractmethod
    def homeY(self):
        """Home the Y axis."""

    @abstractmethod
    def homeZ1(self):
        """Home the Z1 axis."""

    @abstractmethod
    def isYMoving(self):
        """True while the Y axis is in motion."""

    @abstractmethod
    def isZ1Moving(self):
        """True while the Z1 axis is in motion."""

    @abstractmethod
    def emergencyStop(self):
        """Immediately stop all motion."""

    # Returns torque of motor i (0, 1, 2, 3) => (z1, z2, x, y)
    @abstractmethod
    def getTorque(self, i):
        """Return the torque of motor i."""

    @abstractmethod
    def setHomeZ1(self):
        """Declare the current Z1 position to be home (zero)."""

    @abstractmethod
    def setHomeY(self):
        """Declare the current Y position to be home (zero)."""

    @abstractmethod
    def gotoPositionY(self, y, v):
        """Move Y to absolute position y (m) at velocity v."""

    @abstractmethod
    def gotoPositionZ1(self, z, v):
        """Move Z1 to absolute position z (m) at velocity v."""

    @abstractmethod
    def gotoPositionZ2(self, z, v):
        """Move Z2 to absolute position z (m) at velocity v."""
class MockRigHardware(AbstractRigHardware):
    """Pure-software stand-in for the rig.

    Each axis glides toward its target at a rate proportional to the
    remaining distance; no real hardware is touched.  State lists are
    indexed [z1, z2, x, y].
    """

    def __init__(self):
        #self.position = [-0.4, -0.3, 0.0, 0.50]
        self.position = [-0.0, -0.00, 0.0, 0.00]
        self.target = [0.0, 0.0, 0.0, 0.0]
        self.vel = 0.05  # m/s
        self.homing = [False, False, False, False]
        self.homingTime = [0.0, 0.0, 0.0, 0.0]
        self.move_tolerance = config.getfloat(
            "Rig", "MoveDetectionTolerance")

    def _update(self, axis):
        """Advance one axis toward its target if it is currently homing."""
        rate = (self.target[axis] - self.position[axis]) * self.vel
        if not self.homing[axis]:
            return
        now = time.time()
        elapsed = now - self.homingTime[axis]
        moved = self.position[axis] + rate * elapsed
        remaining = self.target[axis] - moved
        # Snap onto the target once inside the (scaled) tolerance band.
        if numpy.abs(remaining) <= self.move_tolerance * 100:
            self.homing[axis] = False
            moved = self.target[axis]
        self.position[axis] = moved
        self.homingTime[axis] = now

    def getPosition(self):
        """Advance every axis, then return the simulated positions."""
        for axis in range(len(self.position)):
            self._update(axis)
        return self.position

    def _home(self, axis):
        """Begin homing one axis toward zero at the default velocity."""
        self.vel = 0.05  # m/s
        self.homing[axis] = True
        self.target[axis] = 0.0
        self.homingTime[axis] = time.time()

    def homeZ1(self):
        self._home(iZ1)
        return True

    def homeZ2(self):
        self._home(iZ2)
        return True

    def homeX(self):
        self._home(iX)
        return True

    def homeY(self):
        self._home(iY)
        return True

    def emergencyStop(self):
        """Abort all simulated motion immediately."""
        for axis in range(len(self.position)):
            self.homing[axis] = False

    def isZ1Moving(self):
        return self.homing[0]

    def isZ2Moving(self):
        return self.homing[1]

    def getTorque(self, i):
        # Real maximum is 3.5, so 9 clearly flags a simulated value.
        return 9

    def isXMoving(self):
        return self.homing[2]

    def isYMoving(self):
        return self.homing[3]

    def gotoPosition(self, x, y):
        """Start simultaneous X and Y moves toward (x, y)."""
        now = time.time()
        for axis, destination in ((2, x), (3, y)):
            self.target[axis] = destination
            self.homing[axis] = True
            self.homingTime[axis] = now
        return True

    def gotoPositionY(self, y, v):
        """Start a Y move to y at velocity v (scaled for simulation)."""
        now = time.time()
        self.vel = v / 5000.0
        self.target[3] = y
        self.homing[3] = True
        self.homingTime[3] = now
        return True

    def gotoPositionZ1(self, z, v):
        """Start a Z1 move to z at velocity v (scaled for simulation)."""
        self.vel = v / 5000.0
        self.target[0] = z
        self.homing[0] = True
        self.homingTime[0] = time.time()
        return True

    def gotoPositionZ2(self, z, v):
        """Start a Z2 move to z at velocity v (scaled for simulation)."""
        self.vel = v / 5000.0
        self.target[1] = z
        self.homing[1] = True
        self.homingTime[1] = time.time()
        return True

    def setHomeZ1(self):
        print("Setting Home Z1")
        self.position[0] = 0.0

    def setHomeZ2(self):
        self.position[1] = 0.0

    def setHomeX(self):
        self.position[2] = 0.0

    def setHomeY(self):
        self.position[3] = 0.0

    def motorStatus(self):
        return ["0", "0", "0", "0"]

    def clearAlert(self):
        return True
class FileWriterThread(threading.Thread):
    """Background thread that logs rig position/torque samples to a CSV.

    One row is written per sampling period ("Rig"/"SamplingTime" in the
    config) until stop() is called.  The output file is named
    rig_<start-timestamp>.csv in the current working directory.
    """

    def __init__(self, rig_hardware):
        """rig_hardware: object exposing getPosition() and getTorque(i)."""
        threading.Thread.__init__(self)
        self.rig_hardware = rig_hardware
        self.stopped = True

    def run(self):
        self.stopped = False
        time_start_s = time.time()
        sampling_time = config.getfloat("Rig", "SamplingTime")
        # `with` guarantees the CSV is closed even if sampling raises
        # (the original leaked the handle on any exception in the loop).
        with open(f"rig_{time_start_s}.csv", "w") as fp:
            fp.write("time_s,")
            for i in range(NMotors):
                fp.write(f"pos_{i}_m,torque_{i}_Percent,")
            fp.write("\n")
            while not self.stopped:  # read sensor continuously
                loop_start = time.time()
                position = self.rig_hardware.getPosition()
                fp.write(f"{loop_start},")
                for i in range(NMotors):
                    fp.write(f"{position[MotorMap[i]]},")
                    # NOTE(review): position is indexed through MotorMap[i]
                    # but torque uses the raw index i -- confirm the torque
                    # channel mapping is intentional.
                    fp.write(f"{self.rig_hardware.getTorque(i)},")
                fp.write("\n")
                delta_time = time.time() - loop_start
                # Sleep off the remainder of the sampling period.
                if delta_time < sampling_time:
                    time.sleep(sampling_time - delta_time)

    def stop(self):
        """Request the sampling loop to finish; the file is then closed."""
        self.stopped = True
class RigHardware(AbstractRigHardware):
    """Rig interface backed by the real HydrationServo motor controllers.

    Positions are reported in metres: raw servo counts are scaled by the
    per-axis calibration constants (Z1Cal, Z2Cal, XCal, YCal).  A
    FileWriterThread is started at construction to log position/torque.
    """

    def __init__(self):
        print("Initializing Rig Hardware ...")
        # [z1, z2, x, y] in metres; refreshed by getPosition().
        self.current_pos = [0.0, 0.0, 0.0, 0.0]
        self.getPosition()
        print(f"Position found {self.current_pos}")
        self.prev_pos = self.current_pos.copy()
        # Minimum position change (m) treated as "the axis is moving".
        self.move_tolerance = config.getfloat(
            "Rig", "MoveDetectionTolerance")
        print("Done initializing rig hardware")
        self.file_writer_thread = FileWriterThread(self)
        self.file_writer_thread.start()

    def motorStatus(self):
        """Return the raw status of each of the NMotors motors."""
        responses = []
        for i in range(NMotors):
            responses.append(HydrationServo.motor_status(i))  # need to somehow make the motor status return into the error at hand
        return responses

    def clearAlert(self):
        """Clear alert conditions on every motor; always returns True."""
        for i in range(NMotors):
            HydrationServo.clear_alert(i)
        return True

    def homingMotorZ1(self):
        """Run the servo homing routine for Z1.

        Returns False without acting unless Z1 is already within
        HomingError of zero; otherwise stops all motion and starts homing.
        """
        # ensure Z-poisions are zero within tolerance
        homing_error = config.getfloat("Rig", "HomingError")
        pos = self.getPosition()
        # NOTE(review): indexes with the motor index iZ1 rather than the
        # position index kZ1 -- identical today (both 0) but inconsistent
        # with the other guards below; confirm intent.
        if (numpy.abs(pos[iZ1]) > homing_error):
            return False
        # stop existing moves
        self.emergencyStop()
        HydrationServo.homing_motor(iZ1)
        return True

    def gotoPositionY(self, y, v):
        """Move Y to y (m) at velocity v; refused (returns False) when
        either Z axis sits below its home position beyond tolerance."""
        # ensure Z-poisions are zero within tolerance
        homing_error = config.getfloat("Rig", "HomingError")
        pos = self.getPosition()
        if (pos[kZ1] < -homing_error) or (pos[kZ2] < -homing_error):
            return False
        # stop existing moves
        self.emergencyStop()
        HydrationServo.set_position_unique(iY, y/YCal, v)
        return True

    def gotoPositionZ1(self, z, v):
        """Move Z1 to absolute position z (m) at velocity v."""
        # stop existing threads
        self.emergencyStop()
        HydrationServo.set_position_unique(iZ1, z/Z1Cal, v)
        return True

    def gotoPositionZ2(self, z, v):
        """Move Z2 to absolute position z (m) at velocity v."""
        # stop existing threads
        self.emergencyStop()
        HydrationServo.set_position_unique(iZ2, z/Z2Cal, v)
        return True

    def homeY(self):
        """Home Y; returns -1 when either Z axis is not at home."""
        # ensure Z-poisions are zero within tolerance
        homing_error = config.getfloat("Rig", "HomingError")
        pos = self.getPosition()
        if (numpy.abs(pos[kZ1]) > homing_error) or \
           (numpy.abs(pos[kZ2]) > homing_error):
            return -1
        # stop existing moves
        self.emergencyStop()
        return HydrationServo.homing_motor(iY)

    def homeX(self):
        """Home X; returns -1 when either Z axis is not at home."""
        # ensure Z-poisions are zero within tolerance
        homing_error = config.getfloat("Rig", "HomingError")
        pos = self.getPosition()
        if (numpy.abs(pos[kZ1]) > homing_error) or \
           (numpy.abs(pos[kZ2]) > homing_error):
            return -1
        # stop existing moves
        self.emergencyStop()
        return HydrationServo.homing_motor(iX)

    def homeZ1(self):
        """Home Z1 unconditionally."""
        # stop existing moves
        self.emergencyStop()
        return HydrationServo.homing_motor(iZ1)

    def homeZ2(self):
        """Home Z2 unconditionally."""
        # stop existing moves
        self.emergencyStop()
        return HydrationServo.homing_motor(iZ2)

    def getPosition(self):
        """Read all axis positions (m), snapshotting the previous reading
        so is*Moving() can detect motion between consecutive calls."""
        self.prev_pos = self.current_pos.copy()
        z1 = z2 = x = y = 0.0
        # A negative motor index means the axis is absent on this rig.
        if iZ1 >= 0:
            z1 = HydrationServo.get_position(iZ1)*Z1Cal
        if iZ2 >= 0:
            z2 = HydrationServo.get_position(iZ2)*Z2Cal
        if iX >= 0:
            x = HydrationServo.get_position(iX)*XCal
        if iY >= 0:
            y = HydrationServo.get_position(iY)*YCal
        self.current_pos = numpy.array([z1, z2, x, y])
        return self.current_pos

    def emergencyStop(self):
        """Immediately stop every motor."""
        HydrationServo.stop_all_motors()

    def isNMoving(self, n):
        """True when position index n changed by more than move_tolerance
        between the last two getPosition() calls."""
        return numpy.abs(self.prev_pos[n] - self.current_pos[n]) > self.move_tolerance

    def isYMoving(self):
        if iY < 0:
            return False
        else:
            return self.isNMoving(kY)

    def isZ1Moving(self):
        if iZ1 < 0:
            return False
        else:
            return self.isNMoving(kZ1)

    def isZ2Moving(self):
        if iZ2 < 0:
            return False
        else:
            return self.isNMoving(kZ2)

    def getTorque(self, i):
        """Return torque (percent) of motor i.

        NOTE(review): callers such as FileWriterThread index positions via
        MotorMap but pass the raw loop index here -- confirm whether i
        should be mapped before reaching the servo layer.
        """
        return HydrationServo.get_torque(i)

    def setHomeZ1(self):
        """Define the current Z1 position as home (zero)."""
        HydrationServo.set_home(iZ1)

    def setHomeZ2(self):
        """Define the current Z2 position as home (zero)."""
        HydrationServo.set_home(iZ2)

    def setHomeY(self):
        """Define the current Y position as home (zero)."""
        HydrationServo.set_home(iY)

    def set_speed_rpm(self, i, rpm):
        """Set motor i's speed in RPM."""
        HydrationServo.set_speed_rpm(i, rpm)
| [
"threading.Thread.__init__",
"HydrationServo.homing_motor",
"HydrationServo.set_position_unique",
"HydrationServo.set_home",
"HydrationServo.get_torque",
"numpy.abs",
"HydrationServo.clear_alert",
"time.time",
"time.sleep",
"HydrationServo.set_speed_rpm",
"numpy.array",
"HydrationServo.stop_al... | [((343, 370), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (368, 370), False, 'import configparser\n'), ((4443, 4454), 'time.time', 'time.time', ([], {}), '()\n', (4452, 4454), False, 'import time, threading\n'), ((5408, 5419), 'time.time', 'time.time', ([], {}), '()\n', (5417, 5419), False, 'import time, threading\n'), ((5668, 5679), 'time.time', 'time.time', ([], {}), '()\n', (5677, 5679), False, 'import time, threading\n'), ((5973, 5984), 'time.time', 'time.time', ([], {}), '()\n', (5982, 5984), False, 'import time, threading\n'), ((6173, 6184), 'time.time', 'time.time', ([], {}), '()\n', (6182, 6184), False, 'import time, threading\n'), ((6665, 6696), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (6690, 6696), False, 'import time, threading\n'), ((6838, 6849), 'time.time', 'time.time', ([], {}), '()\n', (6847, 6849), False, 'import time, threading\n'), ((8950, 8982), 'HydrationServo.homing_motor', 'HydrationServo.homing_motor', (['iZ1'], {}), '(iZ1)\n', (8977, 8982), False, 'import HydrationServo\n'), ((9358, 9409), 'HydrationServo.set_position_unique', 'HydrationServo.set_position_unique', (['iY', '(y / YCal)', 'v'], {}), '(iY, y / YCal, v)\n', (9392, 9409), False, 'import HydrationServo\n'), ((9542, 9595), 'HydrationServo.set_position_unique', 'HydrationServo.set_position_unique', (['iZ1', '(z / Z1Cal)', 'v'], {}), '(iZ1, z / Z1Cal, v)\n', (9576, 9595), False, 'import HydrationServo\n'), ((9736, 9789), 'HydrationServo.set_position_unique', 'HydrationServo.set_position_unique', (['iZ2', '(z / Z2Cal)', 'v'], {}), '(iZ2, z / Z2Cal, v)\n', (9770, 9789), False, 'import HydrationServo\n'), ((10186, 10217), 'HydrationServo.homing_motor', 'HydrationServo.homing_motor', (['iY'], {}), '(iY)\n', (10213, 10217), False, 'import HydrationServo\n'), ((10596, 10627), 'HydrationServo.homing_motor', 'HydrationServo.homing_motor', (['iX'], {}), '(iX)\n', (10623, 10627), False, 
'import HydrationServo\n'), ((10729, 10761), 'HydrationServo.homing_motor', 'HydrationServo.homing_motor', (['iZ1'], {}), '(iZ1)\n', (10756, 10761), False, 'import HydrationServo\n'), ((10867, 10899), 'HydrationServo.homing_motor', 'HydrationServo.homing_motor', (['iZ2'], {}), '(iZ2)\n', (10894, 10899), False, 'import HydrationServo\n'), ((11341, 11368), 'numpy.array', 'numpy.array', (['[z1, z2, x, y]'], {}), '([z1, z2, x, y])\n', (11352, 11368), False, 'import numpy, serial\n'), ((11447, 11479), 'HydrationServo.stop_all_motors', 'HydrationServo.stop_all_motors', ([], {}), '()\n', (11477, 11479), False, 'import HydrationServo\n'), ((12027, 12055), 'HydrationServo.get_torque', 'HydrationServo.get_torque', (['i'], {}), '(i)\n', (12052, 12055), False, 'import HydrationServo\n'), ((12098, 12126), 'HydrationServo.set_home', 'HydrationServo.set_home', (['iZ1'], {}), '(iZ1)\n', (12121, 12126), False, 'import HydrationServo\n'), ((12161, 12189), 'HydrationServo.set_home', 'HydrationServo.set_home', (['iZ2'], {}), '(iZ2)\n', (12184, 12189), False, 'import HydrationServo\n'), ((12227, 12254), 'HydrationServo.set_home', 'HydrationServo.set_home', (['iY'], {}), '(iY)\n', (12250, 12254), False, 'import HydrationServo\n'), ((12301, 12337), 'HydrationServo.set_speed_rpm', 'HydrationServo.set_speed_rpm', (['i', 'rpm'], {}), '(i, rpm)\n', (12329, 12337), False, 'import HydrationServo\n'), ((3788, 3799), 'time.time', 'time.time', ([], {}), '()\n', (3797, 3799), False, 'import time, threading\n'), ((7187, 7198), 'time.time', 'time.time', ([], {}), '()\n', (7196, 7198), False, 'import time, threading\n'), ((7497, 7508), 'time.time', 'time.time', ([], {}), '()\n', (7506, 7508), False, 'import time, threading\n'), ((8570, 8599), 'HydrationServo.clear_alert', 'HydrationServo.clear_alert', (['i'], {}), '(i)\n', (8596, 8599), False, 'import HydrationServo\n'), ((8812, 8831), 'numpy.abs', 'numpy.abs', (['pos[iZ1]'], {}), '(pos[iZ1])\n', (8821, 8831), False, 'import numpy, serial\n'), 
((11535, 11584), 'numpy.abs', 'numpy.abs', (['(self.prev_pos[n] - self.current_pos[n])'], {}), '(self.prev_pos[n] - self.current_pos[n])\n', (11544, 11584), False, 'import numpy, serial\n'), ((1302, 1329), 'numpy.abs', 'numpy.abs', (['current_pos[kZ1]'], {}), '(current_pos[kZ1])\n', (1311, 1329), False, 'import numpy, serial\n'), ((1468, 1494), 'numpy.abs', 'numpy.abs', (['current_pos[kY]'], {}), '(current_pos[kY])\n', (1477, 1494), False, 'import numpy, serial\n'), ((1634, 1661), 'numpy.abs', 'numpy.abs', (['current_pos[kZ2]'], {}), '(current_pos[kZ2])\n', (1643, 1661), False, 'import numpy, serial\n'), ((3957, 3970), 'numpy.abs', 'numpy.abs', (['ds'], {}), '(ds)\n', (3966, 3970), False, 'import numpy, serial\n'), ((7617, 7655), 'time.sleep', 'time.sleep', (['(sampling_time - delta_time)'], {}), '(sampling_time - delta_time)\n', (7627, 7655), False, 'import time, threading\n'), ((8368, 8398), 'HydrationServo.motor_status', 'HydrationServo.motor_status', (['i'], {}), '(i)\n', (8395, 8398), False, 'import HydrationServo\n'), ((9990, 10009), 'numpy.abs', 'numpy.abs', (['pos[kZ1]'], {}), '(pos[kZ1])\n', (9999, 10009), False, 'import numpy, serial\n'), ((10044, 10063), 'numpy.abs', 'numpy.abs', (['pos[kZ2]'], {}), '(pos[kZ2])\n', (10053, 10063), False, 'import numpy, serial\n'), ((10400, 10419), 'numpy.abs', 'numpy.abs', (['pos[kZ1]'], {}), '(pos[kZ1])\n', (10409, 10419), False, 'import numpy, serial\n'), ((10454, 10473), 'numpy.abs', 'numpy.abs', (['pos[kZ2]'], {}), '(pos[kZ2])\n', (10463, 10473), False, 'import numpy, serial\n'), ((11052, 11084), 'HydrationServo.get_position', 'HydrationServo.get_position', (['iZ1'], {}), '(iZ1)\n', (11079, 11084), False, 'import HydrationServo\n'), ((11129, 11161), 'HydrationServo.get_position', 'HydrationServo.get_position', (['iZ2'], {}), '(iZ2)\n', (11156, 11161), False, 'import HydrationServo\n'), ((11204, 11235), 'HydrationServo.get_position', 'HydrationServo.get_position', (['iX'], {}), '(iX)\n', (11231, 11235), False, 'import 
HydrationServo\n'), ((11277, 11308), 'HydrationServo.get_position', 'HydrationServo.get_position', (['iY'], {}), '(iY)\n', (11304, 11308), False, 'import HydrationServo\n')] |
from collections import OrderedDict
import pandas as pd
import numpy as np
from datetime import date, timedelta
# Display floats with 8 decimal places so expected vs. actual values line
# up column-for-column in test-failure messages.
pd.options.display.float_format = '{:.8f}'.format
def _generate_random_tickers(n_tickers=None):
min_ticker_len = 3
max_ticker_len = 5
tickers = []
if not n_tickers:
n_tickers = np.random.randint(8, 14)
ticker_symbol_random = np.random.randint(ord('A'), ord('Z')+1, (n_tickers, max_ticker_len))
ticker_symbol_lengths = np.random.randint(min_ticker_len, max_ticker_len, n_tickers)
for ticker_symbol_rand, ticker_symbol_length in zip(ticker_symbol_random, ticker_symbol_lengths):
ticker_symbol = ''.join([chr(c_id) for c_id in ticker_symbol_rand[:ticker_symbol_length]])
tickers.append(ticker_symbol)
return tickers
def _generate_random_dates(n_days=None):
if not n_days:
n_days = np.random.randint(14, 20)
start_year = np.random.randint(1999, 2017)
start_month = np.random.randint(1, 12)
start_day = np.random.randint(1, 29)
start_date = date(start_year, start_month, start_day)
dates = []
for i in range(n_days):
dates.append(start_date + timedelta(days=i))
return dates
def _generate_random_dfs(n_df, index, columns):
all_df_data = np.random.random((n_df, len(index), len(columns)))
return [pd.DataFrame(df_data, index, columns) for df_data in all_df_data]
def _generate_output_error_msg(fn_name, fn_inputs, fn_outputs, fn_expected_outputs):
formatted_inputs = []
formatted_outputs = []
formatted_expected_outputs = []
for input_name, input_value in fn_inputs.items():
formatted_outputs.append('INPUT {}:\n{}\n'.format(
input_name, str(input_value)))
for output_name, output_value in fn_outputs.items():
formatted_outputs.append('OUTPUT {}:\n{}\n'.format(
output_name, str(output_value)))
for expected_output_name, expected_output_value in fn_expected_outputs.items():
formatted_expected_outputs.append('EXPECTED OUTPUT FOR {}:\n{}\n'.format(
expected_output_name, str(expected_output_value)))
return 'Wrong value for {}.\n' \
'{}\n' \
'{}\n' \
'{}' \
.format(
fn_name,
'\n'.join(formatted_inputs),
'\n'.join(formatted_outputs),
'\n'.join(formatted_expected_outputs))
def _assert_output(fn, fn_inputs, fn_expected_outputs):
    """Call fn(**fn_inputs) and assert its output(s) match expectations.

    fn_expected_outputs must be an OrderedDict mapping output name to the
    expected value; more than one entry implies fn returns a tuple in that
    order.  Types, array/frame shapes, pandas columns and indices, and
    values (numpy.isclose, NaN-tolerant) are all checked.
    """
    assert type(fn_expected_outputs) == OrderedDict
    fn_outputs = OrderedDict()
    fn_raw_out = fn(**fn_inputs)
    # Normalize the raw return into a name -> value mapping.
    if len(fn_expected_outputs) == 1:
        fn_outputs[list(fn_expected_outputs)[0]] = fn_raw_out
    elif len(fn_expected_outputs) > 1:
        assert type(fn_raw_out) == tuple,\
            'Expecting function to return tuple, got type {}'.format(type(fn_raw_out))
        assert len(fn_raw_out) == len(fn_expected_outputs),\
            'Expected {} outputs in tuple, only found {} outputs'.format(len(fn_expected_outputs), len(fn_raw_out))
        for key_i, output_key in enumerate(fn_expected_outputs.keys()):
            fn_outputs[output_key] = fn_raw_out[key_i]
    err_message = _generate_output_error_msg(
        fn.__name__,
        fn_inputs,
        fn_outputs,
        fn_expected_outputs)
    for fn_out, (out_name, expected_out) in zip(fn_outputs.values(), fn_expected_outputs.items()):
        assert isinstance(fn_out, type(expected_out)),\
            'Wrong type for output {}. Got {}, expected {}'.format(out_name, type(fn_out), type(expected_out))
        if hasattr(expected_out, 'shape'):
            assert fn_out.shape == expected_out.shape, \
                'Wrong shape for output {}. Got {}, expected {}'.format(out_name, fn_out.shape, expected_out.shape)
        if type(expected_out) == pd.DataFrame:
            assert set(fn_out.columns) == set(expected_out.columns), \
                'Incorrect columns for output {}\n' \
                'COLUMNS: {}\n' \
                'EXPECTED COLUMNS: {}'.format(out_name, sorted(fn_out.columns), sorted(expected_out.columns))
        if type(expected_out) in {pd.DataFrame, pd.Series}:
            assert set(fn_out.index) == set(expected_out.index), \
                'Incorrect indices for output {}\n' \
                'INDICES: {}\n' \
                'EXPECTED INDICES: {}'.format(out_name, sorted(fn_out.index), sorted(expected_out.index))
        # isclose returns an elementwise array for array-likes; reduce it
        # to a single bool before asserting.
        out_is_close = np.isclose(fn_out, expected_out, equal_nan=True)
        if not isinstance(out_is_close, bool):
            out_is_close = out_is_close.all()
        assert out_is_close, err_message
def project_test(func):
    """Decorator for project tests: run the wrapped test and print
    'Tests Passed' when it completes without raising.

    Improvements over the original wrapper: keyword arguments are now
    forwarded, and functools.wraps preserves the wrapped function's
    name/docstring for error messages and introspection.
    """
    import functools

    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print('Tests Passed')
        return result
    return func_wrapper
@project_test
def test_generate_weighted_returns(fn):
    """Check fn on a fixed returns/weights DataFrame pair."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'returns': pd.DataFrame(
            [
                [np.nan, np.nan, np.nan],
                [1.59904743, 1.66397210, 1.67345829],
                [-0.37065629, -0.36541822, -0.36015840],
                [-0.41055669, 0.60004777, 0.00536958]],
            dates, tickers),
        'weights': pd.DataFrame(
            [
                [0.03777059, 0.04733924, 0.05197790],
                [0.82074874, 0.48533938, 0.75792752],
                [0.10196420, 0.05866016, 0.09578226],
                [0.03951647, 0.40866122, 0.09431233]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'weighted_returns',
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [1.31241616, 0.80759119, 1.26836009],
                    [-0.03779367, -0.02143549, -0.03449679],
                    [-0.01622375, 0.24521625, 0.00050642]],
                dates, tickers))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_generate_returns(fn):
    """Check fn computes period returns from a fixed price DataFrame."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'prices': pd.DataFrame(
            [
                [35.4411, 34.1799, 34.0223],
                [92.1131, 91.0543, 90.9572],
                [57.9708, 57.7814, 58.1982],
                [34.1705, 92.453, 58.5107]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'returns',
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [1.59904743, 1.66397210, 1.67345829],
                    [-0.37065629, -0.36541822, -0.36015840],
                    [-0.41055669, 0.60004777, 0.00536958]],
                dates, tickers))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_generate_dollar_volume_weights(fn):
    """Check fn on fixed close-price and volume DataFrames."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'close': pd.DataFrame(
            [
                [35.4411, 34.1799, 34.0223],
                [92.1131, 91.0543, 90.9572],
                [57.9708, 57.7814, 58.1982],
                [34.1705, 92.453, 58.5107]],
            dates, tickers),
        'volume': pd.DataFrame(
            [
                [9.83683e+06, 1.78072e+07, 8.82982e+06],
                [8.22427e+07, 6.85315e+07, 4.81601e+07],
                [1.62348e+07, 1.30527e+07, 9.51201e+06],
                [1.06742e+07, 5.68313e+07, 9.31601e+06]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'dollar_volume_weights',
            pd.DataFrame(
                [
                    [0.27719777, 0.48394253, 0.23885970],
                    [0.41632975, 0.34293308, 0.24073717],
                    [0.41848548, 0.33536102, 0.24615350],
                    [0.05917255, 0.85239760, 0.08842984]],
                dates, tickers))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_get_optimal_weights(fn):
    """Check fn on a fixed 3x3 covariance matrix and index-weight Series."""
    fn_inputs = {
        'covariance_returns': np.array(
            [
                [0.143123, 0.0216755, 0.014273],
                [0.0216755, 0.0401826, 0.00663152],
                [0.014273, 0.00663152, 0.044963]]),
        'index_weights': pd.Series([0.23623892, 0.0125628, 0.7511982], ['A', 'B', 'C'])}
    fn_correct_outputs = OrderedDict([
        (
            'x',
            np.array([0.23623897, 0.01256285, 0.75119817]))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_calculate_cumulative_returns(fn):
    """Check fn compounds a fixed returns DataFrame into a Series."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'returns': pd.DataFrame(
            [
                [np.nan, np.nan, np.nan],
                [1.59904743, 1.66397210, 1.67345829],
                [-0.37065629, -0.36541822, -0.36015840],
                [-0.41055669, 0.60004777, 0.00536958]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'cumulative_returns',
            pd.Series(
                [np.nan, 5.93647782, -0.57128454, -0.68260542],
                dates))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_calculate_dividend_weights(fn):
    """Check fn turns cumulative ex-dividends into row-wise weights."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'ex_dividend': pd.DataFrame(
            [
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.1],
                [0.0, 1.0, 0.3],
                [0.0, 0.2, 0.0]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'dividend_weights',
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [0.00000000, 0.00000000, 1.00000000],
                    [0.00000000, 0.71428571, 0.28571429],
                    [0.00000000, 0.75000000, 0.25000000]],
                dates, tickers))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_get_covariance_returns(fn):
    """Check fn computes the covariance matrix of a fixed returns frame."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(4)
    fn_inputs = {
        'returns': pd.DataFrame(
            [
                [np.nan, np.nan, np.nan],
                [1.59904743, 1.66397210, 1.67345829],
                [-0.37065629, -0.36541822, -0.36015840],
                [-0.41055669, 0.60004777, 0.00536958]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([(
        'returns_covariance',
        np.array(
            [
                [0.89856076, 0.7205586, 0.8458721],
                [0.7205586, 0.78707297, 0.76450378],
                [0.8458721, 0.76450378, 0.83182775]]))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_rebalance_portfolio(fn):
    """Check fn rebalances over 11 days in chunks of 4, shifting by 2."""
    tickers = _generate_random_tickers(3)
    dates = _generate_random_dates(11)
    fn_inputs = {
        'returns': pd.DataFrame(
            [
                [np.nan, np.nan, np.nan],
                [-0.02202381, 0.02265285, 0.01441961],
                [0.01947657, 0.00551985, 0.00047382],
                [0.00537313, -0.00803232, 0.01160313],
                [0.00593824, -0.00567773, 0.02247191],
                [0.02479339, 0.01758824, -0.00824176],
                [-0.0109447, -0.00383568, 0.01361958],
                [0.01164822, 0.01558719, 0.00614894],
                [0.0109384, -0.00182079, 0.02900868],
                [0.01138952, 0.00218049, -0.00954495],
                [0.0106982, 0.00644535, -0.01815329]],
            dates, tickers),
        'median_index_weights': pd.DataFrame(
            [
                [0.00449404, 0.11586048, 0.00359727],
                [0.00403487, 0.12534048, 0.0034428, ],
                [0.00423485, 0.12854258, 0.00347404],
                [0.00395679, 0.1243466, 0.00335064],
                [0.00368729, 0.11750295, 0.00333929],
                [0.00369562, 0.11447422, 0.00325973],
                [0.00379612, 0.11088075, 0.0031734, ],
                [0.00366501, 0.10806014, 0.00314648],
                [0.00361268, 0.10376514, 0.00323257],
                [0.00358844, 0.10097531, 0.00319009],
                [0.00362045, 0.09791232, 0.00318071]],
            dates, tickers),
        'shift_size': 2,
        'chunk_size': 4}
    fn_correct_outputs = OrderedDict([
        (
            'all_rebalance_weights',
            [
                np.array([0.29341237, 0.41378419, 0.29280344]),
                np.array([0.29654088, 0.40731481, 0.29614432]),
                np.array([0.29868214, 0.40308791, 0.29822995]),
                np.array([0.30100044, 0.39839644, 0.30060312])]
        )])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_get_portfolio_turnover(fn):
    """Check fn computes annualized turnover from fixed rebalance weights."""
    fn_inputs = {
        'all_rebalance_weights': [
            np.array([0.00012205033508460705, 0.0003019915743383353, 0.999575958090577]),
            np.array([1.305709815242165e-05, 8.112998801084706e-06, 0.9999788299030465]),
            np.array([0.3917481750142896, 0.5607687848565064, 0.0474830401292039])],
        'shift_size': 3,
        'rebalance_count': 11}
    fn_correct_outputs = OrderedDict([('portfolio_turnover', 14.553361377)])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_tracking_error(fn):
    """Check fn measures ETF-vs-index tracking error on fixed series."""
    dates = _generate_random_dates(4)
    fn_inputs = {
        'index_weighted_cumulative_returns': pd.Series(
            [np.nan, 0.99880148, 0.99876653, 1.00024411],
            dates),
        'etf_weighted_cumulative_returns': pd.Series(
            [np.nan, 0.63859274, 0.93475823, 2.57295727],
            dates)}
    fn_correct_outputs = OrderedDict([
        (
            'tracking_error',
            pd.Series([np.nan, 0.03243758, 0.00102427, 0.61835667], dates))])
    _assert_output(fn, fn_inputs, fn_correct_outputs)
| [
"pandas.DataFrame",
"datetime.date",
"numpy.isclose",
"numpy.random.randint",
"numpy.array",
"pandas.Series",
"datetime.timedelta",
"collections.OrderedDict"
] | [((468, 528), 'numpy.random.randint', 'np.random.randint', (['min_ticker_len', 'max_ticker_len', 'n_tickers'], {}), '(min_ticker_len, max_ticker_len, n_tickers)\n', (485, 528), True, 'import numpy as np\n'), ((911, 940), 'numpy.random.randint', 'np.random.randint', (['(1999)', '(2017)'], {}), '(1999, 2017)\n', (928, 940), True, 'import numpy as np\n'), ((959, 983), 'numpy.random.randint', 'np.random.randint', (['(1)', '(12)'], {}), '(1, 12)\n', (976, 983), True, 'import numpy as np\n'), ((1000, 1024), 'numpy.random.randint', 'np.random.randint', (['(1)', '(29)'], {}), '(1, 29)\n', (1017, 1024), True, 'import numpy as np\n'), ((1042, 1082), 'datetime.date', 'date', (['start_year', 'start_month', 'start_day'], {}), '(start_year, start_month, start_day)\n', (1046, 1082), False, 'from datetime import date, timedelta\n'), ((2516, 2529), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2527, 2529), False, 'from collections import OrderedDict\n'), ((13218, 13269), 'collections.OrderedDict', 'OrderedDict', (["[('portfolio_turnover', 14.553361377)]"], {}), "([('portfolio_turnover', 14.553361377)])\n", (13229, 13269), False, 'from collections import OrderedDict\n'), ((318, 342), 'numpy.random.randint', 'np.random.randint', (['(8)', '(14)'], {}), '(8, 14)\n', (335, 342), True, 'import numpy as np\n'), ((867, 892), 'numpy.random.randint', 'np.random.randint', (['(14)', '(20)'], {}), '(14, 20)\n', (884, 892), True, 'import numpy as np\n'), ((1330, 1367), 'pandas.DataFrame', 'pd.DataFrame', (['df_data', 'index', 'columns'], {}), '(df_data, index, columns)\n', (1342, 1367), True, 'import pandas as pd\n'), ((4438, 4486), 'numpy.isclose', 'np.isclose', (['fn_out', 'expected_out'], {'equal_nan': '(True)'}), '(fn_out, expected_out, equal_nan=True)\n', (4448, 4486), True, 'import numpy as np\n'), ((4958, 5139), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829], [-\n 0.37065629, -0.36541822, -0.3601584], [-0.41055669, 
0.60004777, 0.00536958]\n ]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829],\n [-0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, \n 0.00536958]], dates, tickers)\n', (4970, 5139), True, 'import pandas as pd\n'), ((5243, 5432), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.03777059, 0.04733924, 0.0519779], [0.82074874, 0.48533938, 0.75792752],\n [0.1019642, 0.05866016, 0.09578226], [0.03951647, 0.40866122, 0.09431233]]', 'dates', 'tickers'], {}), '([[0.03777059, 0.04733924, 0.0519779], [0.82074874, 0.48533938,\n 0.75792752], [0.1019642, 0.05866016, 0.09578226], [0.03951647, \n 0.40866122, 0.09431233]], dates, tickers)\n', (5255, 5432), True, 'import pandas as pd\n'), ((6149, 6299), 'pandas.DataFrame', 'pd.DataFrame', (['[[35.4411, 34.1799, 34.0223], [92.1131, 91.0543, 90.9572], [57.9708, \n 57.7814, 58.1982], [34.1705, 92.453, 58.5107]]', 'dates', 'tickers'], {}), '([[35.4411, 34.1799, 34.0223], [92.1131, 91.0543, 90.9572], [\n 57.9708, 57.7814, 58.1982], [34.1705, 92.453, 58.5107]], dates, tickers)\n', (6161, 6299), True, 'import pandas as pd\n'), ((6994, 7144), 'pandas.DataFrame', 'pd.DataFrame', (['[[35.4411, 34.1799, 34.0223], [92.1131, 91.0543, 90.9572], [57.9708, \n 57.7814, 58.1982], [34.1705, 92.453, 58.5107]]', 'dates', 'tickers'], {}), '([[35.4411, 34.1799, 34.0223], [92.1131, 91.0543, 90.9572], [\n 57.9708, 57.7814, 58.1982], [34.1705, 92.453, 58.5107]], dates, tickers)\n', (7006, 7144), True, 'import pandas as pd\n'), ((7249, 7437), 'pandas.DataFrame', 'pd.DataFrame', (['[[9836830.0, 17807200.0, 8829820.0], [82242700.0, 68531500.0, 48160100.0],\n [16234800.0, 13052700.0, 9512010.0], [10674200.0, 56831300.0, 9316010.0]]', 'dates', 'tickers'], {}), '([[9836830.0, 17807200.0, 8829820.0], [82242700.0, 68531500.0, \n 48160100.0], [16234800.0, 13052700.0, 9512010.0], [10674200.0, \n 56831300.0, 9316010.0]], dates, tickers)\n', (7261, 7437), True, 'import pandas as pd\n'), ((8089, 8207), 'numpy.array', 
'np.array', (['[[0.143123, 0.0216755, 0.014273], [0.0216755, 0.0401826, 0.00663152], [\n 0.014273, 0.00663152, 0.044963]]'], {}), '([[0.143123, 0.0216755, 0.014273], [0.0216755, 0.0401826, \n 0.00663152], [0.014273, 0.00663152, 0.044963]])\n', (8097, 8207), True, 'import numpy as np\n'), ((8291, 8353), 'pandas.Series', 'pd.Series', (['[0.23623892, 0.0125628, 0.7511982]', "['A', 'B', 'C']"], {}), "([0.23623892, 0.0125628, 0.7511982], ['A', 'B', 'C'])\n", (8300, 8353), True, 'import pandas as pd\n'), ((8715, 8896), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829], [-\n 0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, 0.00536958]\n ]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829],\n [-0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, \n 0.00536958]], dates, tickers)\n', (8727, 8896), True, 'import pandas as pd\n'), ((9411, 9513), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.0, 0.0, 0.0], [0.0, 0.0, 0.1], [0.0, 1.0, 0.3], [0.0, 0.2, 0.0]]', 'dates', 'tickers'], {}), '([[0.0, 0.0, 0.0], [0.0, 0.0, 0.1], [0.0, 1.0, 0.3], [0.0, 0.2,\n 0.0]], dates, tickers)\n', (9423, 9513), True, 'import pandas as pd\n'), ((10208, 10389), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829], [-\n 0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, 0.00536958]\n ]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829],\n [-0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, \n 0.00536958]], dates, tickers)\n', (10220, 10389), True, 'import pandas as pd\n'), ((10962, 11434), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [-0.02202381, 0.02265285, 0.01441961], [\n 0.01947657, 0.00551985, 0.00047382], [0.00537313, -0.00803232, \n 0.01160313], [0.00593824, -0.00567773, 0.02247191], [0.02479339, \n 0.01758824, -0.00824176], [-0.0109447, 
-0.00383568, 0.01361958], [\n 0.01164822, 0.01558719, 0.00614894], [0.0109384, -0.00182079, \n 0.02900868], [0.01138952, 0.00218049, -0.00954495], [0.0106982, \n 0.00644535, -0.01815329]]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [-0.02202381, 0.02265285, \n 0.01441961], [0.01947657, 0.00551985, 0.00047382], [0.00537313, -\n 0.00803232, 0.01160313], [0.00593824, -0.00567773, 0.02247191], [\n 0.02479339, 0.01758824, -0.00824176], [-0.0109447, -0.00383568, \n 0.01361958], [0.01164822, 0.01558719, 0.00614894], [0.0109384, -\n 0.00182079, 0.02900868], [0.01138952, 0.00218049, -0.00954495], [\n 0.0106982, 0.00644535, -0.01815329]], dates, tickers)\n', (10974, 11434), True, 'import pandas as pd\n'), ((11640, 12114), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.00449404, 0.11586048, 0.00359727], [0.00403487, 0.12534048, 0.0034428],\n [0.00423485, 0.12854258, 0.00347404], [0.00395679, 0.1243466, \n 0.00335064], [0.00368729, 0.11750295, 0.00333929], [0.00369562, \n 0.11447422, 0.00325973], [0.00379612, 0.11088075, 0.0031734], [\n 0.00366501, 0.10806014, 0.00314648], [0.00361268, 0.10376514, \n 0.00323257], [0.00358844, 0.10097531, 0.00319009], [0.00362045, \n 0.09791232, 0.00318071]]', 'dates', 'tickers'], {}), '([[0.00449404, 0.11586048, 0.00359727], [0.00403487, 0.12534048,\n 0.0034428], [0.00423485, 0.12854258, 0.00347404], [0.00395679, \n 0.1243466, 0.00335064], [0.00368729, 0.11750295, 0.00333929], [\n 0.00369562, 0.11447422, 0.00325973], [0.00379612, 0.11088075, 0.0031734\n ], [0.00366501, 0.10806014, 0.00314648], [0.00361268, 0.10376514, \n 0.00323257], [0.00358844, 0.10097531, 0.00319009], [0.00362045, \n 0.09791232, 0.00318071]], dates, tickers)\n', (11652, 12114), True, 'import pandas as pd\n'), ((13472, 13534), 'pandas.Series', 'pd.Series', (['[np.nan, 0.99880148, 0.99876653, 1.00024411]', 'dates'], {}), '([np.nan, 0.99880148, 0.99876653, 1.00024411], dates)\n', (13481, 13534), True, 'import pandas as pd\n'), ((13612, 13674), 'pandas.Series', 
'pd.Series', (['[np.nan, 0.63859274, 0.93475823, 2.57295727]', 'dates'], {}), '([np.nan, 0.63859274, 0.93475823, 2.57295727], dates)\n', (13621, 13674), True, 'import pandas as pd\n'), ((12884, 12960), 'numpy.array', 'np.array', (['[0.00012205033508460705, 0.0003019915743383353, 0.999575958090577]'], {}), '([0.00012205033508460705, 0.0003019915743383353, 0.999575958090577])\n', (12892, 12960), True, 'import numpy as np\n'), ((12974, 13050), 'numpy.array', 'np.array', (['[1.305709815242165e-05, 8.112998801084706e-06, 0.9999788299030465]'], {}), '([1.305709815242165e-05, 8.112998801084706e-06, 0.9999788299030465])\n', (12982, 13050), True, 'import numpy as np\n'), ((13064, 13134), 'numpy.array', 'np.array', (['[0.3917481750142896, 0.5607687848565064, 0.0474830401292039]'], {}), '([0.3917481750142896, 0.5607687848565064, 0.0474830401292039])\n', (13072, 13134), True, 'import numpy as np\n'), ((1161, 1178), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (1170, 1178), False, 'from datetime import date, timedelta\n'), ((5614, 5798), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [1.31241616, 0.80759119, 1.26836009], [-\n 0.03779367, -0.02143549, -0.03449679], [-0.01622375, 0.24521625, \n 0.00050642]]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [1.31241616, 0.80759119, 1.26836009\n ], [-0.03779367, -0.02143549, -0.03449679], [-0.01622375, 0.24521625, \n 0.00050642]], dates, tickers)\n', (5626, 5798), True, 'import pandas as pd\n'), ((6470, 6651), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829], [-\n 0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, 0.00536958]\n ]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [1.59904743, 1.6639721, 1.67345829],\n [-0.37065629, -0.36541822, -0.3601584], [-0.41055669, 0.60004777, \n 0.00536958]], dates, tickers)\n', (6482, 6651), True, 'import pandas as pd\n'), ((7633, 7821), 'pandas.DataFrame', 
'pd.DataFrame', (['[[0.27719777, 0.48394253, 0.2388597], [0.41632975, 0.34293308, 0.24073717],\n [0.41848548, 0.33536102, 0.2461535], [0.05917255, 0.8523976, 0.08842984]]', 'dates', 'tickers'], {}), '([[0.27719777, 0.48394253, 0.2388597], [0.41632975, 0.34293308,\n 0.24073717], [0.41848548, 0.33536102, 0.2461535], [0.05917255, \n 0.8523976, 0.08842984]], dates, tickers)\n', (7645, 7821), True, 'import pandas as pd\n'), ((8433, 8479), 'numpy.array', 'np.array', (['[0.23623897, 0.01256285, 0.75119817]'], {}), '([0.23623897, 0.01256285, 0.75119817])\n', (8441, 8479), True, 'import numpy as np\n'), ((9076, 9140), 'pandas.Series', 'pd.Series', (['[np.nan, 5.93647782, -0.57128454, -0.68260542]', 'dates'], {}), '([np.nan, 5.93647782, -0.57128454, -0.68260542], dates)\n', (9085, 9140), True, 'import pandas as pd\n'), ((9694, 9822), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.nan, np.nan, np.nan], [0.0, 0.0, 1.0], [0.0, 0.71428571, 0.28571429],\n [0.0, 0.75, 0.25]]', 'dates', 'tickers'], {}), '([[np.nan, np.nan, np.nan], [0.0, 0.0, 1.0], [0.0, 0.71428571, \n 0.28571429], [0.0, 0.75, 0.25]], dates, tickers)\n', (9706, 9822), True, 'import pandas as pd\n'), ((10552, 10677), 'numpy.array', 'np.array', (['[[0.89856076, 0.7205586, 0.8458721], [0.7205586, 0.78707297, 0.76450378], [\n 0.8458721, 0.76450378, 0.83182775]]'], {}), '([[0.89856076, 0.7205586, 0.8458721], [0.7205586, 0.78707297, \n 0.76450378], [0.8458721, 0.76450378, 0.83182775]])\n', (10560, 10677), True, 'import numpy as np\n'), ((13800, 13862), 'pandas.Series', 'pd.Series', (['[np.nan, 0.03243758, 0.00102427, 0.61835667]', 'dates'], {}), '([np.nan, 0.03243758, 0.00102427, 0.61835667], dates)\n', (13809, 13862), True, 'import pandas as pd\n'), ((12459, 12505), 'numpy.array', 'np.array', (['[0.29341237, 0.41378419, 0.29280344]'], {}), '([0.29341237, 0.41378419, 0.29280344])\n', (12467, 12505), True, 'import numpy as np\n'), ((12523, 12569), 'numpy.array', 'np.array', (['[0.29654088, 0.40731481, 0.29614432]'], {}), 
'([0.29654088, 0.40731481, 0.29614432])\n', (12531, 12569), True, 'import numpy as np\n'), ((12587, 12633), 'numpy.array', 'np.array', (['[0.29868214, 0.40308791, 0.29822995]'], {}), '([0.29868214, 0.40308791, 0.29822995])\n', (12595, 12633), True, 'import numpy as np\n'), ((12651, 12697), 'numpy.array', 'np.array', (['[0.30100044, 0.39839644, 0.30060312]'], {}), '([0.30100044, 0.39839644, 0.30060312])\n', (12659, 12697), True, 'import numpy as np\n')] |
import numpy as np
def linear_interpolate(data):
    """Fill NaN gaps in a 1-D signal by linear interpolation.

    The array is modified in place and also returned for convenience.
    """
    nan_mask = np.isnan(data)
    positions = np.arange(len(data))
    # Interpolate the missing samples from the surrounding valid ones.
    data[nan_mask] = np.interp(positions[nan_mask],
                              positions[~nan_mask],
                              data[~nan_mask])
    return data
| [
"numpy.isnan"
] | [((139, 153), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (147, 153), True, 'import numpy as np\n')] |
"""test_compare.py"""
import numpy as np
import impyute as impy
# Build a 5x5 test dataset with a single missing value at position (0, 0).
mask = np.zeros((5, 5), dtype=bool)
mask[0][0] = True
data_m = impy.dataset.test_data(mask=mask)
labels = np.array([1, 0, 1, 1, 0])
# Impute the masked dataset with two strategies; each entry pairs a
# strategy name with (imputed data, labels) as expected by impy.util.compare.
imputed_mode = []
imputed_mode.append(["mode", (impy.mode(np.copy(data_m)), labels)])
imputed_mode.append(["mean", (impy.mean(np.copy(data_m)), labels)])
def test_output_file_exists():
    """Smoke test: compare() should run end-to-end without raising."""
    log_file = "./results.txt"
    impy.util.compare(imputed_mode, log_path=log_file)
| [
"numpy.copy",
"numpy.zeros",
"numpy.array",
"impyute.util.compare",
"impyute.dataset.test_data"
] | [((72, 100), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {'dtype': 'bool'}), '((5, 5), dtype=bool)\n', (80, 100), True, 'import numpy as np\n'), ((128, 161), 'impyute.dataset.test_data', 'impy.dataset.test_data', ([], {'mask': 'mask'}), '(mask=mask)\n', (150, 161), True, 'import impyute as impy\n'), ((171, 196), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 0]'], {}), '([1, 0, 1, 1, 0])\n', (179, 196), True, 'import numpy as np\n'), ((479, 525), 'impyute.util.compare', 'impy.util.compare', (['imputed_mode'], {'log_path': 'path'}), '(imputed_mode, log_path=path)\n', (496, 525), True, 'import impyute as impy\n'), ((255, 270), 'numpy.copy', 'np.copy', (['data_m'], {}), '(data_m)\n', (262, 270), True, 'import numpy as np\n'), ((323, 338), 'numpy.copy', 'np.copy', (['data_m'], {}), '(data_m)\n', (330, 338), True, 'import numpy as np\n')] |
# read in the JSON-data from the request and convert them to a scopus query string
# (one could add alternative query targets here, for example transforming the individual query strings to a WoS-Search
import random
import numpy as np
from model.KeywordFrequency import KeywordFrequency
from model.SdgWheel import SdgWheel
from service import eids_service
import nltk
from nltk.corpus import stopwords
def convert_search_to_scopus_search_string(search):
    """Build a Scopus advanced-search string from a stored query dict.

    Each non-empty field of `search` contributes one clause; the clauses
    are joined with " AND " in a fixed order.  The affiliation field may
    itself contain " OR "/" AND " lists of IDs, which are expanded into
    per-ID AF-ID(...) clauses.
    """
    clauses = []
    # (dict key, clause prefix, clause suffix) in the original emission order.
    simple_fields = (
        ("author_name", "AUTH(", ")"),
        ("topic", "TITLE-ABS-KEY(", ")"),
        ("start_year", "PUBYEAR AFT ", ""),
        ("end_year", "PUBYEAR BEF ", ""),
        ("title", "TITLE(", ")"),
        ("subject", "SUBJAREA(", ")"),
        ("author_id", "AU-ID(", ")"),
    )
    for field, prefix, suffix in simple_fields:
        if search[field]:
            clauses.append(prefix + search[field] + suffix)
    if search["affiliation_id"]:
        # Wrap the whole expression, strip newlines, then rewrite every
        # boolean connector so each ID ends up inside its own AF-ID(...).
        affil_clause = '(AF-ID(' + search["affiliation_id"].replace("\n", "") + '))'
        affil_clause = affil_clause.replace(" OR ", ") OR AF-ID(")
        affil_clause = affil_clause.replace(" OR", ") OR AF-ID(")
        affil_clause = affil_clause.replace(" AND ", ") AND AF-ID(")
        clauses.append(affil_clause)
    return " AND ".join(clauses)
# TO DO: apply Altmeric search fields to procedure. Up to now only copy of Scopus procedure.
def convert_search_to_altmetric_seach_string(search):
    """Build an Altmetric query string from a stored query dict.

    Non-empty fields each contribute one clause, joined with " AND " in a
    fixed order.  The affiliation field may contain " OR "/" AND " lists
    of IDs that are expanded into per-ID AF-ID(...) clauses.
    (Currently mirrors the Scopus field syntax.)
    """
    clauses = []
    # (dict key, clause prefix, clause suffix) in the original emission order.
    simple_fields = (
        ("author_name", "AUTH(", ")"),
        ("topic", "TITLE-ABS-KEY(", ")"),
        ("year", "PUBYEAR(", ")"),
        ("title", "TITLE(", ")"),
        ("subject", "SUBJAREA(", ")"),
        ("author_id", "AU-ID(", ")"),
    )
    for field, prefix, suffix in simple_fields:
        if search[field]:
            clauses.append(prefix + search[field] + suffix)
    if search["affiliation_id"]:
        # Rewrite the boolean connectors so each ID is wrapped in AF-ID(...).
        affil_clause = 'AF-ID(' + search["affiliation_id"] + ')'
        affil_clause = affil_clause.replace(" OR ", ") OR AF-ID(")
        affil_clause = affil_clause.replace(" AND ", ") AND AF-ID(")
        clauses.append(affil_clause)
    return " AND ".join(clauses)
def generate_scopus_search_from_eid_list(eids):
    """Build a Scopus search string matching any of the given EIDs.

    Returns a query of the form "EID(e1 OR e2 OR ...)".

    Fixes over the previous version:
    - removed a leftover debug print of the whole EID list;
    - an empty `eids` list now yields "EID()" instead of the broken
      string ")" produced by the old slice-based concatenation.
    """
    return 'EID(' + ' OR '.join(eids) + ')'
# Given a list of words, return a dictionary of
# word-frequency pairs.
# THANKS TO <NAME> and <NAME> (https://programminghistorian.org/en/lessons/counting-frequencies)
def wordlist_to_freq_dict(wordlist):
    """Return (count, word) tuples sorted by descending frequency.

    THANKS TO <NAME> and <NAME>
    (https://programminghistorian.org/en/lessons/counting-frequencies)
    """
    # One count per distinct word (repeated .count() calls, as before).
    counts = {word: wordlist.count(word) for word in wordlist}
    return sorted(((cnt, word) for word, cnt in counts.items()), reverse=True)
def clean_up_wordlist(wordlist):
    """Drop English stopwords from `wordlist` and map words to frequencies.

    Performance fix: the stopword list used to be re-fetched from NLTK for
    every token (O(n*m)); it is now loaded once into a set for O(1) lookups.
    Filtering behavior is unchanged.
    """
    english_stopwords = set(stopwords.words('english'))
    clean_tokens = [token for token in wordlist if token not in english_stopwords]
    freq = nltk.FreqDist(clean_tokens)
    # NOTE(review): zip pairs the ORIGINAL wordlist with FreqDist keys in
    # iteration order, not each word with its own count — this looks like a
    # latent bug (dict(freq) may have been intended); preserved as-is to
    # avoid changing caller-visible behavior. TODO confirm intent.
    return dict(zip(wordlist, freq))
# Sort a dictionary of word-frequency pairs in
# order of descending frequency.
# THANKS TO <NAME> and <NAME> (https://programminghistorian.org/en/lessons/counting-frequencies)
def sort_freq_dict(freqdict):
    """Convert a word->count mapping into KeywordFrequency objects,
    ordered by descending frequency (ties broken by word, descending).
    """
    ranked = sorted(((count, word) for word, count in freqdict.items()),
                    reverse=True)
    return [KeywordFrequency(word, count) for count, word in ranked]
def calculate_symmetric_overlap(primary):
    """Build a symmetric overlap matrix of stored EID lists.

    Loads the saved EID list for every key in `primary` and fills an
    n x n object matrix: cell [i, j] (i != j) holds the EIDs shared by
    queries i and j; the diagonal [i, i] holds EIDs of query i that were
    not found in any later query.

    NOTE(review): each entry is only compared against queries with a
    higher index (j > i), so `found` and hence the diagonal are
    order-dependent — confirm this is intended.
    """
    primary_length = len(primary)
    # n x n matrix of lists; cells stay None until first insertion.
    overlap_map = np.empty((primary_length, primary_length), dtype=object)
    data = {}
    for key in primary:
        data[key] = eids_service.load_eid_list(key, '')
    for i in range(0, primary_length):
        for entry in data[primary[i]]:
            found = False
            for j in range(i + 1, primary_length):
                if entry in data[primary[j]]:
                    # Record the shared EID symmetrically in both cells.
                    if overlap_map[i, j] is None:
                        overlap_map[i, j] = [entry]
                        overlap_map[j, i] = [entry]
                    else:
                        overlap_map[i, j].append(entry)
                        overlap_map[j, i].append(entry)
                    found = True
            if not found:
                # EID unique to query i (with respect to later queries).
                if overlap_map[i, i] is None:
                    overlap_map[i, i] = [entry]
                else:
                    overlap_map[i, i].append(entry)
    return overlap_map
def calculate_asymmetric_overlap(primary, secondary):
    """Build an overlap matrix between two sets of stored EID lists.

    Cell [i, j] holds the EIDs of primary query i also found in secondary
    query j; the diagonal [i, i] holds EIDs of primary query i not found
    in any secondary query.

    Bug fix: the inner loop used `range(0, secondary)` — iterating over a
    list with range() raises TypeError — it now iterates over
    `secondary_length` as clearly intended.

    NOTE(review): `if j == i: continue` skips the same *index* across two
    different key lists, and the unique-entry write targets
    overlap_map[i, i], which requires i < secondary_length — confirm both
    are intended when primary and secondary differ in length.
    """
    primary_length = len(primary)
    secondary_length = len(secondary)
    overlap_map = np.empty((primary_length, secondary_length), dtype=object)
    data = {}
    for key in primary:
        data[key] = eids_service.load_eid_list(key, '')
    for key in secondary:
        data[key] = eids_service.load_eid_list(key, '')
    for i in range(0, primary_length):
        for entry in data[primary[i]]:
            found = False
            for j in range(0, secondary_length):
                if j == i:
                    continue
                if entry in data[secondary[j]]:
                    if overlap_map[i, j] is None:
                        overlap_map[i, j] = [entry]
                    else:
                        overlap_map[i, j].append(entry)
                    found = True
            if not found:
                if overlap_map[i, i] is None:
                    overlap_map[i, i] = [entry]
                else:
                    overlap_map[i, i].append(entry)
    return overlap_map
def get_sdg_classification(doi):
    """Return a mock list of 17 SDG relevance scores for the given DOI.

    Nine pseudo-random scores around fixed base values plus eight zeros,
    shuffled together — a placeholder until a real classifier exists.
    """
    print("retrieving sdg_classification for doi" + doi)
    # (base score, +/- spread) pairs; one random draw per pair, in order.
    base_and_spread = [
        (0.50, 0.4), (0.8, 0.2), (0.90, 0.1),
        (0.25, 0.2), (0.2, 0.2), (0.1, 0.2),
        (0.80, 0.2), (0.4, 0.4), (0.20, 0.2),
    ]
    classifications = [base + random.uniform(-spread, spread)
                       for base, spread in base_and_spread]
    classifications.extend([0] * 8)
    random.shuffle(classifications)
    return classifications
def get_sdg_wheel(doi):
    """Return a mock SdgWheel built from 17 shuffled SDG scores.

    Same placeholder scheme as get_sdg_classification, but the sixth
    score uses a narrower spread and the result is wrapped in SdgWheel.
    """
    print("retrieving sdg_classification for doi" + doi)
    # (base score, +/- spread) pairs; one random draw per pair, in order.
    base_and_spread = [
        (0.50, 0.4), (0.8, 0.2), (0.90, 0.1),
        (0.25, 0.2), (0.2, 0.2), (0.1, 0.1),
        (0.80, 0.2), (0.4, 0.4), (0.20, 0.2),
    ]
    scores = [base + random.uniform(-spread, spread)
              for base, spread in base_and_spread]
    scores.extend([0] * 8)
    random.shuffle(scores)
    return SdgWheel(scores)
def replace_index_by_clear_name(list_of_indices, clear_names):
    """Replace every index in `list_of_indices`, in place, with its
    human-readable equivalent looked up in `clear_names`.
    """
    for position in range(len(list_of_indices)):
        list_of_indices[position] = clear_names[list_of_indices[position]]
def get_path(location, project_id, query_id, filename, prefix=''):
    """Build the output path '<location>/out/<project>/<query>/[prefix_]filename'.

    An empty filename would leave a trailing slash, which is stripped so
    the directory path itself is returned.
    """
    if prefix == '':
        path = f'{location}/out/{project_id}/{query_id}/{filename}'
    else:
        path = f'{location}/out/{project_id}/{query_id}/{prefix}_{filename}'
    if path.endswith('/'):
        path = path[:-1]
    return path
| [
"model.SdgWheel.SdgWheel",
"service.eids_service.load_eid_list",
"random.uniform",
"numpy.empty",
"random.shuffle",
"nltk.corpus.stopwords.words",
"nltk.FreqDist",
"model.KeywordFrequency.KeywordFrequency"
] | [((4789, 4816), 'nltk.FreqDist', 'nltk.FreqDist', (['clean_tokens'], {}), '(clean_tokens)\n', (4802, 4816), False, 'import nltk\n'), ((5421, 5477), 'numpy.empty', 'np.empty', (['(primary_length, primary_length)'], {'dtype': 'object'}), '((primary_length, primary_length), dtype=object)\n', (5429, 5477), True, 'import numpy as np\n'), ((6471, 6529), 'numpy.empty', 'np.empty', (['(primary_length, secondary_length)'], {'dtype': 'object'}), '((primary_length, secondary_length), dtype=object)\n', (6479, 6529), True, 'import numpy as np\n'), ((7916, 7947), 'random.shuffle', 'random.shuffle', (['classifications'], {}), '(classifications)\n', (7930, 7947), False, 'import random\n'), ((8492, 8523), 'random.shuffle', 'random.shuffle', (['classifications'], {}), '(classifications)\n', (8506, 8523), False, 'import random\n'), ((8540, 8565), 'model.SdgWheel.SdgWheel', 'SdgWheel', (['classifications'], {}), '(classifications)\n', (8548, 8565), False, 'from model.SdgWheel import SdgWheel\n'), ((5536, 5571), 'service.eids_service.load_eid_list', 'eids_service.load_eid_list', (['key', '""""""'], {}), "(key, '')\n", (5562, 5571), False, 'from service import eids_service\n'), ((6588, 6623), 'service.eids_service.load_eid_list', 'eids_service.load_eid_list', (['key', '""""""'], {}), "(key, '')\n", (6614, 6623), False, 'from service import eids_service\n'), ((6670, 6705), 'service.eids_service.load_eid_list', 'eids_service.load_eid_list', (['key', '""""""'], {}), "(key, '')\n", (6696, 6705), False, 'from service import eids_service\n'), ((4711, 4737), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4726, 4737), False, 'from nltk.corpus import stopwords\n'), ((5242, 5292), 'model.KeywordFrequency.KeywordFrequency', 'KeywordFrequency', (['keyword_freq[1]', 'keyword_freq[0]'], {}), '(keyword_freq[1], keyword_freq[0])\n', (5258, 5292), False, 'from model.KeywordFrequency import KeywordFrequency\n'), ((7521, 7546), 'random.uniform', 
'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (7535, 7546), False, 'import random\n'), ((7562, 7587), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7576, 7587), False, 'import random\n'), ((7604, 7629), 'random.uniform', 'random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (7618, 7629), False, 'import random\n'), ((7646, 7671), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7660, 7671), False, 'import random\n'), ((7687, 7712), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7701, 7712), False, 'import random\n'), ((7728, 7753), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7742, 7753), False, 'import random\n'), ((7770, 7795), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7784, 7795), False, 'import random\n'), ((7811, 7836), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (7825, 7836), False, 'import random\n'), ((7853, 7878), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (7867, 7878), False, 'import random\n'), ((8097, 8122), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (8111, 8122), False, 'import random\n'), ((8138, 8163), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (8152, 8163), False, 'import random\n'), ((8180, 8205), 'random.uniform', 'random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (8194, 8205), False, 'import random\n'), ((8222, 8247), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (8236, 8247), False, 'import random\n'), ((8263, 8288), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (8277, 8288), False, 'import random\n'), ((8304, 8329), 'random.uniform', 'random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (8318, 8329), False, 'import 
random\n'), ((8346, 8371), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (8360, 8371), False, 'import random\n'), ((8387, 8412), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (8401, 8412), False, 'import random\n'), ((8429, 8454), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (8443, 8454), False, 'import random\n')] |
import re
import ujson
from collections import defaultdict, OrderedDict
import numpy as np
from events_classifier import EventClassifier
from max_heap import MaxHeap
from models_manager import Method
from word2vec_wiki_model import Word2VecWikiModel
# Inclusive year range covered by the temporal models and event data.
min_year = 1981
max_year = 2015
all_years = list(range(min_year, max_year + 1))
class WordOnthology(object):
    """Temporal word-embedding ontology over Wikipedia event data.

    Wraps a manager of per-year word2vec models plus event metadata
    (loaded from data/*.json) and answers queries such as nearest
    neighbors per year, key-year detection, and key-event retrieval by
    several strategies (KNN, single word, trained classifier, raw
    Wikipedia term counts).
    """

    def __init__(self, models_manager, knn_threshold, w2v_threshold, num_of_neighbors, support_events=False,
                 global_model=None, transformed_temporal_models=False, limit_years_around=0):
        """Load event metadata and, when requested, train the event classifier.

        NOTE(review): self.classifier is only assigned when support_events is
        True, but the global_model property reads self.classifier
        unconditionally — verify callers always enable support_events before
        touching that property.
        """
        self.knn_threshold = knn_threshold
        self.w2v_threshold = w2v_threshold
        self.models_manager = models_manager
        self.num_of_neighbors = num_of_neighbors
        # Event metadata: publication year, structured content, raw text.
        event_year = ujson.load(open('data/event_year_since1980.json', encoding='utf-8'))
        self.event_to_content = ujson.load(open('data/event_content_since1980.json', encoding='utf-8'))
        self.event_to_text_content = ujson.load(open('data/event_text_content_since1980.json', encoding='utf-8'))
        # Invert the event -> year mapping into year -> [events].
        self.year_to_event = defaultdict(list)
        for event, year in event_year.items():
            self.year_to_event[year].append(event)
        self.support_events = support_events
        self.limit_years_around = limit_years_around
        if support_events:
            if transformed_temporal_models:
                self.classifier = EventClassifier(models_manager=self.models_manager)
            else:
                self.classifier = EventClassifier(global_model=global_model)
            self.classifier.create_classifier(train=True)
        self.global_model_inner = global_model
        self.transformed_temporal_models = transformed_temporal_models

    def get_similar_words_per_year(self, word):
        """Return an OrderedDict year -> nearest-neighbor words of `word`.

        NOTE(review): the range excludes max_year (2015); callers that
        index the result with max_year will raise KeyError — confirm this
        off-by-one is intended.
        """
        if not word:
            return None
        year_to_similar_words = OrderedDict()
        for year in range(min_year, max_year):
            similar_words = self.models_manager.most_similar_words_in_year(word, year, self.num_of_neighbors)
            year_to_similar_words[year] = similar_words
        return year_to_similar_words

    def find_key_years(self, word, method):
        """
        find key years of a given word, according to a given method (e.g. KNN, word2vec)
        """
        if not word:
            return None
        # Each method has its own similarity threshold.
        method_threshold = self.knn_threshold if method == Method.KNN else self.w2v_threshold
        year_to_sim, peaks = self.models_manager.get_scores_peaks(word, min_year, max_year, method,
                                                                  threshold=method_threshold, k=self.num_of_neighbors)
        return peaks

    def find_key_events_by_classifier(self, word, min_classifier_score, max_events_per_year,
                                      existing_key_years_to_events, include_score=False):
        """
        find important events using our events classifier, and word2vec similarities as a filter.
        'key_years_to_events' should be calculated by another method ('find_key_events_...'),
        preferably with a bigger max_events_num, as we don't want to just filter an existing method.
        """
        if not word:
            return None
        word = word.lower()
        key_years_to_events = OrderedDict([(year, []) for year in all_years])
        for key_year, top_events_scores in existing_key_years_to_events.items():
            if not top_events_scores:
                continue
            # run the classifier for these events
            event_to_features = {}
            event_to_prev_method_score = {}
            for event, score in top_events_scores:
                event_to_prev_method_score[event] = float(score)
                feature_vector, feature_names = self.classifier.featurize_event_word((event, word))
                if feature_vector is not None:
                    event_to_features[event] = feature_vector
            probs = list(self.classifier.classifier.classifier.predict_proba(
                list(event_to_features.values())))  # probabilities for the true class
            y_prob = np.array(probs)[:, 1]
            top_key_events = MaxHeap(max_events_per_year)
            for event_i, event in enumerate(list(event_to_features.keys())):
                # Blend: 40% classifier probability, 60% previous method's score.
                event_score = (y_prob[event_i] * 4 + event_to_prev_method_score[event] * 6) / 10
                top_key_events.add(event_score, event)
            top_key_events = sorted(top_key_events.heap, reverse=True)
            # Keep only events above the cut-off; optionally append the score.
            key_years_to_events[key_year] = [item[1] + '--' + str(round(item[0], 2)) if include_score else item[1] for
                                             item in top_key_events if item[0] > min_classifier_score]
        return key_years_to_events

    def find_key_events_by_knn(self, word, max_events_per_year, years, include_score=False):
        """
        find events that are closest to the given word and its nearest neighbors
        """
        if not word:
            return None
        word = word.lower()
        year_to_similar_words = self.get_similar_words_per_year(word)
        key_years_to_events = OrderedDict([(year, []) for year in years])
        for key_year in years:
            model = self.get_model(key_year)
            # find the key events from that year
            top_key_events = MaxHeap(max_events_per_year)
            # take the events that are most similar to the KNN
            word_knn = [word] + year_to_similar_words[key_year] if year_to_similar_words[key_year] is not None else [
                word]
            events = self.get_relevant_events(key_year)
            for e in events:
                # Similarity to every neighbor, restricted to events that
                # actually mention the word and to words the model knows.
                knn_similarities = [model.similarity(e, sim_word) for sim_word in word_knn
                                    if word in self.event_to_content[e] and model.contains_all_words([e, sim_word])]
                if len(knn_similarities) > 0:
                    # Rank by the mean similarity over the neighborhood.
                    similarity = np.mean(knn_similarities)
                    if similarity > self.knn_threshold:
                        top_key_events.add(similarity, e)
            top_key_events = sorted(top_key_events.heap, reverse=True)
            key_years_to_events[key_year] = [(item[1], str(round(item[0], 2))) if include_score else item[1] for
                                             item in top_key_events]
        return key_years_to_events

    def find_key_events_by_word(self, word, max_events_per_year, years, include_score=False):
        """
        find events closest to the given word
        """
        if not word:
            return None
        word = word.lower()
        key_years_to_events = OrderedDict([(year, []) for year in years])
        for key_year in years:
            model = self.get_model(key_year)
            # find the key events from that year
            top_key_events = MaxHeap(max_events_per_year)
            # take the events that are most similar to the event
            events = self.get_relevant_events(key_year)
            for e in events:
                if word in self.event_to_content[e] and model.contains_all_words([e, word]):
                    similarity = model.similarity(e, word)
                    if similarity > self.knn_threshold:
                        top_key_events.add(similarity, e)
            top_key_events = sorted(top_key_events.heap, reverse=True)
            key_years_to_events[key_year] = [item[1] + '--' + str(round(item[0], 2)) if include_score else item[1] for
                                             item in top_key_events]
        return key_years_to_events

    def find_new_words_knn(self, word, years):
        """
        find for each given year: words that were added since the previous year
        """
        if not word:
            return None
        word = word.lower()
        year_to_similar_words = self.get_similar_words_per_year(word)
        year_to_new_words = OrderedDict()
        prev_similar_words = None
        for year in years:
            similar_words = year_to_similar_words[year]
            if prev_similar_words and similar_words is not None:  # mark new words
                year_to_new_words[year] = [w for w in similar_words if w not in prev_similar_words]
            else:
                # First year (or no previous data): nothing counts as "new".
                year_to_new_words[year] = []
            prev_similar_words = similar_words
        return year_to_new_words

    def find_events_from_wikipedia_baseline(self, word, max_events_per_year, years, include_score=False,
                                            min_occurrences=5):
        """
        find for each given year: events that contain the given word the most times
        """
        if not word:
            return None
        word = word.lower()
        key_years_to_events = OrderedDict([(year, []) for year in years])
        for key_year in years:
            # find the key events from that year
            top_key_events = MaxHeap(max_events_per_year)
            # take the events that are most similar to the event
            for e in self.year_to_event[key_year]:
                # count number of occurrences of the given word in the Wiki content
                score = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word), self.event_to_text_content[e].lower()))
                if score > min_occurrences:
                    top_key_events.add(score, e)
            top_key_events = sorted(top_key_events.heap, reverse=True)
            key_years_to_events[key_year] = [item[1] + '--' + str(round(item[0], 2)) if include_score else item[1] for
                                             item in top_key_events]
        return key_years_to_events

    def get_relevant_events(self, year):
        """Return all events within +/- limit_years_around of `year`."""
        relevant_years = [y for y in range(year, year + self.limit_years_around + 1)] + [y for y in range(
            year - self.limit_years_around, year)]
        return [event for year, events in self.year_to_event.items() for event in events if year in relevant_years]

    def get_model(self, year=None):
        """
        returns the temporal model if we're using transformed models (o.w. they won't contain events)
        :param year:
        :return:
        """
        if year and self.transformed_temporal_models:
            return self.models_manager.get_model(year)
        else:
            return self.global_model

    @property
    def global_model(self):
        # Lazily load the global Wikipedia word2vec model on first access.
        if not self.global_model_inner:
            title_id_map = ujson.load(open('data/title_id_map.json', encoding='utf-8'))
            self.global_model_inner = Word2VecWikiModel(
                'data/WikipediaClean5Negative300Skip10/WikipediaClean5Negative300Skip10',
                title_id_map)
            # Late-bind the loaded model into the classifier if it needs one.
            if self.classifier and self.classifier.global_model is None:
                self.classifier.global_model = self.global_model_inner
        return self.global_model_inner
| [
"events_classifier.EventClassifier",
"word2vec_wiki_model.Word2VecWikiModel",
"re.escape",
"collections.defaultdict",
"max_heap.MaxHeap",
"numpy.mean",
"numpy.array",
"collections.OrderedDict"
] | [((1084, 1101), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1095, 1101), False, 'from collections import defaultdict, OrderedDict\n'), ((1852, 1865), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1863, 1865), False, 'from collections import defaultdict, OrderedDict\n'), ((3257, 3304), 'collections.OrderedDict', 'OrderedDict', (['[(year, []) for year in all_years]'], {}), '([(year, []) for year in all_years])\n', (3268, 3304), False, 'from collections import defaultdict, OrderedDict\n'), ((5099, 5142), 'collections.OrderedDict', 'OrderedDict', (['[(year, []) for year in years]'], {}), '([(year, []) for year in years])\n', (5110, 5142), False, 'from collections import defaultdict, OrderedDict\n'), ((6599, 6642), 'collections.OrderedDict', 'OrderedDict', (['[(year, []) for year in years]'], {}), '([(year, []) for year in years])\n', (6610, 6642), False, 'from collections import defaultdict, OrderedDict\n'), ((7860, 7873), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7871, 7873), False, 'from collections import defaultdict, OrderedDict\n'), ((8698, 8741), 'collections.OrderedDict', 'OrderedDict', (['[(year, []) for year in years]'], {}), '([(year, []) for year in years])\n', (8709, 8741), False, 'from collections import defaultdict, OrderedDict\n'), ((4140, 4168), 'max_heap.MaxHeap', 'MaxHeap', (['max_events_per_year'], {}), '(max_events_per_year)\n', (4147, 4168), False, 'from max_heap import MaxHeap\n'), ((5298, 5326), 'max_heap.MaxHeap', 'MaxHeap', (['max_events_per_year'], {}), '(max_events_per_year)\n', (5305, 5326), False, 'from max_heap import MaxHeap\n'), ((6797, 6825), 'max_heap.MaxHeap', 'MaxHeap', (['max_events_per_year'], {}), '(max_events_per_year)\n', (6804, 6825), False, 'from max_heap import MaxHeap\n'), ((8851, 8879), 'max_heap.MaxHeap', 'MaxHeap', (['max_events_per_year'], {}), '(max_events_per_year)\n', (8858, 8879), False, 'from max_heap import MaxHeap\n'), ((10474, 10588), 
'word2vec_wiki_model.Word2VecWikiModel', 'Word2VecWikiModel', (['"""data/WikipediaClean5Negative300Skip10/WikipediaClean5Negative300Skip10"""', 'title_id_map'], {}), "(\n 'data/WikipediaClean5Negative300Skip10/WikipediaClean5Negative300Skip10',\n title_id_map)\n", (10491, 10588), False, 'from word2vec_wiki_model import Word2VecWikiModel\n'), ((1403, 1454), 'events_classifier.EventClassifier', 'EventClassifier', ([], {'models_manager': 'self.models_manager'}), '(models_manager=self.models_manager)\n', (1418, 1454), False, 'from events_classifier import EventClassifier\n'), ((1507, 1549), 'events_classifier.EventClassifier', 'EventClassifier', ([], {'global_model': 'global_model'}), '(global_model=global_model)\n', (1522, 1549), False, 'from events_classifier import EventClassifier\n'), ((4089, 4104), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (4097, 4104), True, 'import numpy as np\n'), ((5902, 5927), 'numpy.mean', 'np.mean', (['knn_similarities'], {}), '(knn_similarities)\n', (5909, 5927), True, 'import numpy as np\n'), ((9143, 9158), 're.escape', 're.escape', (['word'], {}), '(word)\n', (9152, 9158), False, 'import re\n')] |
import argparse
import cv2
import json
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
class FeatureExtractor():
    """Run a forward pass through a model while capturing the activations
    of selected layers and registering hooks that record their gradients."""

    def __init__(self, model, pre_features, features, target_layers):
        self.model = model
        self.pre_features = pre_features      # attribute names applied before the feature block
        self.features = features              # attribute name of the feature block on the model
        self.target_layers = target_layers    # sub-module names whose activations are captured
        self.gradients = []                   # gradients recorded by the backward hooks

    def save_gradient(self, grad):
        """Backward hook: stash the incoming gradient."""
        self.gradients.append(grad)

    def __call__(self, x):
        captured = []
        self.gradients = []
        # run the pre-feature stages first (e.g. resnet stem + early layers)
        for attr_name in self.pre_features:
            x = getattr(self.model, attr_name)(x)
        feature_block = getattr(self.model, self.features)
        # walk the feature block layer by layer so we can intercept targets
        for layer_name, layer in feature_block._modules.items():
            x = layer(x)
            if layer_name in self.target_layers:
                x.register_hook(self.save_gradient)
                captured.append(x)
        return captured, x
class ModelOutputs():
    """Drive a full forward pass and expose:
        1. the network output,
        2. activations from the targetted intermediate layers,
        3. gradients of those layers (captured by the extractor's hooks)."""

    def __init__(self, model,
                 pre_feature_block=[],
                 feature_block='features',
                 target_layers='35',
                 classifier_block=['classifier']):
        self.model = model
        self.classifier_block = classifier_block
        # the extractor assumes the model exposes `feature_block` as an attribute
        self.feature_extractor = FeatureExtractor(self.model,
                                                  pre_feature_block,
                                                  feature_block,
                                                  target_layers)

    def get_gradients(self):
        """Gradients recorded during the last backward pass."""
        return self.feature_extractor.gradients

    def __call__(self, x):
        # activations of the target layer(s) and the feature block's final output
        target_activations, output = self.feature_extractor(x)
        print(f'target_activations[0].size: {target_activations[0].size()}')  # for vgg'35 ([1, 512, 14, 14])
        print(f'output.size: {output.size()}')  # for vgg'36 ([1, 512, 7, 7])
        last = len(self.classifier_block) - 1
        for stage_idx, stage in enumerate(self.classifier_block):
            if stage_idx == last:
                # flatten right before the final classifier stage
                output = output.view(output.size(0), -1)
                print(f'output.view.size: {output.size()}')  # for vgg'36 ([1, 25088])
            output = getattr(self.model, stage)(output)
        print(f'output.size: {output.size()}')  # for vgg'36 ([1, 1000])
        return target_activations, output
def preprocess_image(img):
    """Convert a 0-1 float BGR HxWx3 image into a normalized, batched,
    gradient-enabled torch tensor of shape (1, 3, H, W)."""
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    # BGR (opencv) -> RGB, on a private copy so the caller's image survives
    rgb = img.copy()[:, :, ::-1]
    # per-channel ImageNet normalization
    for ch, (m, s) in enumerate(zip(means, stds)):
        rgb[:, :, ch] = (rgb[:, :, ch] - m) / s
    # HWC -> CHW, contiguous for torch.from_numpy
    tensor = torch.from_numpy(np.ascontiguousarray(np.transpose(rgb, (2, 0, 1))))
    tensor.unsqueeze_(0)
    return tensor.requires_grad_(True)
def show_cam_on_image(img, mask):
    """Overlay the CAM heatmap `mask` (0-1 floats) on the 0-1 float BGR
    image and write the blend to ``cam.jpg``."""
    colored = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    overlay = np.float32(colored) / 255 + np.float32(img)
    overlay = overlay / np.max(overlay)  # rescale so the brightest pixel is 1
    cv2.imwrite("cam.jpg", np.uint8(255 * overlay))
class GradCam:
    """Grad-CAM: weight the target layer's activations by the spatially
    pooled gradients of a chosen class score and collapse them into a
    224x224 localization map normalized to [0, 1]."""
    def __init__(self, model, pre_feature_block, feature_block, target_layer_names, classifier_block, use_cuda):
        self.model = model
        self.model.eval()
        self.pre_feature_block = pre_feature_block
        self.feature_block = feature_block
        self.classifier_block = classifier_block
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        self.extractor = ModelOutputs(self.model,
                                      pre_feature_block,
                                      feature_block,
                                      target_layer_names,
                                      classifier_block)
    def forward(self, input):
        return self.model(input)
    def __call__(self, input, index=None):
        """Return the CAM (numpy, 224x224) for class `index`.

        When `index` is None the predicted (argmax) class is used.
        """
        if self.cuda:
            features, output = self.extractor(input.cuda())
        else:
            features, output = self.extractor(input)
        if index is None:  # fixed: identity comparison with None (was `== None`)
            index = np.argmax(output.cpu().data.numpy())
        # look up the human-readable label purely for logging
        with open('imagenet_class_index.json') as f:
            labels = json.load(f)
        print('prediction[{}]: {}'.format(index, labels[str(index)][1]))
        print('output.size: {}'.format(output.size()))  # for vgg'36 ([1, 1000])
        # one-hot select the class score so backward() propagates only it
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        getattr(self.model, self.feature_block).zero_grad()
        for classifier in self.classifier_block:
            getattr(self.model, classifier).zero_grad()
        one_hot.backward(retain_graph=True)
        gradients = self.extractor.get_gradients()
        print('gradients[0].size(): {}'.format(gradients[0].size()))
        # fixed: reuse the hook results instead of fetching them a second time
        grads_val = gradients[-1].cpu().data.numpy()
        target = features[-1]
        print('target.size(): {}'.format(target.size()))
        target = target.cpu().data.numpy()[0, :]
        print('target.shape: {}'.format(target.shape))
        # channel weights: gradients averaged over the spatial dimensions
        weights = np.mean(grads_val, axis=(2, 3))[0, :]
        print('weights.shape: {}'.format(weights.shape))
        cam = np.zeros(target.shape[1:], dtype=np.float32)
        print('cam.shape: {}'.format(cam.shape)) # (14, 14)
        # weighted sum of activation maps
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        print('cam.shape: {}'.format(cam.shape))
        cam = np.maximum(cam, 0) # remove negative numbers (ReLU)
        cam = cv2.resize(cam, (224, 224))
        print('cam.shape: {}'.format(cam.shape))
        # min-max normalize to [0, 1]
        cam = cam - np.min(cam)
        cam = cam / np.max(cam)
        return cam
class GuidedBackpropReLU(Function):
    """ReLU whose backward pass is 'guided': gradients flow only where
    both the forward input and the incoming gradient are positive."""

    @staticmethod
    def forward(ctx, input):
        # mask of strictly-positive inputs, in the input's dtype
        keep = (input > 0).type_as(input)
        result = input * keep
        ctx.save_for_backward(input, result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_tensors
        # zero the gradient wherever the forward input or the incoming
        # gradient is non-positive
        forward_mask = (input > 0).type_as(grad_output)
        gradient_mask = (grad_output > 0).type_as(grad_output)
        return grad_output * forward_mask * gradient_mask
class GuidedBackpropReLUModel:
    """Wrap a model, swapping every ReLU in its `features` block for
    GuidedBackpropReLU, so __call__ returns the guided-backprop gradient
    of a class score with respect to the input image."""
    def __init__(self, model, use_cuda):
        self.model = model
        self.model.eval()
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        # replace ReLU with GuidedBackpropReLU so backward passes only
        # positive gradients through positive activations
        for idx, module in self.model.features._modules.items():
            if module.__class__.__name__ == 'ReLU':
                self.model.features._modules[idx] = GuidedBackpropReLU.apply
    def forward(self, input):
        return self.model(input)
    def __call__(self, input, index=None):
        """Return the input-space gradient (numpy, shape C x H x W) of class
        `index`; `input` must be a 4-D tensor with requires_grad=True.
        When `index` is None the predicted (argmax) class is used."""
        if self.cuda:
            output = self.forward(input.cuda())
        else:
            output = self.forward(input)
        if index is None:  # fixed: identity comparison with None (was `== None`)
            index = np.argmax(output.cpu().data.numpy())
        # one-hot select the class score, then backprop it to the input
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        one_hot.backward(retain_graph=True)
        output = input.grad.cpu().data.numpy()
        output = output[0, :, :, :]   # drop the batch dimension
        return output
def get_args():
    """Parse command-line flags; CUDA is enabled only when both requested
    and actually available."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=False,
                        help='Use NVIDIA GPU acceleration')
    parser.add_argument('--image-path', type=str, default='./examples/both.png',
                        help='Input image path')
    args = parser.parse_args()
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    print("Using GPU for acceleration" if args.use_cuda
          else "Using CPU for computation")
    return args
def deprocess_image(img):
    """Map a gradient image back to a displayable uint8 image.

    Standardizes, squeezes into [0, 1] around 0.5, then scales to 0-255.
    See https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65
    """
    normalized = (img - np.mean(img)) / (np.std(img) + 1e-5)
    shifted = normalized * 0.1 + 0.5
    return np.uint8(np.clip(shifted, 0, 1) * 255)
if __name__ == '__main__':
    """ python grad_cam.py <path_to_image>
    1. Loads an image with opencv.
    2. Preprocesses it for VGG19 and converts to a pytorch variable.
    3. Makes a forward pass to find the category index with the highest score,
    and computes intermediate activations.
    Makes the visualization. """
    image_path = './examples/wolf.png'
    use_cuda = False
    # Can work with any model, but it assumes that the model has a
    # feature method, and a classifier method,
    # as in the VGG models in torchvision.
    # Per-architecture block names: which attributes run before the feature
    # block, which attribute is the feature block, which sub-layer to hook,
    # and which attributes form the classifier head.
    config = {
        'vgg19': {
            'pre_feature': [],
            'features': 'features',
            'target': ['35'],
            'classifier': ['classifier']
        },
        'resnet50': {
            'pre_feature': ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3'],
            'features': 'layer4',
            'target': ['2'],
            'classifier': ['avgpool', 'fc']
        }
    }
    model_name = 'resnet50'
    config = config[model_name]
    # downloads pretrained ImageNet weights on first use
    model = getattr(models, model_name)(pretrained=True)
    grad_cam = GradCam(model,
                       pre_feature_block=config['pre_feature'],
                       feature_block=config['features'], # features
                       target_layer_names=config['target'],
                       classifier_block=config['classifier'], # classifier
                       use_cuda=use_cuda)
    # load as BGR, resize to the network's 224x224 input, scale to [0, 1]
    img = cv2.imread(image_path, 1)
    img = np.float32(cv2.resize(img, (224, 224))) / 255
    #print('img.size(): {}'.format(img.size))
    input = preprocess_image(img)
    #print('input.size(): {}'.format(input.size()))
    # If None, returns the map for the highest scoring category.
    # Otherwise, targets the requested index.
    target_index = None
    mask = grad_cam(input, target_index)
    show_cam_on_image(img, mask)
'''
gb_model = GuidedBackpropReLUModel(model=models.vgg19(pretrained=True), use_cuda=use_cuda)
gb = gb_model(input, index=target_index)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([mask, mask, mask])
cam_gb = deprocess_image(cam_mask*gb)
gb = deprocess_image(gb)
cv2.imwrite('gb.jpg', gb)
cv2.imwrite('cam_gb.jpg', cam_gb)
''' | [
"numpy.uint8",
"numpy.maximum",
"argparse.ArgumentParser",
"json.load",
"numpy.std",
"numpy.float32",
"numpy.transpose",
"numpy.zeros",
"numpy.clip",
"cv2.imread",
"numpy.max",
"numpy.mean",
"torch.cuda.is_available",
"numpy.min",
"torch.sum",
"cv2.resize",
"torch.from_numpy"
] | [((3346, 3380), 'torch.from_numpy', 'torch.from_numpy', (['preprocessed_img'], {}), '(preprocessed_img)\n', (3362, 3380), False, 'import torch\n'), ((8972, 8997), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8995, 8997), False, 'import argparse\n'), ((9723, 9741), 'numpy.clip', 'np.clip', (['img', '(0)', '(1)'], {}), '(img, 0, 1)\n', (9730, 9741), True, 'import numpy as np\n'), ((9753, 9772), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (9761, 9772), True, 'import numpy as np\n'), ((11216, 11241), 'cv2.imread', 'cv2.imread', (['image_path', '(1)'], {}), '(image_path, 1)\n', (11226, 11241), False, 'import cv2\n'), ((3280, 3321), 'numpy.transpose', 'np.transpose', (['preprocessed_img', '(2, 0, 1)'], {}), '(preprocessed_img, (2, 0, 1))\n', (3292, 3321), True, 'import numpy as np\n'), ((3551, 3571), 'numpy.uint8', 'np.uint8', (['(255 * mask)'], {}), '(255 * mask)\n', (3559, 3571), True, 'import numpy as np\n'), ((3605, 3624), 'numpy.float32', 'np.float32', (['heatmap'], {}), '(heatmap)\n', (3615, 3624), True, 'import numpy as np\n'), ((3651, 3666), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (3661, 3666), True, 'import numpy as np\n'), ((3683, 3694), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (3689, 3694), True, 'import numpy as np\n'), ((3722, 3741), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (3730, 3741), True, 'import numpy as np\n'), ((6273, 6317), 'numpy.zeros', 'np.zeros', (['target.shape[1:]'], {'dtype': 'np.float32'}), '(target.shape[1:], dtype=np.float32)\n', (6281, 6317), True, 'import numpy as np\n'), ((6579, 6597), 'numpy.maximum', 'np.maximum', (['cam', '(0)'], {}), '(cam, 0)\n', (6589, 6597), True, 'import numpy as np\n'), ((6665, 6692), 'cv2.resize', 'cv2.resize', (['cam', '(224, 224)'], {}), '(cam, (224, 224))\n', (6675, 6692), False, 'import cv2\n'), ((9331, 9356), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9354, 9356), False, 
'import torch\n'), ((9623, 9635), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (9630, 9635), True, 'import numpy as np\n'), ((4849, 4861), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4858, 4861), False, 'import json\n'), ((5330, 5357), 'torch.sum', 'torch.sum', (['(one_hot * output)'], {}), '(one_hot * output)\n', (5339, 5357), False, 'import torch\n'), ((6164, 6195), 'numpy.mean', 'np.mean', (['grads_val'], {'axis': '(2, 3)'}), '(grads_val, axis=(2, 3))\n', (6171, 6195), True, 'import numpy as np\n'), ((6800, 6811), 'numpy.min', 'np.min', (['cam'], {}), '(cam)\n', (6806, 6811), True, 'import numpy as np\n'), ((6832, 6843), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (6838, 6843), True, 'import numpy as np\n'), ((8761, 8788), 'torch.sum', 'torch.sum', (['(one_hot * output)'], {}), '(one_hot * output)\n', (8770, 8788), False, 'import torch\n'), ((9653, 9664), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (9659, 9664), True, 'import numpy as np\n'), ((11263, 11290), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (11273, 11290), False, 'import cv2\n'), ((5168, 5193), 'torch.from_numpy', 'torch.from_numpy', (['one_hot'], {}), '(one_hot)\n', (5184, 5193), False, 'import torch\n'), ((8599, 8624), 'torch.from_numpy', 'torch.from_numpy', (['one_hot'], {}), '(one_hot)\n', (8615, 8624), False, 'import torch\n')] |
from __future__ import absolute_import
from ann_benchmarks.algorithms.base import BaseANN
import subprocess
import struct
import subprocess
import sys
import os
import glob
import numpy as np
import random
import string
class Countrymaam(BaseANN):
    """ann-benchmarks adapter that drives the external ``countrymaam`` CLI
    over stdin/stdout using a little-endian binary protocol.

    NOTE(review): assumes the ``countrymaam`` binary is on PATH — confirm.
    """
    def __init__(self, metric, params):
        # Index type and tree hyper-parameters, with benchmark defaults.
        self._metric = metric
        self._index = params.get("index", "kd-tree")
        self._n_trees = params.get("n_trees", 8)
        self._leaf_size = params.get("leaf_size", 8)
    def fit(self, X):
        """Train an index by streaming X as float64 to `countrymaam train`,
        then launch a long-lived `countrymaam predict` process for queries."""
        X = X.astype(np.float64)
        # random suffix + pid so concurrent benchmark runs do not clash
        suffix = "".join(random.choices(string.ascii_lowercase, k=16))
        index_file_path = f"index_{suffix}_{os.getpid()}.bin"
        p = subprocess.Popen([
            "countrymaam",
            "train",
            "--dim", str(len(X[0])),
            "--index", self._index,
            "--leaf-size", str(self._leaf_size),
            "--tree-num", str(self._n_trees),
            "--output", index_file_path
        ], stdin=subprocess.PIPE)
        # row-major little-endian float64 dump of the whole training matrix
        p.stdin.write(struct.pack(f"={X.size}d", *np.ravel(X)))
        p.communicate()
        p.stdin.close()
        # keep the predict process open; query() talks to it repeatedly
        self._pipe = subprocess.Popen([
            "countrymaam",
            "predict",
            "--dim", str(len(X[0])),
            "--index", self._index,
            "--input", index_file_path
        ], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    def set_query_arguments(self, search_k):
        # number of candidates examined per query
        self._search_k = search_k
    def query(self, v, n):
        """Return the ids of the n nearest neighbours of v.

        Wire format: search_k (int32), n (int32), v (float64[dim]) out;
        a count (int32) followed by `count` int32 ids back."""
        v = v.astype(np.float64)
        self._pipe.stdin.write(struct.pack(f"=i", self._search_k))
        self._pipe.stdin.write(struct.pack(f"=i", n))
        self._pipe.stdin.write(struct.pack(f"={v.size}d", *v))
        self._pipe.stdin.flush()
        rn = struct.unpack("=i", self._pipe.stdout.read(4))[0]
        ret = [0] * rn
        for i in range(rn):
            ret[i] = struct.unpack("=i", self._pipe.stdout.read(4))[0]
        return np.array(ret)
    def __str__(self):
        return f"Countrymaam(index={self._index}, leaf_size={self._leaf_size} n_trees={self._n_trees}, search_k={self._search_k})"
| [
"os.getpid",
"numpy.ravel",
"random.choices",
"struct.pack",
"numpy.array"
] | [((1919, 1932), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (1927, 1932), True, 'import numpy as np\n'), ((557, 601), 'random.choices', 'random.choices', (['string.ascii_lowercase'], {'k': '(16)'}), '(string.ascii_lowercase, k=16)\n', (571, 601), False, 'import random\n'), ((1532, 1566), 'struct.pack', 'struct.pack', (['f"""=i"""', 'self._search_k'], {}), "(f'=i', self._search_k)\n", (1543, 1566), False, 'import struct\n'), ((1599, 1620), 'struct.pack', 'struct.pack', (['f"""=i"""', 'n'], {}), "(f'=i', n)\n", (1610, 1620), False, 'import struct\n'), ((1653, 1683), 'struct.pack', 'struct.pack', (['f"""={v.size}d"""', '*v'], {}), "(f'={v.size}d', *v)\n", (1664, 1683), False, 'import struct\n'), ((647, 658), 'os.getpid', 'os.getpid', ([], {}), '()\n', (656, 658), False, 'import os\n'), ((1037, 1048), 'numpy.ravel', 'np.ravel', (['X'], {}), '(X)\n', (1045, 1048), True, 'import numpy as np\n')] |
"""
Dataset loader for CIFAR100
"""
from copy import deepcopy
import torch
import numpy as np
from torch.utils.data import WeightedRandomSampler
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
def wif(id):
    """Worker-init function that reseeds NumPy per dataloader worker.

    Works around the pytorch dataloader + numpy randomization bug where
    every worker inherits the same NumPy RNG state.
    Code from https://github.com/pytorch/pytorch/issues/5059
    """
    # Recover the base seed from this worker's torch seed, then derive a
    # fresh NumPy seed (4 x 32 bits) from the (worker id, base seed) pair.
    seed_seq = np.random.SeedSequence([id, torch.initial_seed() - id])
    np.random.seed(seed_seq.generate_state(4))
class MultiTaskDataHandler():
    """Template for multi-task dataset handlers.

    Subclasses are expected to assign `trainset` and `testset` datasets
    whose `targets` are (task, label) pairs.
    """
    def __init__(self) -> None:
        self.trainset: Dataset
        self.testset: Dataset

    def get_data_loader(self,
                        batch_size: int,
                        workers: int,
                        train: bool = True) -> DataLoader:
        """Build a DataLoader over the whole train or test split.

        Shuffling is enabled only for the training split; `wif` reseeds
        NumPy in each worker to avoid duplicated augmentation streams.
        """
        source = self.trainset if train else self.testset
        return DataLoader(source, batch_size=batch_size, shuffle=train,
                          num_workers=workers, pin_memory=True,
                          worker_init_fn=wif)

    def get_task_data_loader(self,
                             task: int,
                             batch_size: int,
                             workers: int,
                             train: bool = False) -> DataLoader:
        """Build a DataLoader restricted to one task's samples."""
        # work on a deep copy so the shared dataset is left untouched
        subset = deepcopy(self.trainset if train else self.testset)
        mask = [task == label[0] for label in subset.targets]
        subset.data = subset.data[mask]
        filtered = np.array(subset.targets)[mask, :]
        subset.targets = [(row[0], row[1]) for row in filtered]
        return DataLoader(subset, batch_size=batch_size,
                          shuffle=False, num_workers=workers, pin_memory=True,
                          worker_init_fn=wif)
| [
"copy.deepcopy",
"torch.utils.data.DataLoader",
"numpy.random.SeedSequence",
"numpy.array",
"torch.initial_seed"
] | [((400, 420), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (418, 420), False, 'import torch\n'), ((521, 560), 'numpy.random.SeedSequence', 'np.random.SeedSequence', (['[id, base_seed]'], {}), '([id, base_seed])\n', (543, 560), True, 'import numpy as np\n'), ((1406, 1522), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'batch_size', 'shuffle': 'train', 'num_workers': 'workers', 'pin_memory': '(True)', 'worker_init_fn': 'wif'}), '(data, batch_size=batch_size, shuffle=train, num_workers=workers,\n pin_memory=True, worker_init_fn=wif)\n', (1416, 1522), False, 'from torch.utils.data import DataLoader\n'), ((2270, 2391), 'torch.utils.data.DataLoader', 'DataLoader', (['task_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'workers', 'pin_memory': '(True)', 'worker_init_fn': 'wif'}), '(task_set, batch_size=batch_size, shuffle=False, num_workers=\n workers, pin_memory=True, worker_init_fn=wif)\n', (2280, 2391), False, 'from torch.utils.data import DataLoader\n'), ((1917, 1940), 'copy.deepcopy', 'deepcopy', (['self.trainset'], {}), '(self.trainset)\n', (1925, 1940), False, 'from copy import deepcopy\n'), ((1978, 2000), 'copy.deepcopy', 'deepcopy', (['self.testset'], {}), '(self.testset)\n', (1986, 2000), False, 'from copy import deepcopy\n'), ((2138, 2164), 'numpy.array', 'np.array', (['task_set.targets'], {}), '(task_set.targets)\n', (2146, 2164), True, 'import numpy as np\n')] |
from magicgui.widgets import FunctionGui
import napari
import inspect
import numpy as np
from ..utils import image_tuple, label_tuple
from ..._const import SetConst
# TODO: add "apply" button to avoid filtering whole image stack.
# Map from filter-method name (looked up on the layer's data object) to the
# (min, max) range for that method's first-parameter slider; "None" means
# no filtering is applied.
RANGES = {"None": (None, None),
          "gaussian_filter": (0.2, 30),
          "median_filter": (1, 30),
          "mean_filter": (1, 30),
          "lowpass_filter": (0.005, 0.5),
          "highpass_filter": (0.005, 0.5),
          "erosion": (1, 30),
          "dilation": (1, 30),
          "opening": (1, 30),
          "closing": (1, 30),
          "tophat": (5, 30),
          "entropy_filter": (1, 30),
          "enhance_contrast": (1, 30),
          "std_filter": (1, 30),
          "coef_filter": (1, 30),
          "dog_filter": (0.2, 30),
          "doh_filter": (0.2, 30),
          "log_filter": (0.2, 30),
          "rolling_ball": (5, 30),
          }
class FunctionCaller(FunctionGui):
    """magicgui widget that applies a chosen filter method of the selected
    image layer's data and shows the result as a new napari layer.

    The available methods and their slider ranges come from RANGES; results
    are cached via `last_inputs`/`last_outputs` so moving unrelated widgets
    does not recompute the filter.
    """
    def __init__(self, viewer:"napari.Viewer"):
        self.viewer = viewer # parent napari viewer object
        self.running_function = None # currently running function
        self.current_layer = None # currently selected layer
        self.last_inputs = None # last inputs including function name
        self.last_outputs = None # last output from the function
        # widget layout options passed to FunctionGui
        opt = dict(funcname={"choices": list(RANGES.keys()), "label": "function"},
                   param={"widget_type": "FloatSlider", "min":0.01, "max": 30,
                          "tooltip": "The first parameter."},
                   dims={"choices": ["2D", "3D"], "tooltip": "Spatial dimensions"},
                   fix_clims={"widget_type": "CheckBox", "label": "fix contrast limits",
                              "tooltip": "If you'd like to fix the contrast limits\n"
                                         "while parameter sweeping, check here"}
                   )
        def _func(layer:napari.layers.Image, funcname:str, param, dims="2D",
                  fix_clims=False) -> napari.types.LayerDataTuple:
            # Called automatically by magicgui on every widget change.
            self.current_layer = layer
            if layer is None or funcname == "None" or not self.visible:
                return None
            name = f"Result of {layer.name}"
            inputs = (layer.name, funcname, param, dims)
            # run function if needed
            if self.last_inputs == inputs:
                pass
            else:
                try:
                    with SetConst("SHOW_PROGRESS", False):
                        # dims is "2D"/"3D"; its first character gives the dimensionality
                        self.last_outputs = self.running_function(param, dims=int(dims[0]))
                except Exception as e:
                    # report the failure in the status bar instead of raising
                    self.viewer.status = f"{funcname} finished with {e.__class__.__name__}: {e}"
                    return None
                else:
                    self.last_inputs = inputs
            # set the parameters for the output layer
            try:
                if fix_clims:
                    props_to_inherit = ["colormap", "blending", "translate", "scale", "contrast_limits"]
                else:
                    props_to_inherit = ["colormap", "blending", "translate", "scale"]
                # inherit display settings from an existing result layer, if any
                kwargs = {k: getattr(self.viewer.layers[name], k, None) for k in props_to_inherit}
            except KeyError:
                kwargs = dict(translate="inherit")
            return image_tuple(layer, self.last_outputs, name=name, **kwargs)
        super().__init__(_func, auto_call=True, param_options=opt)
        # rebuild the parameter slider whenever a new function is picked
        self.funcname.changed.connect(self.update_widget)
    def update_widget(self, event=None):
        """
        Update the widget labels and sliders every time function is changed.
        """
        name = self.funcname.value
        self.running_function = getattr(self.current_layer.data, name, None)
        if name == "None" or self.running_function is None:
            return None
        pmin, pmax = RANGES[name]
        # label and bound the slider after the function's first parameter
        sig = inspect.signature(self.running_function)
        first_param = list(sig.parameters.keys())[0]
        self.param.label = first_param
        self.param.min = pmin
        self.param.max = pmax
        self.param.value = sig.parameters[first_param].default
        return None
class ThresholdAndLabel(FunctionGui):
    """magicgui widget: threshold the selected image layer at a percentile
    of its intensity distribution, optionally labelling the result.
    """
    # NOTE(review): `cache` is not referenced in this class body — presumably
    # reserved for subclasses or external use; confirm before removing.
    cache = dict()
    def __init__(self, viewer:"napari.Viewer"):
        self.viewer = viewer
        # widget layout options passed to FunctionGui
        opt = dict(percentile={"widget_type": "FloatSlider",
                               "min": 0, "max": 100,
                               "tooltip": "Threshold percentile"},
                   label={"widget_type": "CheckBox"}
                   )
        def _func(layer:napari.layers.Image, percentile=50, label=False) -> napari.types.LayerDataTuple:
            # Called automatically by magicgui on every widget change.
            if not self.visible:
                return None
            if layer is None:
                return None
            # define the name for the new layer
            if label:
                name = f"[L]{layer.name}"
            else:
                name = f"Threshold of {layer.name}"
            with SetConst("SHOW_PROGRESS", False):
                thr = np.percentile(layer.data, percentile) # TODO: this is slow.
            if label:
                out = layer.data.label_threshold(thr)
                props_to_inherit = ["opacity", "blending", "translate", "scale"]
                _as_layer_data_tuple = label_tuple
            else:
                out = layer.data.threshold(thr)
                props_to_inherit = ["colormap", "opacity", "blending", "translate", "scale"]
                _as_layer_data_tuple = image_tuple
            try:
                # inherit display settings from an existing result layer, if any
                kwargs = {k: getattr(viewer.layers[name], k, None) for k in props_to_inherit}
            except KeyError:
                # first run: sensible defaults for a labels / mask overlay
                if label:
                    kwargs = dict(translate=layer.translate, opacity=0.3)
                else:
                    kwargs = dict(translate=layer.translate, colormap="red", blending="additive")
            return _as_layer_data_tuple(layer, out, name=name, **kwargs)
        super().__init__(_func, auto_call=True, param_options=opt)
class Rotator(FunctionGui):
    """magicgui widget with a single slider that rotates the selected image
    layer's data and shows the result as a new napari layer."""

    def __init__(self, viewer:"napari.Viewer"):
        self.viewer = viewer
        slider_opts = dict(rotate={"widget_type": "FloatSlider",
                                   "min": -180, "max": 180,
                                   "step": 1,
                                   "tooltip": "Rotation Angle"},
                           )

        def _func(layer:napari.layers.Image, rotate) -> napari.types.LayerDataTuple:
            # invoked by magicgui whenever the slider moves
            if layer is None or not self.visible:
                return None
            with SetConst("SHOW_PROGRESS", False):
                rotated = layer.data.rotate(rotate)
            return image_tuple(layer, rotated,
                               contrast_limits=layer.contrast_limits,
                               name=f"Rotation of {layer.name}")

        super().__init__(_func, auto_call=True, param_options=slider_opts)
class RectangleEditor(FunctionGui):
    """magicgui widget with V/H spin boxes that resizes the selected
    rectangle(s) of the active Shapes layer, or appends a new rectangle of
    that size when the selection contains no rectangle."""
    def __init__(self, viewer:"napari.Viewer"):
        self.viewer = viewer
        opt = dict(len_v={"widget_type": "SpinBox",
                          "label": "V",
                          "tooltip": "vertical length in pixel"},
                   len_h={"widget_type": "SpinBox",
                          "label": "H",
                          "tooltip": "horizontal length in pixel"})
        def _func(len_v=128, len_h=128):
            selected_layer = self.get_selected_shapes_layer()
            if selected_layer is None:
                # fixed: no single Shapes layer with a selection — previously
                # this fell through and crashed on `None.data`
                return None
            # check if one shape/point is selected
            new_data = selected_layer.data
            selected_data = selected_layer.selected_data
            count = 0
            for i, data in enumerate(new_data):
                if selected_layer.shape_type[i] == "rectangle" and i in selected_data:
                    # rescale the two edge vectors to the requested lengths,
                    # keeping the rectangle's origin and orientation
                    dh = data[1, -2:] - data[0, -2:]
                    dv = data[3, -2:] - data[0, -2:]
                    data[1, -2:] = dh / np.hypot(*dh) * len_h + data[0, -2:]
                    data[3, -2:] = dv / np.hypot(*dv) * len_v + data[0, -2:]
                    data[2, -2:] = data[1, -2:] - data[0, -2:] + data[3, -2:]
                    count += 1
            if count == 0:
                if selected_layer.nshapes == 0:
                    # TODO: https://github.com/napari/napari/pull/2961
                    # May be solved in near future
                    return None
                # no rectangle was selected: append an axis-aligned one at
                # the currently displayed slice position
                data = np.zeros((4, selected_layer.ndim), dtype=np.float64)
                data[:, :-2] = viewer.dims.current_step[:-2]
                data[1, -2:] = np.array([ 0.0, len_h])
                data[2, -2:] = np.array([len_v, len_h])
                data[3, -2:] = np.array([len_v, 0.0])
                new_data = selected_layer.data + [data]
                selected_data = {len(new_data) - 1}
            selected_layer.data = new_data
            selected_layer.selected_data = selected_data
            selected_layer._set_highlight()
            return None
        super().__init__(_func, auto_call=True, param_options=opt)
    def get_selected_shapes_layer(self):
        """Return the single selected Shapes layer with a non-empty shape
        selection, or None when the current selection does not qualify."""
        selected_layer = list(self.viewer.layers.selection)
        if len(selected_layer) != 1:
            return None
        selected_layer = selected_layer[0]
        if not isinstance(selected_layer, napari.layers.Shapes):
            return None
        elif len(selected_layer.selected_data) == 0:
            return None
        return selected_layer
"numpy.zeros",
"numpy.percentile",
"numpy.hypot",
"inspect.signature",
"numpy.array"
] | [((3978, 4018), 'inspect.signature', 'inspect.signature', (['self.running_function'], {}), '(self.running_function)\n', (3995, 4018), False, 'import inspect\n'), ((5154, 5191), 'numpy.percentile', 'np.percentile', (['layer.data', 'percentile'], {}), '(layer.data, percentile)\n', (5167, 5191), True, 'import numpy as np\n'), ((8542, 8594), 'numpy.zeros', 'np.zeros', (['(4, selected_layer.ndim)'], {'dtype': 'np.float64'}), '((4, selected_layer.ndim), dtype=np.float64)\n', (8550, 8594), True, 'import numpy as np\n'), ((8687, 8709), 'numpy.array', 'np.array', (['[0.0, len_h]'], {}), '([0.0, len_h])\n', (8695, 8709), True, 'import numpy as np\n'), ((8743, 8767), 'numpy.array', 'np.array', (['[len_v, len_h]'], {}), '([len_v, len_h])\n', (8751, 8767), True, 'import numpy as np\n'), ((8799, 8821), 'numpy.array', 'np.array', (['[len_v, 0.0]'], {}), '([len_v, 0.0])\n', (8807, 8821), True, 'import numpy as np\n'), ((8033, 8046), 'numpy.hypot', 'np.hypot', (['*dh'], {}), '(*dh)\n', (8041, 8046), True, 'import numpy as np\n'), ((8110, 8123), 'numpy.hypot', 'np.hypot', (['*dv'], {}), '(*dv)\n', (8118, 8123), True, 'import numpy as np\n')] |
## Running random forests
## Importing packages
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sksurv.ensemble import RandomSurvivalForest
import matplotlib.pyplot as plt
import csv
from sksurv.preprocessing import OneHotEncoder
## Import data and removing variables...
# --- Load NHANES-style training data and drop the row-number column ------
adult_data = pd.read_csv('allimputed.csv')
#adult_data = adult_data.drop(columns=['Unnamed: 0'])
adult_data = adult_data.drop(columns = ['Number'])
adult_data
## Get x and y datasets- separate predictors to y outcome variables
X = adult_data[['BMI', 'Systolic', 'Diastolic', 'regularity', 'Chol2', 'Ethnicity', 'Gender', 'Age', 'heart_attack', 'relative_ha', 'liver_problem', 'cancer', 'stroke', 'days_active', 'smoking_status']]
y = adult_data[['mortstat', 'permth_int']]
mort = list(y['mortstat'])
time = list(y['permth_int'])
## Change to binary value rather than number
for n,i in enumerate(mort):
    if i == 0:
        mort[n] = False
    else:
        mort[n] = True
mort
## Zip lists together to get list of tuples
survival = zip(mort, time)
Y = list(survival)
## Need to turn list of tuples into structured array
## Have to tell it what type of data you have in the struct. array
## Get this from the toy data imported above
# sksurv expects a structured array: (event indicator bool, follow-up time)
dt = np.dtype([('fstat', '?'),('lenfol', '<f8')])
Y = np.array(Y,dtype=dt)
## Get test and train data values and then split X data
# split on row indices so X and Y stay aligned
train_vals, test_vals = train_test_split(range(len(adult_data)), test_size = 0.2, random_state=1)
x_train = X.loc[train_vals].reset_index(drop = True)
x_test = X.loc[test_vals].reset_index(drop = True)
print(x_train[:10])
print(x_test[:10])
## Get Y outcome data as test and train
y_train = []
for val in train_vals:
    y_train.append(Y[val])
y_train = np.asarray(y_train)
#print(y_train)
y_test = []
for val in test_vals:
    y_test.append(Y[val])
y_test = np.asarray(y_test)
#print(y_test)
print('starting to print to csv')
# one-hot encode categorical predictors for sksurv
x_train = OneHotEncoder().fit_transform(x_train)
x_test = OneHotEncoder().fit_transform(x_test)
## Instantiate the random forest
rsf = RandomSurvivalForest(n_estimators= 150,min_samples_split= 25,
                           min_samples_leaf= 10,
                           max_features= 6,
                           n_jobs=-1,
                           random_state= None)
estimate = rsf.fit(x_train, y_train)
pred = rsf.predict_survival_function(x_test)
# concordance index (c-statistic) on held-out data
cstat = rsf.score(x_test, y_test)
print('testing score: ', cstat)
### Get train score c-statistic
traincstat = rsf.score(x_train, y_train)
print('c-stat for training data: ',traincstat)
## Import data and removing variables
# --- Second (external test) dataset, prepared identically -----------------
adult_data2 = pd.read_csv('adult_datatest2.csv')
#adult_data2 = adult_data2.drop(columns=['Unnamed: 0'])
adult_data2 = adult_data2.drop(columns = ['Number'])
adult_data2
## Get x and y datasets- separate predictors to y outcome variables
X2 = adult_data2[['BMI', 'Systolic', 'Diastolic', 'regularity', 'Chol2', 'Ethnicity', 'Gender', 'Age', 'heart_attack', 'relative_ha', 'liver_problem', 'cancer', 'stroke', 'days_active', 'smoking_status']]
y2 = adult_data2[['mortstat', 'permth_int']]
mort2 = list(y2['mortstat'])
time2 = list(y2['permth_int'])
## Change to binary value rather than number
for n,i in enumerate(mort2):
    if i == 0:
        mort2[n] = False
    else:
        mort2[n] = True
mort2
## Zip lists together to get list of tuples
survival2 = zip(mort2, time2)
Y2 = list(survival2)
## Need to turn list of tuples into structured array
## Have to tell it what type of data you have in the struct. array
## Get this from the toy data imported above
dt2 = np.dtype([('fstat', '?'),('lenfol', '<f8')])
Y2 = np.array(Y2,dtype=dt2)
X2 = OneHotEncoder().fit_transform(X2)
# dump per-sample risk scores for the held-out split, one (index, score) row each
with open ('rsfcalibrate_results2.csv', 'w', newline = '') as outfile1:
    writer = csv.writer(outfile1)
    headers = ['index', 'riskscore']
    first = headers
    writer.writerow(first)
    res = []
    riskexample = pd.Series(rsf.predict(x_test))
    for i,v in riskexample.items():
        res.append(i)
        res.append(v)
        writer.writerow(res)
        res = []
    print('added all risk values to csv list')
    print(riskexample)
#example = rsf.predict_survival_function(x_test, return_array = True)
#writer.writerow(res)
riskexample = pd.Series(rsf.predict(X))
#print(riskexample)
# plot estimated survival curves for every row of the external dataset
example = rsf.predict_survival_function(X2, return_array = True)
for i, j in enumerate(example):
    plt.step(rsf.event_times_, j, where="post", label=str(i))
plt.ylabel("Survival probability P(T>t)")
plt.xlabel("Time (months)")
plt.title('RSF estimate survival for 6 test individuals')
plt.legend()
plt.grid(True)
plt.show()
# NOTE(review): savefig after show() typically writes a blank figure —
# confirm and move savefig before show() if the PNG matters.
plt.savefig('rsfsurvfunctest2.png')
###Get the survival values for calibration plot
rsfcalibrate = rsf.predict_survival_function(x_test, return_array = True)
rsfcalibrate = pd.DataFrame(data= rsfcalibrate)
rsfcalibrate.to_csv('rsfcalibrate2.csv')
print('made csv file for calibration plot') | [
"matplotlib.pyplot.title",
"pandas.DataFrame",
"sksurv.ensemble.RandomSurvivalForest",
"matplotlib.pyplot.show",
"csv.writer",
"pandas.read_csv",
"sksurv.preprocessing.OneHotEncoder",
"numpy.asarray",
"numpy.dtype",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matp... | [((335, 364), 'pandas.read_csv', 'pd.read_csv', (['"""allimputed.csv"""'], {}), "('allimputed.csv')\n", (346, 364), True, 'import pandas as pd\n'), ((1270, 1315), 'numpy.dtype', 'np.dtype', (["[('fstat', '?'), ('lenfol', '<f8')]"], {}), "([('fstat', '?'), ('lenfol', '<f8')])\n", (1278, 1315), True, 'import numpy as np\n'), ((1319, 1340), 'numpy.array', 'np.array', (['Y'], {'dtype': 'dt'}), '(Y, dtype=dt)\n', (1327, 1340), True, 'import numpy as np\n'), ((1752, 1771), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (1762, 1771), True, 'import numpy as np\n'), ((1858, 1876), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (1868, 1876), True, 'import numpy as np\n'), ((2064, 2195), 'sksurv.ensemble.RandomSurvivalForest', 'RandomSurvivalForest', ([], {'n_estimators': '(150)', 'min_samples_split': '(25)', 'min_samples_leaf': '(10)', 'max_features': '(6)', 'n_jobs': '(-1)', 'random_state': 'None'}), '(n_estimators=150, min_samples_split=25,\n min_samples_leaf=10, max_features=6, n_jobs=-1, random_state=None)\n', (2084, 2195), False, 'from sksurv.ensemble import RandomSurvivalForest\n'), ((2691, 2725), 'pandas.read_csv', 'pd.read_csv', (['"""adult_datatest2.csv"""'], {}), "('adult_datatest2.csv')\n", (2702, 2725), True, 'import pandas as pd\n'), ((3653, 3698), 'numpy.dtype', 'np.dtype', (["[('fstat', '?'), ('lenfol', '<f8')]"], {}), "([('fstat', '?'), ('lenfol', '<f8')])\n", (3661, 3698), True, 'import numpy as np\n'), ((3703, 3726), 'numpy.array', 'np.array', (['Y2'], {'dtype': 'dt2'}), '(Y2, dtype=dt2)\n', (3711, 3726), True, 'import numpy as np\n'), ((4541, 4582), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival probability P(T>t)"""'], {}), "('Survival probability P(T>t)')\n", (4551, 4582), True, 'import matplotlib.pyplot as plt\n'), ((4583, 4610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (months)"""'], {}), "('Time (months)')\n", (4593, 4610), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4668), 
'matplotlib.pyplot.title', 'plt.title', (['"""RSF estimate survival for 6 test individuals"""'], {}), "('RSF estimate survival for 6 test individuals')\n", (4620, 4668), True, 'import matplotlib.pyplot as plt\n'), ((4669, 4681), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4679, 4681), True, 'import matplotlib.pyplot as plt\n'), ((4682, 4696), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4690, 4696), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4705, 4707), True, 'import matplotlib.pyplot as plt\n'), ((4708, 4743), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rsfsurvfunctest2.png"""'], {}), "('rsfsurvfunctest2.png')\n", (4719, 4743), True, 'import matplotlib.pyplot as plt\n'), ((4882, 4913), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rsfcalibrate'}), '(data=rsfcalibrate)\n', (4894, 4913), True, 'import pandas as pd\n'), ((3853, 3873), 'csv.writer', 'csv.writer', (['outfile1'], {}), '(outfile1)\n', (3863, 3873), False, 'import csv\n'), ((1937, 1952), 'sksurv.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1950, 1952), False, 'from sksurv.preprocessing import OneHotEncoder\n'), ((1985, 2000), 'sksurv.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1998, 2000), False, 'from sksurv.preprocessing import OneHotEncoder\n'), ((3732, 3747), 'sksurv.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (3745, 3747), False, 'from sksurv.preprocessing import OneHotEncoder\n')] |
#<NAME> UIN 327003625 TAMU 2022
#Numerical Simulations 430
#Hmwk 1 graphing and plotting exat solutions
# -*- coding: utf-8 -*
import sys
import numpy as np
import matplotlib.pyplot as plt
"""
#code for initial finite difference model test, delta x = 1/(2**2) cm
a = np.matrix([[-3,1,-0],[1,-3,1],[0,1,-3]])
a_matrix_inverse = np.linalg.inv(a)
resultant_matrix = np.matrix([[0],[0],[-100]])
temp_solutions = np.dot(a_matrix_inverse,resultant_matrix)
pos_along_rod_list_cm = np.linspace(0, 1, num=2**2+1)
#print(pos_along_rod_list_cm)
#print(temp_solutions)
temp_solutions_list = [0]
for i in np.nditer(temp_solutions):
temp_solutions_list.append(i.tolist())
temp_solutions_list.append(100)
for i in range(0, len(temp_solutions_list)):
print(u'{:.5f} cm : {:.5f} \xb0C'.format(pos_along_rod_list_cm[i], temp_solutions_list[i]))
"""
#attempt to create more general method for smaller delta x
def case_1_produce_FDE_matrix(n, alpha=4):
    """Assemble the tridiagonal finite-difference matrix for case 1.

    Discretises the fin equation T'' - alpha^2 T = 0 on [0, 1] with 2**n
    evenly spaced intervals, yielding a (2**n - 1) x (2**n - 1) system for
    the interior nodes.  Each row carries the stencil [1, -k, 1] with
    k = 2 + alpha^2 * delta_x^2.

    Parameters
    ----------
    n : int
        Refinement level; grid spacing is delta_x = 1 / 2**n.
    alpha : float, optional
        Fin parameter of the governing ODE.  Defaults to 4, the value that
        was hard-coded in the original implementation, so existing callers
        are unaffected.

    Returns
    -------
    numpy.ndarray
        Dense tridiagonal matrix with -k on the diagonal and 1 on both
        off-diagonals.
    """
    size = 2**n - 1
    delta_x = 1.0/(2**n)
    k = 2 + alpha**2*delta_x**2
    FDE_matrix = np.zeros((size, size))
    # Fill the three diagonals explicitly.  Unlike the original slice
    # assignments, this also works for size == 1 (n == 1), where assigning
    # a 2-element triple into a 1-element row raised ValueError.
    for row in range(size):
        FDE_matrix[row, row] = -k
        if row > 0:
            FDE_matrix[row, row - 1] = 1
        if row < size - 1:
            FDE_matrix[row, row + 1] = 1
    return(FDE_matrix)
#creating resultant matrix
# Refinement level: grid spacing is 1/2**n, giving 2**n - 1 interior nodes.
n=4
# Right-hand side: zero everywhere except the last interior node, which
# carries the fixed tip temperature (100 degC) moved to the RHS.
resultant_matrix = np.zeros((2**n-1,1))
resultant_matrix[2**n-2,0] = -100
#print(resultant_matrix)
# Solve A*T = b by explicit matrix inversion.
# NOTE(review): np.linalg.solve(A, b) would be cheaper and numerically
# preferable to forming the inverse -- left as-is here.
a_matrix_inverse = np.linalg.inv(case_1_produce_FDE_matrix(n))
solution_matrix = np.dot(a_matrix_inverse, resultant_matrix)
# Node positions along the rod (cm), including both boundary points.
pos_along_rod_list_cm = np.linspace(0, 1, num=2**n+1)
# Surround the interior solution with the fixed boundary temperatures
# (0 degC at the base, 100 degC at the tip).
temp_solutions_list = [0]
for i in np.nditer(solution_matrix):
    temp_solutions_list.append(i.tolist())
temp_solutions_list.append(100)
def analytical_sol_case1(n):
    """Exact solution of the case-1 fin equation, sampled on the FDM grid.

    Solves T'' = a^2 (T - Ta) with boundary conditions T(0) = Tb = 0 and
    T(L) = 100 for L = 1, using the general solution
    T = C sinh(a x) + D cosh(a x) + Ta.

    Parameters
    ----------
    n : int
        Refinement level; the solution is evaluated at 2**n + 1 nodes on
        [0, 1], matching the finite-difference grid above.

    Returns
    -------
    list of float
        Temperature at each grid node from x = 0 to x = 1.
    """
    # Case Parameters ------------------------------------------------------
    Tb = 0     # T(0), base temperature
    T_l = 100  # T(L), tip temperature
    Ta = 0     # ambient temperature
    L = 1      # length
    a = 4      # fin parameter, matches alpha in the FDM matrix
    # (The original also computed thermal conductivity k, radius R, area Ac
    # and h = a^2*k*R/2, but none of them entered the result; removed.)
    # Processing & Output --------------------------------------------------
    x = np.linspace(0, 1, 2**n+1)
    # Apply the boundary conditions to fix the two integration constants.
    C = (T_l - Ta - (Tb-Ta)*np.cosh(a*L))/np.sinh(a*L)
    D = Tb - Ta  # = 0 for these boundary values
    T = C*np.sinh(a*x) + D*np.cosh(a*x) + Ta
    # Equivalent to the original np.nditer append loop, in one call.
    return T.tolist()
print("Case 1 solution:")
# Hoist the analytical solution out of the loop: the original called
# analytical_sol_case1(n) on every iteration, recomputing the full
# solution vector each time (O(n^2) work overall).
analytical_solution = analytical_sol_case1(n)
for i in range(0, len(temp_solutions_list)):
    print(u'{:.5f} cm : T_(FDM) = {:.5f} \xb0C, T_analytical = {:.5f} \xb0C'.format(pos_along_rod_list_cm[i], temp_solutions_list[i], analytical_solution[i]))
| [
"numpy.nditer",
"numpy.zeros",
"numpy.linspace",
"numpy.cosh",
"numpy.dot",
"numpy.sinh"
] | [((1531, 1556), 'numpy.zeros', 'np.zeros', (['(2 ** n - 1, 1)'], {}), '((2 ** n - 1, 1))\n', (1539, 1556), True, 'import numpy as np\n'), ((1694, 1736), 'numpy.dot', 'np.dot', (['a_matrix_inverse', 'resultant_matrix'], {}), '(a_matrix_inverse, resultant_matrix)\n', (1700, 1736), True, 'import numpy as np\n'), ((1761, 1794), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(2 ** n + 1)'}), '(0, 1, num=2 ** n + 1)\n', (1772, 1794), True, 'import numpy as np\n'), ((1827, 1853), 'numpy.nditer', 'np.nditer', (['solution_matrix'], {}), '(solution_matrix)\n', (1836, 1853), True, 'import numpy as np\n'), ((994, 1028), 'numpy.zeros', 'np.zeros', (['(2 ** n - 1, 2 ** n - 1)'], {}), '((2 ** n - 1, 2 ** n - 1))\n', (1002, 1028), True, 'import numpy as np\n'), ((2752, 2781), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(2 ** n + 1)'], {}), '(0, 1, 2 ** n + 1)\n', (2763, 2781), True, 'import numpy as np\n'), ((2967, 2979), 'numpy.nditer', 'np.nditer', (['T'], {}), '(T)\n', (2976, 2979), True, 'import numpy as np\n'), ((2856, 2870), 'numpy.sinh', 'np.sinh', (['(a * L)'], {}), '(a * L)\n', (2863, 2870), True, 'import numpy as np\n'), ((2842, 2856), 'numpy.cosh', 'np.cosh', (['(a * L)'], {}), '(a * L)\n', (2849, 2856), True, 'import numpy as np\n'), ((2890, 2904), 'numpy.sinh', 'np.sinh', (['(a * x)'], {}), '(a * x)\n', (2897, 2904), True, 'import numpy as np\n'), ((2907, 2921), 'numpy.cosh', 'np.cosh', (['(a * x)'], {}), '(a * x)\n', (2914, 2921), True, 'import numpy as np\n')] |
import os
from libdvid import DVIDNodeService
import numpy as np
import h5py
# DVID server endpoint and the repository node (UUID) to read from.
server_addres = "slowpoke1:32768"
uuid = "341635bc8c864fa5acbaf4558122c0d5" # "4b178ac089ee443c9f422b02dcd9f2af"
# the dvid server needs to be started before calling this (see readme)
# Module-level client shared by the extract_* functions below.
node_service = DVIDNodeService(server_addres, uuid)
def make_rois(start, shape, size):
    """Tile a volume into cubic blocks of edge length `size`.

    Splits the box of the given `shape`, anchored at `start`, into
    (shape[d] // size) blocks per dimension and returns one
    [begin, end] coordinate pair per block, iterating z fastest,
    then y, then x (any remainder smaller than `size` is dropped).
    """
    blocks_x = shape[0] // size
    blocks_y = shape[1] // size
    blocks_z = shape[2] // size
    return [
        [
            [start[0] + bx * size, start[1] + by * size, start[2] + bz * size],
            [
                start[0] + (bx + 1) * size,
                start[1] + (by + 1) * size,
                start[2] + (bz + 1) * size,
            ],
        ]
        for bx in range(blocks_x)
        for by in range(blocks_y)
        for bz in range(blocks_z)
    ]
def extract_grayscale(dataset_name, global_start, shape, save_folder):
    """Download a grayscale volume from DVID block-by-block into an HDF5 file.

    The volume of the given `shape`, starting at `global_start` in DVID
    coordinates, is split into 512^3 blocks; each block is fetched via
    node_service.get_gray3D and written into dataset 'data' of
    <save_folder>/<dataset_name>.h5.

    Bug fix: the original created the dataset inside one `with h5py.File`
    block and wrote to it inside a second one.  Leaving the first `with`
    closed that file, so every `gs[bb] = data` write targeted a dataset of
    an already-closed file.  Everything now happens within a single open
    file, mirroring extract_all_labels below.
    """
    save_path = os.path.join(save_folder, "%s.h5" % dataset_name)
    rois = make_rois(global_start, shape, 512)
    block_shape = (512, 512, 512)
    with h5py.File(save_path) as f:
        gs = f.create_dataset(
            "data", shape, dtype=np.uint8, chunks=True, compression="gzip"
        )
        ii = 0
        for start, stop in rois:
            print("extracting block %i / %i" % (ii, len(rois)))
            ii += 1
            # Bounding box of this block inside the output dataset.
            bb = tuple(slice(start[i], stop[i]) for i in range(len(start)))
            print(bb)
            data = node_service.get_gray3D(dataset_name, block_shape, start)
            print(data.shape)
            gs[bb] = data
# extract all labels from the rois and store them to h5
def extract_all_labels(dataset_name, global_start, shape, save_folder):
    """Fetch the label volume from DVID in 512^3 blocks and store it in HDF5.

    Writes the uint64 label data into dataset 'data' of
    <save_folder>/<dataset_name>.h5, one 512^3 block at a time.
    """
    out_path = os.path.join(save_folder, "%s.h5" % dataset_name)
    block_shape = (512, 512, 512)
    rois = make_rois(global_start, shape, 512)
    n_blocks = len(rois)
    with h5py.File(out_path) as handle:
        labels = handle.create_dataset(
            "data", shape, dtype=np.uint64, chunks=True, compression="gzip"
        )
        for ii, (start, stop) in enumerate(rois):
            print("extracting block %i / %i" % (ii, n_blocks))
            # Bounding box of this block inside the output dataset.
            bb = tuple(slice(a, b) for a, b in zip(start, stop))
            print(bb)
            dvid_data = node_service.get_labels3D(dataset_name, block_shape, start)
            print(type(dvid_data))
            print(dvid_data.shape)
            print(np.unique(dvid_data))
            labels[bb] = dvid_data
if __name__ == "__main__":
    # Extraction window in DVID coordinates (full fib25 bounding box).
    start = np.array([0, 0, 0])
    stop = np.array([8255, 4479, 5311])
    shape = stop - start
    save_path = "/groups/saalfeld/saalfeldlab/larissa/data/fib25/"
    # Convert to plain tuples as expected by the extraction helpers.
    shape = tuple(shape)
    start = tuple(start)
    # labels_name = 'google__fib25_groundtruth_roi_eroded50_z5006-8000'
    ds_name = "grayscale"
    extract_grayscale(ds_name, start, shape, save_path)
    print(len(make_rois(start, shape, 512)))
| [
"h5py.File",
"numpy.array",
"os.path.join",
"numpy.unique",
"libdvid.DVIDNodeService"
] | [((279, 315), 'libdvid.DVIDNodeService', 'DVIDNodeService', (['server_addres', 'uuid'], {}), '(server_addres, uuid)\n', (294, 315), False, 'from libdvid import DVIDNodeService\n'), ((1063, 1112), 'os.path.join', 'os.path.join', (['save_folder', "('%s.h5' % dataset_name)"], {}), "(save_folder, '%s.h5' % dataset_name)\n", (1075, 1112), False, 'import os\n'), ((1935, 1984), 'os.path.join', 'os.path.join', (['save_folder', "('%s.h5' % dataset_name)"], {}), "(save_folder, '%s.h5' % dataset_name)\n", (1947, 1984), False, 'import os\n'), ((2725, 2744), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2733, 2744), True, 'import numpy as np\n'), ((2756, 2784), 'numpy.array', 'np.array', (['[8255, 4479, 5311]'], {}), '([8255, 4479, 5311])\n', (2764, 2784), True, 'import numpy as np\n'), ((1204, 1224), 'h5py.File', 'h5py.File', (['save_path'], {}), '(save_path)\n', (1213, 1224), False, 'import h5py\n'), ((2076, 2096), 'h5py.File', 'h5py.File', (['save_path'], {}), '(save_path)\n', (2085, 2096), False, 'import h5py\n'), ((1360, 1380), 'h5py.File', 'h5py.File', (['save_path'], {}), '(save_path)\n', (1369, 1380), False, 'import h5py\n'), ((2626, 2646), 'numpy.unique', 'np.unique', (['dvid_data'], {}), '(dvid_data)\n', (2635, 2646), True, 'import numpy as np\n')] |
"""Individuals."""
from typing import List, Tuple
import math
import numpy as np
import random
# Custom types
# A chromosome holds the n-1 intermediate vertical coordinates of a curve.
Chromosome = List[float]
# (low, high) bounds from which random initial coordinates are drawn.
SearchSpace = Tuple[float, float]
class AB:
    """Approximated Brachistochrone.

    An individual of the evolutionary algorithm.  The curve from the start
    point (height ``y`` at the left edge) to the end point (height 0 after
    horizontal distance ``x``) is approximated by the ``n - 1`` vertical
    coordinates of its chromosome, one per interior "slot" of the evenly
    spaced horizontal grid.
    """

    def __init__(
        self,
        n: int,
        y: float,
        x: float,
        std: float,
        search_space: SearchSpace,
        chromosome: Chromosome = None
    ) -> None:
        """Store the problem geometry and draw a chromosome if none given."""
        self.n = n
        self.y = y
        self.x = x
        self.std = std
        self.search_space = search_space
        if chromosome:
            self.chromosome = chromosome
        else:
            self.chromosome = self.initialize()

    def initialize(self) -> Chromosome:
        """Draw n - 1 random heights, each strictly below the start height y."""
        genes = []
        for _ in range(self.n - 1):
            candidate = self.y
            # Re-sample until the drawn height drops below the start point.
            while candidate >= self.y:
                candidate = random.randint(*self.search_space)
            genes.append(candidate)
        return genes

    @property
    def fitness(self) -> float:
        """Travel-time estimate following Borschbach-Dreckmann.

        Sums, over all n horizontal slots, each segment's length divided by
        the sum of the square-root drop terms at its two endpoints.
        """
        path = [self.y] + self.chromosome + [0]
        slot_width = self.x / self.n
        total = 0.0
        for left, right in zip(path, path[1:]):
            segment = np.sqrt(
                ((slot_width)**2) +
                ((right - left)**2)
            )
            denom = np.sqrt(self.y - left) + np.sqrt(self.y - right)
            total += segment/denom
        return total
| [
"random.randint",
"numpy.sqrt"
] | [((1646, 1704), 'numpy.sqrt', 'np.sqrt', (['(bin_width ** 2 + (points[i + 1] - points[i]) ** 2)'], {}), '(bin_width ** 2 + (points[i + 1] - points[i]) ** 2)\n', (1653, 1704), True, 'import numpy as np\n'), ((1276, 1310), 'random.randint', 'random.randint', (['*self.search_space'], {}), '(*self.search_space)\n', (1290, 1310), False, 'import random\n'), ((1767, 1794), 'numpy.sqrt', 'np.sqrt', (['(self.y - points[i])'], {}), '(self.y - points[i])\n', (1774, 1794), True, 'import numpy as np\n'), ((1797, 1828), 'numpy.sqrt', 'np.sqrt', (['(self.y - points[i + 1])'], {}), '(self.y - points[i + 1])\n', (1804, 1828), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from layers.layers import *
from utils import DynamicRunningStat, LimitedRunningStat, RunningStat
import random
# Small constant added to std denominators to avoid division by zero
# during observation normalization.
eps = 1e-12
class RND:
    # Random Network Distillation class
    """Random Network Distillation intrinsic-motivation module.

    Two networks receive the same observation: a fixed, randomly
    initialised 'target' and a trainable 'predictor'.  The predictor is
    trained to reproduce the target's output, and its per-sample squared
    error serves as the intrinsic (novelty) reward -- small for
    frequently-seen observations, large for novel ones.
    """
    def __init__(self, sess, input_spec, network_spec_target, network_spec_predictor, obs_to_state, lr=7e-5,
                 buffer_size=1e5, batch_size=128, num_epochs=3,
                 motivation_weight=1., obs_normalization=False,
                 num_itr=3, name='rnd', **kwargs):
        """Build the TF1 graph: target, predictor, loss and train op.

        sess: TF session; input_spec: callable returning the input
        placeholders; network_spec_*: callables building the two networks;
        obs_to_state: callable converting raw observations into network
        states.  Remaining arguments are training hyperparameters.
        """
        # Used to normalize the intrinsic reward due to arbitrary scale
        self.r_norm = RunningStat()
        # NOTE(review): the 9269 shape appears tied to the DeepCrawl
        # 'global_in' observation size -- confirm before reuse elsewhere.
        self.obs_norm = RunningStat(shape=(9269))
        self.obs_normalization = obs_normalization
        # The tensorflow session
        self.sess = sess
        # Model hyperparameters
        self.lr = lr
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.num_itr = num_itr
        self.num_epochs = num_epochs
        # Functions that define input and network specifications
        self.input_spec = input_spec
        self.network_spec_target = network_spec_target
        self.network_spec_predictor = network_spec_predictor
        self.obs_to_state = obs_to_state
        # Weight of the reward output by the motivation model
        self.motivation_weight = motivation_weight
        # Buffer of experience
        self.buffer = []

        with tf.compat.v1.variable_scope(name) as vs:
            # Input placeholders, they depend on DeepCrawl
            self.inputs = self.input_spec()

            # Target network, it must remain fixed during all the training
            with tf.compat.v1.variable_scope('target'):
                # Network specification from external function
                self.target = self.network_spec_target(self.inputs)

            # Predictor network
            with tf.compat.v1.variable_scope('predictor'):
                # Network specification from external function
                self.predictor = self.network_spec_predictor(self.inputs)

            # For fixed target labels, use a placeholder in order to NOT update the target network
            _, latent = shape_list(self.target)
            self.target_labels = tf.compat.v1.placeholder(tf.float32, [None, latent], name='target_labels')

            # Scalar training loss; self.rewards keeps the per-sample error.
            self.reward_loss = tf.compat.v1.losses.mean_squared_error(self.target_labels, self.predictor)
            #self.rewards = tf.math.squared_difference(self.target_labels, self.predictor)
            self.rewards = tf.reduce_sum(tf.math.pow(self.target_labels - self.predictor, 2), axis=1)

            # Adam with global-norm gradient clipping; only the predictor's
            # variables receive gradients (the target never enters the loss
            # graph through trainable paths).
            optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)

            gradients, variables = zip(*optimizer.compute_gradients(self.reward_loss))
            gradients, _ = tf.compat.v1.clip_by_global_norm(gradients, 1.0)
            self.step = optimizer.apply_gradients(zip(gradients, variables))

        self.saver = tf.compat.v1.train.Saver(max_to_keep=None)

    # Fit function
    def train(self):
        """Train the predictor for num_itr mini-batches, then clear the buffer.

        Returns the mean loss over all iterations.
        """
        losses = []

        # If we want to use observation normalization, normalize the buffer
        if self.obs_normalization:
            self.normalize_buffer()

        for it in range(self.num_itr):
            # for e in range(self.num_epochs):
            #     num_batches = int(np.ceil(len(self.buffer)/self.batch_size))
            #     all_index = np.arange(len(self.buffer))
            #     np.random.shuffle(all_index)
            #     for b in range(num_batches):
            # Take a mini-batch of batch_size experience
            mini_batch_idxs = np.random.choice(len(self.buffer), self.batch_size, replace=False)
            # mini_batch_idxs = all_index[i*self.batch_size: i*self.batch_size + self.batch_size]

            mini_batch = [self.buffer[id] for id in mini_batch_idxs]

            # Convert the observation to states
            states = self.obs_to_state(mini_batch)

            # Create the feed dict for the target network
            feed_target = self.create_state_feed_dict(states)

            # Get the target prediction (without training it)
            target_labels = self.sess.run([self.target], feed_target)[0]

            # Get the predictor estimation
            feed_predictor = self.create_state_feed_dict(states)
            feed_predictor[self.target_labels] = target_labels

            # Update the predictor networks
            loss, step, rews = self.sess.run([self.reward_loss, self.step, self.rewards], feed_predictor)

            losses.append(loss)

        # Update Dynamic Running Stat
        if isinstance(self.r_norm, DynamicRunningStat):
            self.r_norm.reset()

        self.buffer = []

        # Return the mean losses of all the iterations
        return np.mean(losses)

    # Eval function
    def eval(self, obs):
        """Return the (flattened) intrinsic reward for a batch of observations.

        Also feeds each reward into the running-stat normalizer (except for
        DynamicRunningStat, which is reset in train() instead).
        """
        # Normalize observation
        if self.obs_normalization:
            self.normalize_states(obs)

        # Convert the observation to states
        states = self.obs_to_state(obs)

        # Create the feed dict for the target network
        feed_target = self.create_state_feed_dict(states)

        # Get the target prediction (without training it)
        target_labels = self.sess.run([self.target], feed_target)[0]

        # Get the predictor estimation
        feed_predictor = self.create_state_feed_dict(states)
        feed_predictor[self.target_labels] = target_labels

        # Compute the MSE to use as reward (after normalization)
        # Update the predictor networks
        rewards = self.sess.run(self.rewards, feed_predictor)
        rewards = np.reshape(rewards, (-1))

        # Add the rewards to the normalization statistics
        if not isinstance(self.r_norm, DynamicRunningStat):
            for r in rewards:
                self.r_norm.push(r)

        return rewards

    # Eval function
    def eval_latent(self, obs):
        """Like eval(), but also return the predictor's latent outputs."""
        # Normalize observation
        if self.obs_normalization:
            self.normalize_states(obs)

        # Convert the observation to states
        states = self.obs_to_state(obs)

        # Create the feed dict for the target network
        feed_target = self.create_state_feed_dict(states)

        # Get the target prediction (without training it)
        target_labels = self.sess.run([self.target], feed_target)[0]

        # Get the predictor estimation
        feed_predictor = self.create_state_feed_dict(states)
        feed_predictor[self.target_labels] = target_labels

        # Compute the MSE to use as reward (after normalization)
        # Update the predictor networks
        rewards, latents = self.sess.run([self.rewards, self.predictor], feed_predictor)
        rewards = np.reshape(rewards, (-1))

        # Add the rewards to the normalization statistics
        if not isinstance(self.r_norm, DynamicRunningStat):
            for r in rewards:
                self.r_norm.push(r)

        return rewards, latents

    # Create a state feed_dict from states
    def create_state_feed_dict(self, states):
        """Map each input placeholder to its corresponding state array."""
        feed_dict = {}
        for i in range(len(states)):
            feed_dict[self.inputs[i]] = states[i]

        return feed_dict

    # Add observation to buffer
    def add_to_buffer(self, obs, mode='random'):
        """Append an observation; when full, evict one entry first.

        mode='random' evicts a random element, any other mode evicts the
        oldest.  Also updates the observation-normalization statistics when
        enabled.
        """
        if len(self.buffer) >= self.buffer_size:
            if mode == 'random':
                index = np.random.randint(0, len(self.buffer))
                del self.buffer[index]
            else:
                del self.buffer[0]
        if self.obs_normalization:
            self.obs_norm.push(obs['global_in'])
        self.buffer.append(obs)

    # Save the entire model
    def save_model(self, name=None, folder='saved'):
        """Checkpoint the graph variables to <folder>/<name>_rnd."""
        tf.compat.v1.disable_eager_execution()
        self.saver.save(self.sess, '{}/{}_rnd'.format(folder, name))
        return

    # Load entire model
    def load_model(self, name=None, folder='saved'):
        """Restore graph variables from <folder>/<name>_rnd."""
        # self.saver = tf.compat.v1.train.import_meta_graph('{}/{}.meta'.format(folder, name))
        tf.compat.v1.disable_eager_execution()
        self.saver.restore(self.sess, '{}/{}_rnd'.format(folder, name))
        print('RND loaded correctly!')
        return

    # Normalize the buffer state based on the running mean
    def normalize_buffer(self):
        """In-place z-normalize and clip every buffered 'global_in' to [-5, 5]."""
        for state in self.buffer:
            state['global_in'] = (state['global_in'] - self.obs_norm.mean) / (self.obs_norm.std + eps)
            state['global_in'] = np.clip(state['global_in'], -5, 5)

    # Normalize input states based on the running mean
    def normalize_states(self, states):
        """In-place z-normalize and clip each state's 'global_in' to [-5, 5]."""
        for state in states:
            state['global_in'] = (state['global_in'] - self.obs_norm.mean) / (self.obs_norm.std + eps)
            state['global_in'] = np.clip(state['global_in'], -5, 5)

    # Clear experience buffer
    def clear_buffer(self):
        """Drop all buffered observations."""
        self.buffer = []
| [
"utils.RunningStat",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.train.Saver",
"numpy.clip",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.... | [((605, 618), 'utils.RunningStat', 'RunningStat', ([], {}), '()\n', (616, 618), False, 'from utils import DynamicRunningStat, LimitedRunningStat, RunningStat\n'), ((643, 666), 'utils.RunningStat', 'RunningStat', ([], {'shape': '(9269)'}), '(shape=9269)\n', (654, 666), False, 'from utils import DynamicRunningStat, LimitedRunningStat, RunningStat\n'), ((2941, 2983), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (2965, 2983), True, 'import tensorflow as tf\n'), ((4814, 4829), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (4821, 4829), True, 'import numpy as np\n'), ((5655, 5678), 'numpy.reshape', 'np.reshape', (['rewards', '(-1)'], {}), '(rewards, -1)\n', (5665, 5678), True, 'import numpy as np\n'), ((6749, 6772), 'numpy.reshape', 'np.reshape', (['rewards', '(-1)'], {}), '(rewards, -1)\n', (6759, 6772), True, 'import numpy as np\n'), ((7749, 7787), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (7785, 7787), True, 'import tensorflow as tf\n'), ((8054, 8092), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (8090, 8092), True, 'import tensorflow as tf\n'), ((1420, 1453), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['name'], {}), '(name)\n', (1447, 1453), True, 'import tensorflow as tf\n'), ((2237, 2311), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[None, latent]'], {'name': '"""target_labels"""'}), "(tf.float32, [None, latent], name='target_labels')\n", (2261, 2311), True, 'import tensorflow as tf\n'), ((2344, 2418), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', (['self.target_labels', 'self.predictor'], {}), '(self.target_labels, self.predictor)\n', (2382, 2418), True, 'import tensorflow as tf\n'), ((2637, 2678), 
'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (2669, 2678), True, 'import tensorflow as tf\n'), ((2793, 2841), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.compat.v1.clip_by_global_norm', (['gradients', '(1.0)'], {}), '(gradients, 1.0)\n', (2825, 2841), True, 'import tensorflow as tf\n'), ((8482, 8516), 'numpy.clip', 'np.clip', (["state['global_in']", '(-5)', '(5)'], {}), "(state['global_in'], -5, 5)\n", (8489, 8516), True, 'import numpy as np\n'), ((8778, 8812), 'numpy.clip', 'np.clip', (["state['global_in']", '(-5)', '(5)'], {}), "(state['global_in'], -5, 5)\n", (8785, 8812), True, 'import numpy as np\n'), ((1657, 1694), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""target"""'], {}), "('target')\n", (1684, 1694), True, 'import tensorflow as tf\n'), ((1877, 1917), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""predictor"""'], {}), "('predictor')\n", (1904, 1917), True, 'import tensorflow as tf\n'), ((2551, 2602), 'tensorflow.math.pow', 'tf.math.pow', (['(self.target_labels - self.predictor)', '(2)'], {}), '(self.target_labels - self.predictor, 2)\n', (2562, 2602), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
import glob, os, shutil, stat, subprocess, sys
import numpy as np
from os.path import expanduser
HOME = expanduser("~")
CWD = os.getcwd()

import socket
hostname = socket.gethostname()

# Parent directory under which per-scan working directories are created.
WRKDIRBASE = (os.path.abspath('..') + '/Simulations/')

# NOTE(review): this branch tests 'lxkb' while launch() below tests 'lxbk';
# one of the two spellings is likely a typo -- confirm against the actual
# cluster hostnames.
if 'lxkb' in hostname:
    print ('\n*** LAUNCHING FOR KRONOS ***\n')
    raise NotImplementedError('to be set up')
else:
    sessionname = 'runsims'
    print ('\n*** LAUNCHING LOCALLY VIA TMUX SESSION "{}" ***\n'.format(sessionname))

# ==== Simulation Working Directory
WRKDIR = WRKDIRBASE + '2020-01-22--coolstuff/'

# ==== Directory that contains the base cfg file with name filename
SRCDIR = CWD
SUFFIX = 'testing'

# ==== Simulation parameters
# Tune scan grid: 5 x 5 points in (Qx, Qy).
Qx = np.linspace(18.55, 18.95, 5)
Qy = np.linspace(18.55, 18.95, 5)
def launch(Qx, Qy, hostname):
    """Set up the working directory and launch one simulation per tune pair.

    Writes a task.<k>.py driver for every (Qx[i], Qy[j]) combination, then
    either submits to Kronos (hostname contains 'lxbk') or chains the runs
    in local tmux windows, where each window waits for the previous run's
    Output/finished<k-1> marker file before starting its own task.
    """
    wrkdir = create_directories(WRKDIR, SRCDIR)

    # Prepare scan
    # NOTE(review): the flat task index uses i*len(Qx) + j; for a
    # non-square grid this should presumably be i*len(Qy) + j -- confirm.
    for i, v in enumerate(Qx):
        for j, w in enumerate(Qy):
            write_scan_files(
                wrkdir,
                i*len(Qx) + j,
                ('Qx={0:g}, Qy={1:g}').format(
                    v, w)
            )

    # NOTE(review): tests 'lxbk' while the module header tests 'lxkb';
    # likely one spelling is a typo -- confirm.
    if 'lxbk' in hostname:
        submit_to_kronos(wrkdir,)
    else:
        # run locally in tmux windows
        subprocess.call(['tmux', 'new', '-d', '-s', sessionname, 'ipython'])
        # trigger first simulation run:
        subprocess.call(['tmux', 'send', '-t', sessionname,
            '!touch {}/Output/finished0'.format(wrkdir), 'Enter'])
        for v in Qx:
            for w in Qy:
                # One tmux window per grid point; its index doubles as the
                # 1-based task id.
                subprocess.call(['tmux', 'new-window', '-t', sessionname])
                tmux_window_id = subprocess.check_output(
                    ['tmux', 'display-message', '-p', '#I']).decode('utf8').rstrip()
                # log everything:
                subprocess.call(['tmux', 'pipe-pane', '-t', sessionname,
                    'cat>{}/Output/tmux.{}.log'.format(
                        wrkdir, tmux_window_id)])
                subprocess.call(['tmux', 'send', '-t', sessionname,
                    '# running at Qx {:g} and Qy {:g}'.format(v, w),
                    'Enter'])
                subprocess.call(['tmux', 'send', '-t', sessionname,
                    'cd {}'.format(wrkdir), 'Enter'])
                subprocess.call(['tmux', 'send', '-t', sessionname,
                    # wait until the previous run has finished:
                    'while [ ! -e Output/finished{0} ]; do sleep 1; done; '
                    # run the simulation:
                    'python task.{1}.py && '
                    # trigger next run after finishing this one:
                    'touch Output/finished{1}'.format(
                        int(tmux_window_id) - 1, tmux_window_id),
                    'Enter'])
def create_directories(wrkdir, srcdir, casdir=None, locdir=None, suffix=None):
    """Create a fresh simulation working directory.

    Copies srcdir into wrkdir as "<basename(srcdir)>_<suffix>" and creates
    empty Data/ and Output/ subdirectories inside it.  If the target
    already exists, the user is asked interactively whether to overwrite.

    Parameters
    ----------
    wrkdir : str
        Parent directory in which the new working directory is created.
    srcdir : str
        Source directory to copy (a trailing slash is tolerated).
    casdir, locdir : optional
        Unused; kept for backward compatibility with existing callers.
    suffix : str, optional
        Suffix appended to the directory name; defaults to the
        module-level SUFFIX constant.

    Returns
    -------
    str
        Path of the newly created working directory.
    """
    if suffix is None:
        suffix = SUFFIX
    # Catch potential slash at end of path
    if srcdir.split('/')[-1] == '':
        extension = srcdir.split('/')[-2]
    else:
        extension = srcdir.split('/')[-1]
    # Make directories
    newrkdir = wrkdir + '/' + extension + '_' + suffix
    if os.path.exists(newrkdir):
        while True:
            # Bug fix: the original called raw_input(), which does not
            # exist in Python 3 (renamed to input()).
            ans = input('\nWARNING: Path ' + newrkdir +
                        ' already exists! Overwrite? [yes or no]\n')
            if ans in ('y', 'ye', 'yes'):
                shutil.rmtree(newrkdir)
                break
            if ans in ('n', 'no'):
                print ('\nAborting...')
                exit(0)
            print ('\nPlease answer "yes" or "no"!')
    shutil.copytree(srcdir, newrkdir)
    os.mkdir(newrkdir + '/Data')
    os.mkdir(newrkdir + '/Output')
    return newrkdir
def write_scan_files(wrkdir, it, kwargstr):
    """Write the per-task driver script task.<it+1>.py into wrkdir.

    The generated script imports the simulation module `main`, sets its
    task id and output path, prints a banner and calls main.run() with the
    given keyword-argument string (e.g. "Qx=18.55, Qy=18.55").
    """
    task_id = it + 1
    script_lines = [
        'import main\n\n',
        'main.it={0}\n'.format(task_id),
        'main.outputpath="{0}/Data"\n'.format(wrkdir),
        'print ("****** Running at {0}!")\n\n'.format(kwargstr),
        'main.run({0})\n\n'.format(kwargstr),
    ]
    with open('{0}/task.{1}.py'.format(wrkdir, task_id), 'wt') as task_file:
        task_file.writelines(script_lines)
def submit_to_kronos(wrkdir=None):
    """Placeholder for batch submission to the Kronos cluster.

    Not implemented yet; see the commented-out bsub_to_hpcbatch template
    below for the intended shape.

    Bug fix: launch() calls submit_to_kronos(wrkdir,), but the original
    zero-argument signature would raise TypeError; the working-directory
    argument is now accepted (and currently ignored).
    """
    pass
    # similar to bsub_to_hpcbatch below for example..
"""
def bsub_to_hpcbatch(wrkdir, jobmin=1, jobmax=1, libraries=None,
prefix='', casdir=None, locdir=None):
os.chdir(wrkdir)
with open('myjob.lsf', 'w') as file:
file.write('#!/bin/bash')
# file.write('\nmodule load compilers/cuda-8.0')
file.write('\nmodule load compilers/cuda-7.5')
file.write('\nnvcc --version')
file.write('\nnvidia-smi')
file.write('\nexport PATH="/home/HPC/oeftiger/anaconda/bin:$PATH"')
file.write('\nwhich python')
file.write('\n\ncd ' + wrkdir)
file.write('\n\nulimit -c 0')
file.write('\n\npython tmppyheadtail.$LSB_JOBINDEX')
file.write('\nls -l')
file.write('\n\necho -e "\\n\\n******** LSF job successfully completed!"')
# file.write('\n\necho -e "\\n******** Now copying output files..."')
# file.write('\ncp *.h5 ' + wrkdir + '/Data')
file.write('\necho -e "HOSTNAME: "')
file.write('\nhostname')
file.write('\necho -e "\\n"')
file.write('\ncat /proc/cpuinfo')
file.write('\necho -e "\\n*** DEBUG END ****"')
print ('\n*** Submitting jobs ' + prefix + ' to LSF...')
for i in range(int(jobmax / JCHUNK) + 1):
a = i * JCHUNK + 1
b = (i + 1) * JCHUNK
if b > jobmax: b = jobmax
lsfcommand = ['bsub', '-L /bin/bash', '-N ',
'-e ' + wrkdir + '/Output/stderror.%J.%I.log ',
'-o ' + wrkdir + '/Output/stdout.%J.%I.log',
'-J ' + prefix + '[' + str(a) + '-' + str(b) + ']',
'-u ' + EMAIL, '-q ' + QUEUE, '< myjob.lsf']
if EXTRA_ARGS:
lsfcommand.insert(1, EXTRA_ARGS)
if PARALLEL:
lsfcommand.insert(1, '-n 8 -R "span[hosts=1]"')
lsfcommand = ' '.join(lsfcommand)
print ('Executing submission with command: ' + lsfcommand)
with open('launch' + str(i + 1), 'wt') as file:
file.write("#!/bin/bash\n")
for lsfc in lsfcommand:
file.write(lsfc)
os.chmod("launch" + str(i + 1), 0777)
subprocess.call("./launch" + str(i + 1))
"""
if __name__ == "__main__":
    # Script entry point: sweep the (Qx, Qy) tune grid defined above.
    launch(Qx, Qy, hostname)
| [
"os.mkdir",
"os.path.abspath",
"shutil.rmtree",
"os.getcwd",
"subprocess.check_output",
"os.path.exists",
"socket.gethostname",
"subprocess.call",
"numpy.linspace",
"shutil.copytree",
"os.path.expanduser"
] | [((127, 142), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (137, 142), False, 'from os.path import expanduser\n'), ((150, 161), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (159, 161), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((188, 208), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (206, 208), False, 'import socket\n'), ((722, 750), 'numpy.linspace', 'np.linspace', (['(18.55)', '(18.95)', '(5)'], {}), '(18.55, 18.95, 5)\n', (733, 750), True, 'import numpy as np\n'), ((756, 784), 'numpy.linspace', 'np.linspace', (['(18.55)', '(18.95)', '(5)'], {}), '(18.55, 18.95, 5)\n', (767, 784), True, 'import numpy as np\n'), ((224, 245), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (239, 245), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((3318, 3342), 'os.path.exists', 'os.path.exists', (['newrkdir'], {}), '(newrkdir)\n', (3332, 3342), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((3758, 3791), 'shutil.copytree', 'shutil.copytree', (['srcdir', 'newrkdir'], {}), '(srcdir, newrkdir)\n', (3773, 3791), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((3796, 3824), 'os.mkdir', 'os.mkdir', (["(newrkdir + '/Data')"], {}), "(newrkdir + '/Data')\n", (3804, 3824), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((3829, 3859), 'os.mkdir', 'os.mkdir', (["(newrkdir + '/Output')"], {}), "(newrkdir + '/Output')\n", (3837, 3859), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((1294, 1362), 'subprocess.call', 'subprocess.call', (["['tmux', 'new', '-d', '-s', sessionname, 'ipython']"], {}), "(['tmux', 'new', '-d', '-s', sessionname, 'ipython'])\n", (1309, 1362), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((1606, 1664), 'subprocess.call', 'subprocess.call', (["['tmux', 'new-window', '-t', sessionname]"], {}), "(['tmux', 'new-window', '-t', sessionname])\n", (1621, 1664), False, 'import glob, os, shutil, stat, subprocess, 
sys\n'), ((3555, 3578), 'shutil.rmtree', 'shutil.rmtree', (['newrkdir'], {}), '(newrkdir)\n', (3568, 3578), False, 'import glob, os, shutil, stat, subprocess, sys\n'), ((1698, 1762), 'subprocess.check_output', 'subprocess.check_output', (["['tmux', 'display-message', '-p', '#I']"], {}), "(['tmux', 'display-message', '-p', '#I'])\n", (1721, 1762), False, 'import glob, os, shutil, stat, subprocess, sys\n')] |
"""
generate_plots_memory.py is a Python routine that can be used to generate
the plots of <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
gravitational waves from reheating magnetogeneses".
It reads the pickle run variables that can be generated by the routine
initialize_memory.py.
The function run() executes the code.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
# get working directory, where the runs and routines should be stored
dir0 = os.getcwd() + '/'
HOME = dir0 + '/..'
# Temporarily switch to the repository root so the project modules below
# (dirs, run, plot_sets, ...) can be imported, then switch back.
os.chdir(HOME)

from dirs import read_dirs as rd
import run as r
import plot_sets
import spectra
import cosmoGW
import interferometry as inte
import pta

os.chdir(dir0)
def run():
    """Load the memory-project runs from their pickle variables.

    Returns the run dictionary produced by run.load_runs for the selected
    directory series.
    """
    os.chdir(HOME)

    # import dictionary with the names identifying
    # the runs and pointing to the corresponding directory
    # NOTE(review): each rd(...) call below rebinds `dirs`, so only the
    # last series ('memory_nonhelical_toff') is visible here unless
    # read_dirs accumulates state internally -- confirm against
    # dirs.read_dirs.
    dirs = rd('memory_nonhelical_b73')
    dirs = rd('memory_nonhelical_b27')
    dirs = rd('memory_helical_b73')
    dirs = rd('memory_helical_b27')
    dirs = rd('memory_helical_b17')
    dirs = rd('memory_helical_toff')
    dirs = rd('memory_nonhelical_toff')
    R = [s for s in dirs]

    # read the runs stored in the pickle variables
    runs = r.load_runs(R, dir0, dirs, quiet=False)
    os.chdir(dir0)

    return runs
def generate_table_pars(runs, save=True):

    """
    Function that generates the Table I of <NAME>, <NAME>, and
    <NAME>, "Leading-order nonlinear gravitational waves from
    reheating magnetogeneses" that contains the parameters regarding
    the end of reheating for each type of simulations.

    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the table in tableI.csv
                (default True)

    Returns:
        df -- pandas DataFrame with one row per run series (A-E).
    """

    import pandas as pd

    # choose 1 run of each of the series
    names = np.array(['A', 'B', 'C', 'D', 'E'])
    runA = runs.get('A1_l')
    runB = runs.get('B1_l')
    runC = runs.get('C1_l')
    runD = runs.get('D1_l')
    runE = runs.get('E1_l')
    rrs = [runA, runB, runC, runD, runE]

    Ts = []
    gammas = []
    betas = []
    gs = []
    rat_Hs = []
    rat_as = []
    rat_Has = []
    for i in rrs:
        # run.pars layout: [T (GeV), g, gamma, beta]
        pars = i.pars
        g = pars[1]
        T = pars[0]
        # Hubble rate and scale-factor ratios at the end of reheating,
        # relative to today (h0 = 1).
        H0 = cosmoGW.H0_val(h0=1.)
        Hs = cosmoGW.Hs_val(g, T*u.GeV)
        rat_H = Hs/H0
        rat_a = cosmoGW.as_a0_rat(g, T*u.GeV)
        rat_Ha = rat_H**2*rat_a**4
        # Format the temperature with two decimals only below 1 GeV.
        if pars[0] > 1: T = '%i'%pars[0]
        else: T = '%.2f'%pars[0]
        Ts.append(T)
        gs.append('%i'%g)
        gammas.append('%i'%pars[2])
        betas.append(pars[3])
        rat_Hs.append(rat_H)
        rat_as.append(rat_a)
        rat_Has.append('%.4e'%rat_Ha)
    Ts = np.array(Ts)
    gammas = np.array(gammas)
    betas = np.array(betas)
    gs = np.array(gs)
    # NOTE(review): rat_Hs and rat_as are computed but never enter the
    # DataFrame below; only their combination rat_Has is tabulated.
    rat_Hs = np.array(rat_Hs)
    rat_as = np.array(rat_as)
    rat_Has = np.array(rat_Has)

    df = pd.DataFrame({'name': names, 'Tr [GeV]': Ts, 'gamma': gammas,
                       'beta': betas, 'g': gs,
                       '(Hs/H0)^2 (as/a0)^4': rat_Has})
    if save: df.to_csv('tableI.csv')

    return df
def generate_table(runs, save=True, print_tex=False):
    """
    Function that generates the Table II of <NAME>, <NAME>, and
    <NAME>, "Leading-order nonlinear gravitational waves from
    reheating magnetogeneses" that contains the relevant results of the runs.
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the table in tableII.csv
                (default True)
        print_tex -- option to additionally print the table rows in the
                     LaTeX format used in the paper (default False)
    Returns:
        df -- pandas DataFrame with the per-run results

    NOTE(review): the loop assumes each nonlinear run ('_nl') is visited
    before its linear counterpart ('_l'), since the '_l' branch consumes
    GW_nl, hc_nl and pol_nl computed in a previous iteration -- confirm the
    key ordering of 'runs'.
    """
    import pandas as pd
    import cosmoGW
    # column accumulators; hr_ar, Dhr_ar and rat_Dhr_ar are filled below
    # but not included in the final DataFrame
    name = []
    B0s = []
    kstars = []
    EM_ar = []
    EGW_ar = []
    DEGW_ar = []
    rat_DEGW_ar = []
    hr_ar = []
    Dhr_ar = []
    rat_Dhr_ar = []
    pol_ar = []
    Dpol_ar = []
    rat_Dpol_ar = []
    for i in runs:
        run = runs.get(i)
        t = run.ts.get('t')
        # index of the stored time closest to the end of reheating (t = 1)
        indt = np.argmin(abs(t - 1.))
        EEM = run.ts.get('EEEM')[indt]
        # format the EM energy density with magnitude-dependent precision
        if EEM > .2: EMM = '%.0f'%EEM
        elif EEM > .05: EMM = '%.1f'%EEM
        else: EMM = '%.2f'%EEM
        tk = run.spectra.get('t_EGW')
        indt = np.argmin(abs(tk - 1))
        k = run.spectra.get('k')
        if 'toff' in run.name_run:
            # source turned off: use stationary spectra and average the
            # strain spectrum GWh over times t > 2
            GW = np.trapz(run.spectra.get('EGW_stat'), k)
            hc = np.sqrt(np.trapz(np.trapz(run.spectra.get('GWh')\
                [np.where(tk > 2)], tk[np.where(tk > 2)], axis=0)/ \
                (tk[-1] - tk[np.where(tk > 2)][0]), k))
            XiGW = np.trapz(run.spectra.get('helEGW_stat'), k)
        else:
            GW = np.trapz(run.spectra.get('EGW')[indt, :], k)
            hc = np.sqrt(np.trapz(run.spectra.get('GWh')[indt, :], k))
            XiGW = np.trapz(run.spectra.get('helEGW')[indt, :], k)
        pol_l = XiGW/GW
        if '_l' in run.name_run:
            # linear run: store its values and the differences w.r.t. the
            # nonlinear counterpart (see NOTE in the docstring)
            B0s.append('%.1e'%run.B0)
            EM_ar.append(EMM)
            kss = cosmoGW.ks_infla(run.pars[3], run.pars[2], eta=1)
            kstars.append('%.1f'%kss)
            EGW_ar.append('%.6e'%GW)
            hr_ar.append(hc)
            nmm = run.name_run.replace('_l', '')
            if 'toff' in nmm: ser = nmm.replace('_toff', '') + "'"
            else: ser = nmm
            name.append(ser)
            diffEGW = GW_nl - GW
            DEGW_ar.append('%.6e'%diffEGW)
            rdiffEGW = diffEGW/GW_nl
            rat_DEGW_ar.append('%.6e'%rdiffEGW)
            Dhr_ar.append(hc_nl - hc)
            rat_Dhr_ar.append((hc_nl - hc)/hc_nl)
            diff_pol = pol_l - pol_nl
            Dpol_ar.append('%.6e'%diff_pol)
            rdiff_pol = diff_pol/pol_l
            rat_Dpol_ar.append('%.6e'%rdiff_pol)
        else:
            # nonlinear run: keep its values for the matching linear run
            tk = run.spectra.get('t_EGW')
            indt = np.argmin(abs(tk - 1))
            k = run.spectra.get('k')
            GW_nl = np.trapz(run.spectra.get('EGW')[indt, :], k)
            XiGW_nl = np.trapz(run.spectra.get('helEGW')[indt, :], k)
            hc_nl = np.sqrt(np.trapz(run.spectra.get('GWh')[indt, :], k))
            if 'toff' in run.name_run:
                GW_nl = np.trapz(run.spectra.get('EGW_stat'), k)
                hc_nl = np.sqrt(np.trapz(np.trapz(run.spectra.get('GWh')\
                        [np.where(tk > 2)], tk[np.where(tk > 2)],
                        axis=0)/(tk[-1] - \
                        tk[np.where(tk > 2)][0]), k))
                XiGW_nl = np.trapz(run.spectra.get('helEGW_stat'), k)
            pol_nl = XiGW_nl/GW_nl
            pol_ar.append('%.6f'%pol_nl)
    # sort every column alphabetically by run name
    name = np.array(name)
    inds = np.argsort(name)
    name = name[inds]
    B0s = np.array(B0s)[inds]
    kstars = np.array(kstars)[inds]
    EM_ar = np.array(EM_ar)[inds]
    EGW_ar = np.array(EGW_ar)[inds]
    DEGW_ar = np.array(DEGW_ar)[inds]
    rat_DEGW_ar = np.array(rat_DEGW_ar)[inds]
    hr_ar = np.array(hr_ar)[inds]
    Dhr_ar = np.array(Dhr_ar)[inds]
    rat_Dhr_ar = np.array(rat_Dhr_ar)[inds]
    pol_ar = np.array(pol_ar)[inds]
    Dpol_ar = np.array(Dpol_ar)[inds]
    rat_Dpol_ar = np.array(rat_Dpol_ar)[inds]
    df = pd.DataFrame({'name': name, 'B0': B0s, 'EM': EM_ar,
                       'k_* (1)': kstars, 'EGW': EGW_ar,
                       'Del EGW': DEGW_ar, 'ratio Del EGW': rat_DEGW_ar,
                       'pol': pol_ar, 'Del pol': Dpol_ar,
                       'ratio Del pol': rat_Dpol_ar})
    if save: df.to_csv('tableII.csv')
    if print_tex:
        # print the table rows in the LaTeX markup used in the paper
        EM = np.array(df['EM'], dtype='float')
        EGW = np.array(df['EGW'], dtype='float')
        DEGW = np.array(df['Del EGW'], dtype='float')
        rDEGW = np.array(df['ratio Del EGW'], dtype='float')
        PGW = np.array(df['pol'], dtype='float')
        DPGW = np.array(df['Del pol'], dtype='float')
        rDPGW = np.array(df['ratio Del pol'], dtype='float')
        for i in range(0, len(name)):
            nmm = name[i]
            col0 = ''
            col1 = ''
            if "'" in nmm:
                # primed runs (source turned off) are wrapped in a
                # series-dependent LaTeX color command
                ser = nmm.replace("'", '_l_toff')
                run = runs.get(ser)
                ser = '\,' + nmm
                if 'A' in nmm: col0 = '\\blue{'
                if 'B' in nmm: col0 = '\\green{'
                if 'C' in nmm: col0 = '\\orange{'
                if 'D' in nmm: col0 = '\\red{'
                if 'E' in nmm: col0 = '\\purple{'
                col1 = '}'
            else:
                run = runs.get(nmm + '_l')
                ser = nmm
            ser = col0 + ser + col1
            pars = run.pars
            # split each quantity into base and exponent for the
            # '$a \times 10^{b}$' notation
            exp_B0 = np.floor(np.log10(run.B0))
            bas_B0 = run.B0/10**exp_B0
            exp_EGW = np.floor(np.log10(EGW[i]))
            bas_EGW = EGW[i]/10**exp_EGW
            exp_DEGW = np.floor(np.log10(DEGW[i]))
            bas_DEGW = DEGW[i]/10**exp_DEGW
            exp_rDEGW = np.floor(np.log10(rDEGW[i]))
            bas_rDEGW = rDEGW[i]/10**exp_rDEGW
            exp_DPGW = np.floor(np.log10(abs(DPGW[i])))
            bas_DPGW = DPGW[i]/10**exp_DPGW
            exp_rDPGW = np.floor(np.log10(abs(rDPGW[i])))
            bas_rDPGW = rDPGW[i]/10**exp_rDPGW
            if pars[0] > 1000:
                T_exp = np.floor(np.log10(pars[0]))
                T_bas = pars[0]/10**T_exp
                T = "$%i \\times 10^{%i}$"%(T_bas, T_exp)
            elif pars[0] > 1: T = '%i'%pars[0]
            else: T = '%.2f'%pars[0]
            if EM[i] > .2: EMM = '%.0f'%EM[i]
            elif EM[i] > .05: EMM = '%.1f'%EM[i]
            else: EMM = '%.2f'%EM[i]
            aux = ''
            if 'B4' in nmm: aux = '\\hline'
            kstar = cosmoGW.ks_infla(pars[3], pars[2], eta=1)
            print(ser, '&', # T, '&',
                  col0 + "$%.1f \\times 10^{%i}$"%(bas_B0, exp_B0) + col1, '&',
                  col0 + '%s'%EMM + col1, '&',
                  col0 + '$%.1f$'%kstar + col1, '&',
                  col0 + "$%.1f \\times 10^{%i}$"%(bas_EGW, exp_EGW) + col1,
                  '&')
            print(col0 + "$%.1f \\times 10^{%i}$"%(bas_DEGW, exp_DEGW) + col1,
                  '&',
                  col0 + "$%.1f \\times 10^{%i}$"%(bas_rDEGW, exp_rDEGW) + col1,
                  '&')
            print(col0 + '$%.3f$'%PGW[i] + col1, '&',
                  col0 + "$%.1f \\times 10^{%i}$"%(bas_DPGW, exp_DPGW) + col1,
                  '&',
                  col0 + "$%.1f \\times 10^{%i}$"%(bas_rDPGW, exp_rDPGW) + \
                  col1,
                  '\\\\' + aux)
    return df
def select_runs(runs, A='A'):
    """
    Function that returns linear and nonlinear runs corresponding to the
    type of simulations.
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra (looked up by keys such as 'A1_l', 'A1_nl')
        A -- option to chose the type of runs to be plotted (default 'A',
             other options are 'B', 'C', 'D', 'E')
    Returns:
        runs_l -- array with linear run variables
        runs_nl -- array with nonlinear run variables
        col -- color corresponding to A for plots
    Raises:
        ValueError -- if A is not one of 'A'..'E' (the original code
                      raised an accidental NameError in that case)
    """
    # plot color and number of runs per series; series E only has 3 runs
    series = {'A': ('blue', 4), 'B': ('darkgreen', 4), 'C': ('orange', 4),
              'D': ('red', 4), 'E': ('purple', 3)}
    if A not in series:
        raise ValueError("unknown series %r; expected one of %s"
                         % (A, sorted(series)))
    col, n = series[A]
    runs_l = [runs.get('%s%i_l' % (A, i)) for i in range(1, n + 1)]
    runs_nl = [runs.get('%s%i_nl' % (A, i)) for i in range(1, n + 1)]
    return runs_l, runs_nl, col
def plot_EGW(runs, A='A', diff=False, save=True):
    """
    Function that plots the resulting GW energy density spectrum at the end
    of inflation (reheating) and compares the result from linear theory to
    the result after adding the leading-order non-linear term (memory effect).
    It generates the plots corresponding to figure 1 of
    <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses."
    It generates the left panels if diff = False and the right panels if
    diff = True
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        diff -- option to plot the EGW spectrum or the difference between
                linear and nonlinear when diff = True (default False)
        A -- option to chose the type of runs to be plotted (default 'A',
             other options are 'B', 'C', 'D')
        save -- option to save the plot in plots/EGW_k_'A'_'diff'.pdf
                (default True)
    """
    def _warn_t1_unavailable(name_run, t_used):
        # Warn when the exact time t = 1 is missing from the stored spectra
        # and the closest available time is used instead.  (The original
        # inline prints applied '%' to a string with no format specifier,
        # which raised TypeError whenever the warning triggered.)
        print('The time t = 1 is not available in the spectra of the run '
              '%s, so t = %.2f has been taken' % (name_run, t_used))
    fig, ax = plt.subplots(figsize=(12, 8))
    plot_sets.axes_lines()
    if diff: ax2 = ax.twinx()
    plot_sets.axes_lines(both=False)
    # chose linear and nonlinear runs corresponding to A
    runs_l, runs_nl, col = select_runs(runs, A=A)
    EEM = [0.02, 0.1, 1, 10]
    if diff:
        # right panels: |EGW_nl - EGW_l| and its ratio to EGW_nl
        for i in range(0, len(runs_l)):
            run_l = runs_l[i]
            t_l = run_l.spectra.get('t_EGW')
            ind_tl = np.argmin(abs(t_l - 1.))
            if abs(t_l[ind_tl] - 1) > 1e-2:
                _warn_t1_unavailable(run_l.name_run, t_l[ind_tl])
            EGW_l = run_l.spectra.get('EGW')[ind_tl, 1:]
            k = run_l.spectra.get('k')[1:]
            run_nl = runs_nl[i]
            t_nl = run_nl.spectra.get('t_EGW')
            ind_tnl = np.argmin(abs(t_nl - 1.))
            if abs(t_nl[ind_tnl] - 1) > 1e-2:
                # fixed: the original referenced an undefined 'run' and
                # indexed t_l with the nonlinear index here
                _warn_t1_unavailable(run_nl.name_run, t_nl[ind_tnl])
            EGW_nl = run_nl.spectra.get('EGW')[ind_tnl, 1:]
            dif = abs(EGW_nl - EGW_l)
            ax.plot(k, dif, color=col, alpha = .1 + i*.3,
                    label=r'${\cal E}_{\rm EM} = %.2f$'%EEM[i])
            good = np.where(EGW_l != 0)
            ax2.plot(k, dif/EGW_nl[good], '.', color=col,
                     alpha = .15 + i*.15)
            ax2.set_ylim(1e-5, 2.)
    else:
        # left panels: linear spectra (solid) and nonlinear spectra (dashed)
        j = 0
        for i in runs_l:
            t_l = i.spectra.get('t_EGW')
            ind_tl = np.argmin(abs(t_l - 1.))
            if abs(t_l[ind_tl] - 1) > 1e-2:
                _warn_t1_unavailable(i.name_run, t_l[ind_tl])
            EGW_l = i.spectra.get('EGW')[ind_tl, 1:]
            k = i.spectra.get('k')[1:]
            ax.plot(k, EGW_l, color=col, alpha = .1 + j*.3,
                    label=r'${\cal E}_{\rm EM} = %.2f$'%EEM[j])
            j += 1
        j = 0
        for i in runs_nl:
            t_nl = i.spectra.get('t_EGW')
            ind_tnl = np.argmin(abs(t_nl - 1.))
            if abs(t_nl[ind_tnl] - 1) > 1e-2:
                _warn_t1_unavailable(i.name_run, t_nl[ind_tnl])
            EGW_nl = i.spectra.get('EGW')[ind_tnl, 1:]
            k = i.spectra.get('k')[1:]
            ax.plot(k, EGW_nl, color=col, ls='--', alpha=.1 + j*.3)
            j += 1
    # reference power-law slopes per series
    fs = 32
    if A == 'A':
        xx = np.linspace(1.2, 5)
        ax.plot(xx, 1e-8*xx, color=col, ls='-.', lw=.8)
        ax.text(1.7, 1e-11, r'$\sim\!k$', color=col, fontsize=fs)
        xx = np.linspace(15, 60)
        ax.plot(xx, 1e-12*(xx/10)**(-32), color=col, ls='-.', lw=.8)
        ax.text(14, 1e-30, r'$\sim\!k^{-32}$', color=col, fontsize=fs)
    if A == 'B':
        xx = np.linspace(1.15, 3)
        ax.plot(xx, 1e-6*xx, color=col, ls='-.', lw=.8)
        ax.text(1.3, 1e-8, r'$\sim\!k$', color=col, fontsize=fs)
        xx = np.linspace(10, 100)
        ax.plot(xx, 2e-1*(xx/10)**(-10), color=col, ls='-.', lw=.8)
        ax.text(27, 6e-5, r'$\sim\!k^{-10}$', color=col, fontsize=fs)
    if A == 'C':
        xx = np.linspace(1.4, 20)
        ax.plot(xx, 1e-10*xx**1.5, color=col, ls='-.', lw=.8)
        ax.text(3.3, 2e-13, r'$\sim\!k^{3/2}$', color=col, fontsize=fs)
        xx = np.linspace(30, 100)
        ax.plot(xx, 1e10*(xx/10)**(-45), color=col, ls='-.', lw=.8)
        ax.text(22, 1e-24, r'$\sim\!k^{-45}$', color=col, fontsize=fs)
    if A == 'D':
        xx = np.linspace(1.25, 8)
        ax.plot(xx, 1e-8*xx**1.5, color=col, ls='-.', lw=.8)
        ax.text(2., 1e-10, r'$\sim\!k^{3/2}$', color=col, fontsize=fs)
        xx = np.linspace(11, 50)
        ax.plot(xx, 1e-7*(xx/10)**(-15), color=col, ls='-.', lw=.8)
        ax.text(10, 6e-15, r'$\sim\!k^{-15}$', color=col, fontsize=fs)
    if A == 'E':
        xx = np.linspace(.2, 2.5)
        ax.plot(xx, 4e-7*xx**1.5, color=col, ls='-.', lw=.8)
        ax.text(.6, 1.3e-8, r'$\sim\!k^{3/2}$', color=col, fontsize=fs)
        xx = np.linspace(8, 50)
        ax.plot(xx, 3e-3*xx**(-4), color=col, ls='-.', lw=.8)
        ax.text(10, 7e-10, r'$\sim\!k^{-4}$', color=col, fontsize=fs)
    if not diff:
        ax.legend(fontsize=24, loc='lower left', frameon=False)
    ax.set_yscale('log')
    ax.set_xscale('log')
    if A == 'E': ax.set_xlim(.1, 9e1)
    else: ax.set_xlim(1, 300)
    if diff: ax2.set_yscale('log')
    run = runs_l[0]
    if run.pars[2] == 0: h = 'non-helical'
    else: h = 'helical'
    b = run.pars[3]
    # y-axis ranges/ticks tuned per series and panel type
    if A == 'A' or A == 'C' or A == 'E':
        if diff:
            if A == 'E':
                ax.set_ylim(1e-34, 1e2)
                ax.set_yticks(np.logspace(-34, 2, 10))
            else:
                ax.set_ylim(1e-40, 1e2)
                ax.set_yticks(np.logspace(-46, 2, 13))
        else:
            if A == 'E':
                ax.set_ylim(1e-14, 1e1)
                ax.set_yticks(np.logspace(-14, 2, 9))
            else:
                ax.set_ylim(1e-42, 1e2)
                ax.set_yticks(np.logspace(-42, 2, 12))
    else:
        if diff:
            ax.set_ylim(1e-32, 1e2)
            ax.set_yticks(np.logspace(-34, 2, 10))
        else:
            ax.set_ylim(1e-30, 1e2)
            ax.set_yticks(np.logspace(-30, 2, 9))
    if not diff: ax.set_title(r'Series %s: %s runs with $\beta = %.1f$' \
                              %(A, h, b), pad=15)
    ax.set_xlabel('$k$')
    if diff:
        ax.set_ylabel(r'$|\Delta E_{\rm GW} (k)|$')
        ax2.set_ylabel(r'$|\Delta E_{\rm GW} (k)|$' + \
                       r'$/E_{\rm GW}^{\rm nlin} (k)$')
    else: ax.set_ylabel(r'$E_{\rm GW} (k)$')
    dff = ''
    if diff: dff = '_diff'
    if save: plt.savefig('plots/' + 'EGW_k_' + A + dff + '.pdf',
                         bbox_inches='tight')
def plot_PGW(runs, A='A', save=True):
    """
    Function that plots the resulting GW polarization spectrum at the end
    of inflation (reheating) and compares the result from linear theory to
    the result after adding the leading-order non-linear term (memory effect).
    It generates the plots corresponding to figure 2 of
    <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        A -- option to chose the type of runs to be plotted (default 'A',
             other options are 'B', 'C', 'D')
        save -- option to save the plot in plots/PGW_k_'A'.pdf (default True)
    """
    def _warn_t1_unavailable(name_run, t_used):
        # Warn when the exact time t = 1 is missing from the stored spectra
        # and the closest available time is used instead.  (The original
        # inline prints applied '%' to a string with no format specifier,
        # which raised TypeError whenever the warning triggered.)
        print('The time t = 1 is not available in the spectra of the run '
              '%s, so t = %.2f has been taken' % (name_run, t_used))
    plt.figure(figsize=(12, 8))
    # chose linear and nonlinear runs corresponding to A
    runs_l, runs_nl, col = select_runs(runs, A=A)
    EEM = [0.02, 0.1, 1, 10]
    # linear runs: polarization helEGW/EGW at t = 1 (solid lines)
    j = 0
    for i in runs_l:
        t_l = i.spectra.get('t_EGW')
        ind_tl = np.argmin(abs(t_l - 1.))
        if abs(t_l[ind_tl] - 1) > 1e-2:
            _warn_t1_unavailable(i.name_run, t_l[ind_tl])
        EGW_l = i.spectra.get('EGW')[ind_tl, 1:]
        XiGW_l = i.spectra.get('helEGW')[ind_tl, 1:]
        k = i.spectra.get('k')[1:]
        good = np.where(EGW_l != 0)
        plt.plot(k[good], XiGW_l[good]/EGW_l[good],
                 color=col, alpha=.1 + j*.3,
                 label=r'${\cal E}_{\rm EM} = %.2f$'%EEM[j])
        j += 1
    # nonlinear runs: dashed lines
    j = 0
    for i in runs_nl:
        t_nl = i.spectra.get('t_EGW')
        ind_tnl = np.argmin(abs(t_nl - 1.))
        if abs(t_nl[ind_tnl] - 1) > 1e-2:
            _warn_t1_unavailable(i.name_run, t_nl[ind_tnl])
        EGW_nl = i.spectra.get('EGW')[ind_tnl, 1:]
        XiGW_nl = i.spectra.get('helEGW')[ind_tnl, 1:]
        k = i.spectra.get('k')[1:]
        good = np.where(EGW_nl != 0)
        plt.plot(k[good], XiGW_nl[good]/EGW_nl[good], '--',
                 color=col, alpha=.1 + j*.3)
        j += 1
    run = runs_l[0]
    if run.pars[2] == 0: h = 'non-helical'
    else: h = 'helical'
    b = run.pars[3]
    plt.title(r'Series %s: %s runs with $\beta = %.1f$'%(A, h, b), pad=15)
    plt.xscale('log')
    plt.xlim(1, 300)
    plt.ylim(-.2, 1.1)
    # legend placement tuned per series
    locc = 'lower left'
    if A == 'C': locc = 'lower center'
    if A == 'E': locc = 'lower right'
    plt.legend(loc=locc, fontsize=20, frameon=False)
    if A == 'E': plt.xlim(.2, 100)
    plot_sets.axes_lines()
    plt.xlabel('$k$')
    plt.ylabel(r'${\cal P}_{\rm GW} (k)$')
    ax = plt.gca()
    ax.tick_params(axis='x', pad=15)
    if save: plt.savefig('plots/' + 'PGW_k_' + A + '.pdf',
                         bbox_inches='tight')
def plot_EEGW_vs_EEM(runs, save=True, plot=True, print_tex=False):
    """
    Function that plots the total GW energy density produced as a function
    of the maximum electromagnetic energy density, both for the linearized
    GW equation and the nonlinear contribution corresponding to the
    nonlinear effect.
    It generates the plot corresponding to figure 3 and the tables
    corresponding to table III of <NAME>, <NAME>, and
    <NAME>, "Leading-order nonlinear gravitational waves from
    reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the plot in plots/EEGW_EEM.pdf and
                plots/DEEGW_EEM.pdf (default True)
        plot -- option to produce the figures; if False only the fit
                coefficients and the table are computed (default True)
        print_tex -- option to print the table rows in LaTeX format
                     (default False)
    Returns:
        df2 -- pandas DataFrame with the fit coefficients q, qt, p, pt,
               r, rt per series
    NOTE(review): ratA..ratE2 below are only assigned when the primed
    runs (e.g. "A2'", keys containing both '2' and "'") are present in
    'runs'; otherwise the q/p/r appends raise NameError -- confirm the
    required runs are always loaded.
    """
    import pandas as pd
    df = generate_table(runs, save=False)
    df_pars = generate_table_pars(runs, save=False)
    # table columns as float arrays
    EEM = np.array(df['EM'], dtype='float')
    EGW = np.array(df['EGW'], dtype='float')
    delEGW = np.array(df['Del EGW'], dtype='float')
    name = np.array(df['name'])
    delpol = np.array(df['ratio Del pol'], dtype='float')
    betas = np.array(df_pars['beta'], dtype='float')
    gammas = np.array(df_pars['gamma'], dtype='float')
    col_A = 'blue'
    col_B = 'darkgreen'
    col_C = 'orange'
    col_D = 'red'
    col_E = 'purple'
    def plot_runA(q, A='A', exp=2, d='two', plot=True,
                  qst='q', kk=True, sc=False, col='blue'):
        # Plot the power-law fit (q*x)**exp for series A and return the
        # coefficient rescaled by k_* of the series.  Uses the enclosing
        # 'xx' abscissa (late-bound closure; defined before the first call).
        bet = np.array(df_pars['beta'][np.where(df_pars['name'] == A)[0]],
                       dtype='float')[0]
        gam = np.array(df_pars['gamma'][np.where(df_pars['name'] == A)[0]],
                       dtype='float')[0]
        kstar = cosmoGW.ks_infla(bet, gam)
        if plot:
            # label precision: 'two'/'three' decimals, or scientific
            # notation when sc is True
            if d == 'two': st = '%.2f'%q
            if d == 'three': st = '%.3f'%q
            if sc:
                xp = np.floor(np.log10(q))
                base = q/10**xp
                if d == 'two': st = r'%.1f \times 10^{%i}'%(base, xp)
                if d == 'three': st = r'%.2f \times 10^{%i}'%(base, xp)
            lbl = r'$%s = %s$'%(qst, st)
            if kk: lbl += ', $k_* = %.1f$'%kstar
            plt.plot(xx, (q*xx)**exp, color=col, lw=.8,
                     label=lbl)
        return q*kstar
    if plot:
        plt.figure(1, figsize=(12, 8))
        plt.figure(2, figsize=(12, 8))
        plt.figure(3, figsize=(12, 8))
    # plot scatter points of EGW, Delta EGW and Delta PGW/PGW vs EM
    f_col = 'none'
    for i in range(0, len(EEM)):
        if 'A' in name[i]:
            col = col_A
            if '2' in name[i]:
                if "'" in name[i]:
                    # series coefficients measured from the primed "2'" run
                    ratA = np.sqrt(EGW[i])/EEM[i]
                    ratA1 = (delEGW[i])**(1/3)/EEM[i]
                    ratA2 = delpol[i]/EEM[i]
        if 'B' in name[i]:
            col = col_B
            if '2' in name[i]:
                if "'" in name[i]:
                    ratB = np.sqrt(EGW[i])/EEM[i]
                    ratB1 = (delEGW[i])**(1/3)/EEM[i]
                    ratB2 = delpol[i]/EEM[i]
        if 'C' in name[i]:
            col = col_C
            f_col = col
            if '2' in name[i]:
                if "'" in name[i]:
                    ratC = np.sqrt(EGW[i])/EEM[i]
                    ratC1 = (delEGW[i])**(1/3)/EEM[i]
                    ratC2 = delpol[i]/EEM[i]
        if 'D' in name[i]:
            col = col_D
            f_col = col_D
            if '2' in name[i]:
                if "'" in name[i]:
                    ratD = np.sqrt(EGW[i])/EEM[i]
                    ratD1 = (delEGW[i])**(1/3)/EEM[i]
                    ratD2 = delpol[i]/EEM[i]
        if 'E' in name[i]:
            col = col_E
            f_col = col_E
            if '2' in name[i]:
                if "'" in name[i]:
                    ratE = np.sqrt(EGW[i])/EEM[i]
                    ratE1 = (delEGW[i])**(1/3)/EEM[i]
                    ratE2 = delpol[i]/EEM[i]
        if plot:
            # unprimed runs as filled/open circles, primed runs as crosses
            if "'" not in name[i]:
                plt.figure(1)
                plt.scatter(EEM[i], EGW[i], facecolors=f_col, color=col)
                plt.figure(2)
                plt.scatter(EEM[i], delEGW[i], facecolors=f_col, color=col)
                plt.figure(3)
                plt.scatter(EEM[i], abs(delpol[i]), facecolors=f_col,
                            color=col)
            else:
                plt.figure(1)
                plt.plot(EEM[i], EGW[i], 'x', color=col)
                plt.figure(2)
                plt.plot(EEM[i], delEGW[i], 'x', color=col)
                plt.figure(3)
                plt.plot(EEM[i], abs(delpol[i]), 'x', color=col)
    xx2 = np.logspace(-1, 2)
    xx = np.logspace(-3, 3)
    xx3 = np.logspace(-1.8, -.9)
    # plot quadratic lines of EGW vs EM and obtain coefficients q and q tilde
    if plot: plt.figure(1)
    q = [.18, .52, .08, .21, .36]
    AA = ['A', 'B', 'C', 'D', 'E', "A'", "B'", "C'",
          "D'", "E'"]
    cols = [col_A, col_B, col_C, col_D, col_E]
    q.append(ratA)
    q.append(ratB)
    q.append(ratC)
    q.append(ratD)
    q.append(ratE)
    q = np.array(q)
    qt = np.zeros(10)
    for l in range(0, 5):
        qt[l] = plot_runA(q[l], A=AA[l], col=cols[l], plot=plot)
        qt[l + 5] = plot_runA(q[l + 5], A=AA[l], plot=False)
        if plot: plt.plot(xx3, (q[l + 5]*xx3)**2, ls='-.',
                          color=cols[l], lw=.8)
    if plot:
        plt.legend(loc='upper left', fontsize=20, frameon=False)
        plt.text(1, 5e-5, r'${\cal E}_{\rm GW} = (q {\cal E}_{\rm EM})^2$',
                 fontsize=26)
        plot_sets.axes_lines()
        plt.xlabel(r'${\cal E}_{\rm EM}$')
        plt.ylabel(r'${\cal E}_{\rm GW}$')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim(1e-2, 20)
        plt.yticks(np.logspace(-6, 2, 9))
        plt.ylim(1e-6, 1e2)
        plt.fill_between(xx2, xx2**0*1e-7, xx2**0*1e3, color='gray', alpha=.1)
        if save and plot: plt.savefig('plots/EEGW_EEM.pdf',
                                      bbox_inches='tight')
    # plot cubic lines Delta EGW vs EM and obtain coefficients p and p tilde
    if plot: plt.figure(2)
    p = [.033, .23, .047, .18, .37]
    p.append(ratA1*.93)
    p.append(ratB1*.93)
    p.append(ratC1)
    p.append(ratD1*.98)
    p.append(ratE1)
    p = np.array(p)
    pt = np.zeros(10)
    for l in range(0, 5):
        pt[l] = plot_runA(p[l], A=AA[l], exp=3, qst='p', kk=False,
                          col=cols[l], plot=plot)
        pt[l + 5] = plot_runA(p[l + 5], A=AA[l], plot=False)
        if plot: plt.plot(xx3, (p[l + 5]*xx3)**3, ls='-.',
                          color=cols[l], lw=.8)
    if plot:
        plt.legend(loc='upper left', fontsize=20, frameon=False)
        plt.text(1, 1e-7,
                 r'$\Delta {\cal E}_{\rm GW} = (p {\cal E}_{\rm EM})^3$',
                 fontsize=26)
        plot_sets.axes_lines()
        plt.xlabel(r'${\cal E}_{\rm EM}$')
        plt.ylabel(r'$\Delta {\cal E}_{\rm GW}$')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim(1e-2, 20)
        plt.yticks(np.logspace(-10, 2, 13))
        plt.ylim(3e-11, 1e2)
        plt.fill_between(xx2, xx2**0*1e-12, xx2**0*1e3, color='gray', alpha=.1)
        if save and plot: plt.savefig('plots/DEEGW_EEM.pdf',
                                      bbox_inches='tight')
    # plot linear relation Delta PGW/PGW vs EM and obtain coefficients
    # r and r tilde
    if plot: plt.figure(3)
    r = [8.5e-5, 7e-3, 2e-3, 1.5e-2, 4.5e-2]
    r.append(abs(ratA2)*.99)
    r.append(abs(ratB2)*1.05)
    r.append(abs(ratC2)*1.3)
    r.append(abs(ratD2)*.75)
    r.append(abs(ratE2)*.8)
    r = np.array(r)
    rt = np.zeros(10)
    for l in range(0, 5):
        rt[l] = plot_runA(r[l], A=AA[l], exp=1, qst='r', kk=False,
                          sc=True, col=cols[l], plot=plot)
        rt[l + 5] = plot_runA(r[l + 5], A=AA[l], plot=False)
        if plot: plt.plot(xx3, r[l + 5]*xx3, ls='-.',
                          color=cols[l], lw=.8)
    if plot:
        plt.legend(loc='upper left', fontsize=20, frameon=False)
        plt.text(1, 1e-5, r'$|\Delta {\cal P}_{\rm GW}| = $' + \
                 r' $ r |{\cal P}_{\rm GW}| \, {\cal E}_{\rm EM}$',
                 fontsize=26)
        plot_sets.axes_lines()
        plt.xlabel(r'${\cal E}_{\rm EM}$')
        plt.ylabel(r'$|\Delta {\cal P}_{\rm GW}/{\cal P}_{\rm GW}|$')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim(1e-2, 20)
        plt.yticks(np.logspace(-6, 0, 7))
        plt.ylim(3e-7, 1e1)
        plt.fill_between(xx2, xx2**0*1e-7, xx2**0*1e1, color='gray', alpha=.1)
    # collect all fit coefficients into table III
    df2 = pd.DataFrame({'run': AA, 'q': q, 'qt': qt,
                        'p': p, 'pt': pt,
                        'r': r, 'rt': rt})
    if save:
        df2.to_csv('tableIII.csv')
        if plot:
            plt.savefig('plots/ratDPGW_EEM.pdf',
                        bbox_inches='tight')
    if print_tex:
        # print table III rows in LaTeX format
        nms = np.array(df2['run'])
        q = np.array(df2['q'])
        qt = np.array(df2['qt'])
        p = np.array(df2['p'])
        pt = np.array(df2['pt'])
        r = np.array(df2['r'])
        rt = np.array(df2['rt'])
        inds = np.argsort(nms)
        for i in inds:
            nmm = '%s'%nms[i]
            if 'toff' in nms[i]: nmm = nmm.replace('_toff', "'")
            exp_r = np.floor(np.log10(abs(r[i])))
            bas_r = r[i]/10**exp_r
            exp_rt = np.floor(np.log10(abs(rt[i])))
            bas_rt = rt[i]/10**exp_rt
            print(nmm, '&', '$(%.2f, %.2f)$'%(q[i], p[i]), '&',
                  '$(%.2f, %.2f)$'%(qt[i], pt[i]), '&')
            print('$%.1f \\times 10^{%i}$'%(bas_r, exp_r), '&',
                  '$%.1f \\times 10^{%i}$'%(bas_rt, exp_rt), '\\\\')
    return df2
def plot_timeseries_EGW(runs, lin=False, pol=False, diff=False, shift=0.,
                        old=False, A='A', col='blue'):
    """
    Function to plot a specific time series called in
    overshoot_ts (arguments are the same).
    Plots the run "A + '2_nl_toff'" (and, depending on the options, its
    linear counterpart and the runs without the '_toff' suffix) in the
    currently open matplotlib figures 1 and 2.
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        lin -- option to also plot the linear GW energy density
               (default False)
        pol -- option to also plot the total polarization (default False)
        diff -- option to plot the difference between nonlinear and linear
                contributions instead of the time series (default False)
        shift -- shift in time of the nonlinear solution (default 0.)
        old -- option to overplot results of the runs without '_toff'
               (default False)
        A -- series identifier used to build the run keys (default 'A')
        col -- plot color for this series (default 'blue')
    """
    run_nl = runs.get(A + '2_nl_toff')
    EGW_nl = run_nl.spectra.get('EGW_mean')
    t_nl = run_nl.spectra.get('t_EGW')
    if pol:
        # polarization time series of the nonlinear run
        XiGW_nl = run_nl.spectra.get('helEGW_mean')
        P_nl = XiGW_nl/EGW_nl
    if old:
        run_old = runs.get(A + '2_nl')
        EGW_old = run_old.spectra.get('EGW_mean')
        t_old = run_old.spectra.get('t_EGW')
        if pol:
            XiGW_old = run_old.spectra.get('helEGW_mean')
            P_old = XiGW_old/EGW_old
    if lin or diff:
        run_l = runs.get(A + '2_l_toff')
        EGW_l = run_l.spectra.get('EGW_mean')
        t_l = run_l.spectra.get('t_EGW')
        if pol:
            XiGW_l = run_l.spectra.get('helEGW_mean')
            P_l = XiGW_l/EGW_l
        if old:
            run_old_l = runs.get(A + '2_l')
            t_old_l = run_old_l.spectra.get('t_EGW')
            EGW_old_l = run_old_l.spectra.get('EGW_mean')
            if pol:
                XiGW_old_l = run_old_l.spectra.get('helEGW_mean')
                P_old_l = XiGW_old_l/EGW_old_l
    if diff:
        # interpolate linear and (time-shifted) nonlinear series onto a
        # common time array and plot their difference
        t_nl2 = t_nl - shift
        t_lnl, EGW_nl, EGW_l = r.interpolate_ts(t_nl2, t_l, EGW_nl, EGW_l)
        diff_EGW_nl = EGW_nl - EGW_l
        good = np.where(t_lnl > 2)[0]
        # time average of the difference over t > 2
        diffEGW_stat = np.trapz(diff_EGW_nl[good], t_lnl[good])/\
                    (t_lnl[-1] - t_lnl[good][0])
        plt.figure(1)
        # plot positive and negative values of Delta EGW with different
        # line styles
        spectra.plot_neg_pos(t_lnl, diff_EGW_nl, ls1='solid', lw1=1,
                             ls2=':', lw2=2, col=col)
        plt.hlines(diffEGW_stat, -2, 20, color=col, ls='-.')
        indt_1 = np.argmin(abs(t_lnl - 1.))
        plt.plot(t_lnl[indt_1], diff_EGW_nl[indt_1], 'o', color=col)
        plt.hlines(diff_EGW_nl[indt_1], -2, 2, ls='-.', color=col,
                   lw=.7)
        print(A, ', max: ', max(diff_EGW_nl), r', value at $\eta = 1$:',
              diff_EGW_nl[indt_1], r', average value over $\eta$:',
              diffEGW_stat)
        # plot only helical runs (pars[2] = 1)
        if pol:
            t_nl2 = t_nl - shift
            t_lnl, P_nl, P_l = r.interpolate_ts(t_nl2, t_l, P_nl, P_l)
            diff_PGW = (P_l - P_nl)/P_l
            good = np.where(t_lnl > 2)[0]
            diffPGW_stat = np.trapz(diff_PGW[good], t_lnl[good])/\
                        (t_lnl[-1] - t_lnl[good][0])
            plt.figure(2)
            # dashed-dotted for positive average, dotted for negative
            if diffPGW_stat > 0:
                plt.hlines(diffPGW_stat, -2, 20, color=col, ls='-.')
            else:
                plt.hlines(-diffPGW_stat, -2, 20, color=col, ls='dotted')
            plt.plot(t_lnl[indt_1], abs(diff_PGW[indt_1]), 'o',
                     color=col)
            plt.hlines(abs(diff_PGW)[indt_1], -2, 2, ls='-.', color=col,
                       lw=.7)
            # plot positive and negative values with different line styles
            spectra.plot_neg_pos(t_lnl, diff_PGW, ls1='solid', lw1=1,
                                 ls2=':', lw2=2, col=col)
        if old:
            t_old2 = t_old - shift
            t_old_lnl, EGW_old, EGW_old_l = \
                    r.interpolate_ts(t_old2, t_old_l, EGW_old, EGW_old_l)
            diff_old = EGW_old - EGW_old_l
            plt.figure(1)
            plt.plot(t_old_lnl, diff_old, '.', color=col)
            if pol:
                plt.figure(2)
                t_old_lnl, P_old, P_old_l = \
                        r.interpolate_ts(t_old, t_old_l, P_old, P_old_l)
                diff_PGW_old = (P_old_l - P_old)/P_old_l
                plt.plot(t_old_lnl, diff_PGW_old, '.', color=col)
    else:
        # plot the nonlinear time series itself, its saturated value and
        # the value at the end of reheating (t = 1)
        ind_tnl = np.argmin(abs(t_nl - 1))
        k_nl = run_nl.spectra.get('k')
        EGW_stat = np.trapz(run_nl.spectra.get('EGW_stat'), k_nl)
        plt.figure(1)
        plt.plot(t_nl, EGW_nl, color=col)
        plt.hlines(EGW_stat, -2, 20, color=col, ls='-.')
        if old:
            plt.plot(t_old, EGW_old, '.', color=col)
        indt_1 = np.argmin(abs(t_nl - 1))
        plt.plot(t_nl[indt_1], EGW_nl[indt_1], 'o',
                 color=col)
        plt.hlines(EGW_nl[indt_1], -2, 2, color=col, ls='-.', lw=.7)
        print(A, ', max EEGW: ', max(EGW_nl), r', value at $\eta = 1$:',
              EGW_nl[indt_1], r', average value over $\eta$:',
              EGW_stat)
        jf = 2
        if lin:
            plt.figure(jf)
            plt.plot(t_l, EGW_l, color=col)
            plt.plot(t_nl - shift, EGW_nl, lw=.7, ls='--', color=col)
            if old:
                plt.plot(t_old_l, EGW_old_l, '.', color=col)
            jf += 1
        if pol:# and run_nl.pars[2] == 1:
            plt.figure(jf)
            plt.plot(t_nl, P_nl, color=col)
            plt.plot(t_nl[indt_1], P_nl[indt_1], color=col)
            if old:
                plt.plot(t_old, P_old, '.', color=col)
def overshoot_ts(runs, lin=False, pol=False, diff=False, shift=0,
                 old=False, save=True, extra=False):
    """
    Function that plots the timeseries of the GW energy density up to the end
    of inflation (reheating) and at later times (for which we have sourced off
    the GW production) for runs A2 to E2.
    It generates the plots corresponding to figure 4 of
    <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the plot in plots/overshoot_ts.pdf
                or plots/overshoot_ts_diff.pdf (if diff = True)
                (default True)
        lin -- option to plot the linear GW energy density, instead of the
               nonlinear (default False)
        pol -- option to plot the time evolution of the total polarization
               (default False)
        diff -- option to plot the time evolution of the difference in GW
                energy density between nonlinear and linear contributions
                (default False)
        shift -- shift in time of the nonlinear solution (default 0)
        old -- option to overplot results of runs from 0 to 1 (default 0)
        extra -- option to add inset plot to zoom in in a specific region
                 (used for the figure) (default False)
    NOTE(review): series B is drawn here with col='green', whereas
    select_runs uses 'darkgreen' for series B -- confirm which color is
    intended for the figure.
    """
    from mpl_toolkits.axes_grid.inset_locator import inset_axes
    import matplotlib.patches as patches
    # figure 1: EGW (or Delta EGW); figures 2/3: optional linear and
    # polarization panels
    plt.figure(1, figsize=(12, 8))
    jf=2
    if lin:
        plt.figure(jf, figsize=(12, 8))
        jf += 1
    if pol:
        plt.figure(jf, figsize=(12, 8))
    # one time series per simulation series (E to A)
    plot_timeseries_EGW(runs, diff=diff, old=old, pol=pol, lin=lin,
                        shift=shift,
                        A='E', col='purple')
    plot_timeseries_EGW(runs, diff=diff, old=old, pol=pol, lin=lin,
                        shift=shift,
                        A='D', col='red')
    plot_timeseries_EGW(runs, diff=diff, old=old, pol=pol, lin=lin,
                        shift=shift,
                        A='C', col='orange')
    plot_timeseries_EGW(runs, diff=diff, old=old, pol=pol, lin=lin,
                        shift=shift,
                        A='B', col='green')
    plot_timeseries_EGW(runs, diff=diff, old=old, pol=pol, lin=lin,
                        shift=shift)
    plt.figure(1)
    plot_sets.axes_lines()
    # y-range depends on whether the difference or the full series is shown
    if diff:
        y0 = 1e-9
        y1 = 3e-4
    else:
        y0 = 6e-6
        y1 = 5e-3
    # vertical markers at the end of reheating (t = 1) and source-off (t = 2)
    plt.vlines(1, y0, y1, color='black', ls='-.')
    plt.vlines(2, y0, y1, color='black', ls='-.')
    plt.yscale('log')
    plt.xlabel(r'$\eta$')
    if diff: plt.ylabel(r'$\Delta {\cal E}_{\rm GW}$')
    else: plt.ylabel(r'${\cal E}_{\rm GW}^{\rm nlin}$')
    plt.ylim(y0, y1)
    plt.xlim(0, 10)
    plt.xticks(np.linspace(0, 10, 11))
    fs=24
    if not diff:
        # run labels placed next to the corresponding curves
        plt.text(5.25, 1.25e-4, "A2'", color='blue', fontsize=fs)
        plt.text(5.25, 1e-3, "B2'", color='green', fontsize=fs)
        plt.text(5.25, 7e-5, "C2'", color='orange', fontsize=fs)
        plt.text(5.25, 4e-4, "D2'", color='red', fontsize=fs)
        plt.text(5.25, 2.8e-3, "E2'", color='purple', fontsize=fs)
        dff = ''
        if extra:
            # this is an inset axes over the main axes
            # (note: the local name 'inset_axes' rebinds the imported
            # function; safe here since it is re-imported on every call)
            ax = plt.gca()
            plt.annotate('', xy=(3.5, 5.5e-5), xytext=(2.5, 1.4e-3),
                     arrowprops=dict(facecolor='black', shrink=0.,
                                     width=.2, alpha=.3),)
            inset_axes = inset_axes(ax,
                        width="40%", # width = 30% of parent_bbox
                        height=1.8, # height : 1 inch
                        loc=8)
            plt.xticks([])
            plt.yticks([])
            # zoom into run B2' between t = 2 and t = 4
            run_nl = runs.get('B2_nl_toff')
            run_l = runs.get('B2_l_toff')
            EGW_nl = run_nl.spectra.get('EGW_mean')
            t_nl = run_nl.spectra.get('t_EGW')
            EGW_l = run_l.spectra.get('EGW_mean')
            t_l = run_l.spectra.get('t_EGW')
            plt.plot(t_l, EGW_l, color='darkgreen', alpha=.6)
            plt.plot(t_nl, EGW_nl, color='darkgreen', ls='--')
            k = run_nl.spectra.get('k')
            EGW_stat = run_nl.spectra.get('EGW_stat')
            plt.hlines(np.trapz(EGW_stat, k), 2, 4, color='darkgreen',
                       ls='-.', alpha=.6)
            ax.add_patch(patches.Rectangle((2, 1.4e-3), 2, 8e-4,
                                           edgecolor='black', alpha=.7,
                                           linewidth=1.5, facecolor='none'))
            plt.xlim(2, 4)
            plt.ylim(1.4e-3, 2.2e-3)
    else:
        plt.text(5.75, 3.5e-7, "A2'", color='blue', fontsize=fs)
        plt.text(5.1, 9e-6, "B2'", color='green', fontsize=fs)
        plt.text(5.25, 6e-9, "C2'", color='orange', fontsize=fs)
        plt.text(6.3, 3.5e-6, "D2'", color='red', fontsize=fs)
        plt.text(5.25, 6e-5, "E2'", color='purple', fontsize=fs)
        dff = '_diff'
    if save:
        plt.savefig('plots/' + 'overshoot_ts' + dff + '.pdf',
                    bbox_inches='tight')
    jf = 2
    if lin and not diff:
        # decorate and save the optional linear panel
        plt.figure(jf)
        jf += 1
        plot_sets.axes_lines()
        if diff:
            y0 = 1e-9
            y1 = 3e-4
        else:
            y0 = 2e-5
            y1 = 5e-3
        plt.vlines(1, y0, y1, color='black', ls='-.')
        plt.vlines(2, y0, y1, color='black', ls='-.')
        plt.yscale('log')
        plt.xlabel(r'$\eta$')
        plt.ylabel(r'${\cal E}_{\rm GW}$')
        plt.ylim(y0, y1)
        plt.xlim(0, 10)
        plt.xticks(np.linspace(0, 10, 11))
        if save:
            plt.savefig('plots/' + 'overshoot_ts_lin.pdf',
                        bbox_inches='tight')
    if pol:
        # decorate and save the optional polarization panel
        plt.figure(jf)
        plot_sets.axes_lines()
        if diff:
            plt.yscale('log')
            y0 = 1e-7
            y1 = 1e0
        else:
            y0 = -.6
            y1 = 1.1
        plt.vlines(1, y0, y1, color='black', ls='-.')
        plt.vlines(2, y0, y1, color='black', ls='-.')
        plt.xlabel(r'$\eta$')
        if diff: plt.ylabel(r'$\Delta {\cal P}_{\rm GW}/{\cal P}_{\rm GW}$')
        else:
            plt.ylabel(r'${\cal P}_{\rm GW}^{\rm nlin}$')
        plt.xlim(0, 10)
        plt.xticks(np.linspace(0, 10, 11))
        plt.ylim(y0, y1)
        if save:
            plt.savefig('plots/' + 'overshoot_ts_PGW' + dff + '.pdf',
                        bbox_inches='tight')
def plot_EGW_k(runs, lin=False, diff=False, A='A', col='blue'):
    """
    Plot a single GW energy density spectrum of the runs shown in
    overshoot_EGW: run '2' (with the source turned off) of the given
    series, at the end of reheating and at its saturated value.

    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        lin -- plot the linear spectra instead of the nonlinear
               (default False)
        diff -- plot the relative difference between nonlinear and linear
                spectra (default False); takes precedence over lin
        A -- series identifier used to build the run keys (default 'A')
        col -- plot color for this series (default 'blue')
    """
    nl_run = runs.get(A + '2_nl_toff')
    nl_spec = nl_run.spectra
    wave = nl_spec.get('k')[1:]
    # index of the stored time closest to the end of reheating (t = 1)
    it_nl = np.argmin(abs(nl_spec.get('t_EGW') - 1))
    spec_nl = nl_spec.get('EGW')[:, 1:]
    stat_nl = nl_spec.get('EGW_stat')[1:]
    if not diff and not lin:
        # nonlinear spectrum at t = 1 (thin) and saturated value (dashed)
        plt.plot(wave, spec_nl[it_nl, :], color=col, lw = .7, alpha = .6)
        plt.plot(wave, stat_nl, '--', color=col)
        return
    lin_run = runs.get(A + '2_l_toff')
    lin_spec = lin_run.spectra
    wave_l = lin_spec.get('k')[1:]
    it_l = np.argmin(abs(lin_spec.get('t_EGW') - 1))
    spec_l = lin_spec.get('EGW')[:, 1:]
    stat_l = lin_spec.get('EGW_stat')[1:]
    if diff:
        # relative difference at t = 1 (thin) and for the saturated
        # spectra (dashed)
        delta = spec_nl[it_nl, :] - spec_l[it_l, :]
        delta_stat = stat_nl - stat_l
        plt.plot(wave, abs(delta)/spec_nl[it_nl, :],
                 color=col, alpha=.6, lw=.6)
        plt.plot(wave, abs(delta_stat)/stat_nl, '--',
                 color=col)
    else:
        # linear spectrum at t = 1 and its saturated value (dash-dotted)
        plt.plot(wave_l, spec_l[it_l], color=col)
        plt.plot(wave_l, stat_l, '-.', color=col)
def overshoot_EGW(runs, lin=False, diff=False, save=True):
    """
    Function that plots the GW energy density spectra and compares them at the
    end of reheating and their saturated values for runs A2 to E2.
    It generates the plots corresponding to figure 5 of
    Y. He, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the plot in plots/overshoot_EGW_k.pdf
                or plots/overshoot_EGW_kdiff.pdf (if diff = True)
                (default True)
        lin -- option to plot the linear spectra, instead of the
               nonlinear (default False)
        diff -- option to plot the the difference in the spectra of GW
                energy density between nonlinear and linear contributions
                (default False)
    """
    # 'mpl_toolkits.axes_grid' was deprecated and removed in matplotlib >= 3.6;
    # 'axes_grid1' provides the same inset_locator API
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
    fig, ax = plt.subplots(figsize=(12, 8))
    plot_EGW_k(runs, diff=diff, lin=lin)
    plot_EGW_k(runs, A='B', col='green', diff=diff, lin=lin)
    plot_EGW_k(runs, A='C', col='orange', diff=diff, lin=lin)
    plot_EGW_k(runs, A='D', col='red', diff=diff, lin=lin)
    plot_EGW_k(runs, A='E', col='purple', diff=diff, lin=lin)
    ax.set_xlabel('$k$')
    if not diff: ax.set_ylabel(r'$E_{\rm GW}^{\rm nlin} (k)$')
    if diff: ax.set_ylabel(r'$\Delta E_{\rm GW} (k)/E_{\rm GW}^{\rm nlin}$')
    plot_sets.axes_lines()
    ax.tick_params(axis='x', labelsize=28)
    ax.tick_params(axis='y', labelsize=28)
    ax.set_yscale('log')
    ax.set_xscale('log')
    plt.xlim(.2, 300)
    if not diff:
        plt.ylim(1e-38, 1e-2)
        plt.yticks(np.logspace(-38, -2, 10))
        plt.text(3.6e1, 1e-32, "A2'", color='blue')
        plt.text(1.5e2, 1e-19, "B2'", color='green')
        plt.text(4.5e1, 1e-10, "C2'", color='orange')
        plt.text(1.8e2, 1e-25, "D2'", color='red')
        plt.text(5e1, 1e-6, "E2'", color='purple')
        # this is an inset axes over the main axes; bound to a new name so
        # that the imported inset_axes function is not shadowed
        ax_ins = inset_axes(ax,
                            width="50%", # width = 30% of parent_bbox
                            height=4.0, # height : 1 inch
                            loc=3)
        # the newly added inset becomes the current axes, so the pyplot
        # calls below draw into it
        plot_EGW_k(runs, lin=lin)
        plot_EGW_k(runs, A='B', col='green', lin=lin)
        plot_EGW_k(runs, A='C', col='orange', lin=lin)
        plot_EGW_k(runs, A='D', col='red', lin=lin)
        plot_EGW_k(runs, A='E', col='purple', lin=lin)
        plt.yscale('log')
        plt.xscale('log')
        plot_sets.axes_lines()
        plt.xlim(.6, 50)
        plt.ylim(2e-9, 2e-3)
        ax = plt.gca()
        ax.yaxis.tick_right()
        ax.xaxis.tick_top()
        ax.tick_params(axis='x', labelsize=14)
        ax.tick_params(axis='y', labelsize=14, pad=10)
        ax.set_xticks(np.logspace(0, 1, 2))
        ax.set_yticks(np.logspace(-8, -3, 6))
        plt.text(.8, 6e-9, r'$E_{\rm GW} (k)$', fontsize=24)
        dff = ''
    else:
        plt.ylim(1e-4, 2)
        dff = 'diff'
    if save:
        plt.savefig('plots/' + 'overshoot_EGW_k' + dff + '.pdf',
                    bbox_inches='tight')
def Pol_EGW(runs, A='A', col='blue'):
    """
    Function that plots one of the polarization spectra shown in
    overshoot_PGW.

    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        A -- series identifier of the run pair to plot (default 'A')
        col -- color of the plotted lines (default 'blue')
    """
    run_nl = runs.get(A + '2_nl_toff')
    k_nl = run_nl.spectra.get('k')[1:]
    t_nl = run_nl.spectra.get('t_EGW')
    # index of the time closest to t = 1 (end of reheating)
    indt_nl = np.argmin(abs(t_nl - 1))
    EGW_nl = run_nl.spectra.get('EGW')[:, 1:]
    XiGW_nl = run_nl.spectra.get('helEGW')[:, 1:]
    run_l = runs.get(A + '2_l_toff')
    k_l = run_l.spectra.get('k')[1:]
    t_l = run_l.spectra.get('t_EGW')
    EGW_l = run_l.spectra.get('EGW')[:, 1:]
    XiGW_l = run_l.spectra.get('helEGW')[:, 1:]
    # saturated spectra: time averages over t > 2
    # NOTE: np.trapz is removed in NumPy >= 2.0 (renamed np.trapezoid)
    good = np.where(t_nl > 2)[0]
    EGW_nl_stat = np.trapz(EGW_nl[good, :], t_nl[good], axis=0)/ \
            (t_nl[-1] - t_nl[good][0])
    XiGW_nl_stat = np.trapz(XiGW_nl[good, :], t_nl[good], axis=0)/ \
            (t_nl[-1] - t_nl[good][0])
    good = np.where(t_l > 2)[0]
    EGW_l_stat = np.trapz(EGW_l[good, :], t_l[good], axis=0)/ \
            (t_l[-1] - t_l[good][0])
    XiGW_l_stat = np.trapz(XiGW_l[good, :], t_l[good], axis=0)/ \
            (t_l[-1] - t_l[good][0])
    # polarization = helical spectrum / energy density spectrum
    plt.plot(k_nl, XiGW_nl[indt_nl, :]/EGW_nl[indt_nl, :],
             color=col, lw=.7, alpha=.6)
    plt.plot(k_nl, XiGW_nl_stat/EGW_nl_stat, ls='--', color=col)
    plt.plot(k_l, XiGW_l_stat/EGW_l_stat, ls='dotted', color=col)
def overshoot_PGW(runs, save=True):
    """
    Plot the GW polarization spectra for runs C2 to E2 and compare their
    values at the end of reheating with the saturated ones.
    It generates the plots corresponding to figure 6 of
    <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".

    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the plot in plots/overshoot_PGW_k.pdf
                (default True)
    """
    plt.figure(figsize=(12, 8))
    plot_sets.axes_lines()
    # one polarization spectrum per run series
    for series, cc in zip(('C', 'D', 'E'), ('orange', 'red', 'purple')):
        Pol_EGW(runs, A=series, col=cc)
    plt.xscale('log')
    plt.ylim(-.2, 1.1)
    plt.xlabel('$k$')
    plt.ylabel(r'${\cal P}_{\rm GW}^{\rm nlin}$')
    # run labels next to the corresponding curves
    plt.text(70, .05, "C2'", color='orange')
    plt.text(70, .5, "C2", color='orange')
    plt.text(180, .5, r"C$2^{\rm lin}$", color='orange')
    plt.text(.35, .2, "E2'", color='purple')
    plt.text(1, .7, "D2'", color='red')
    if save:
        plt.savefig('plots/overshoot_PGW_k.pdf', bbox_inches='tight')
def plot_OmGW_f(run_l, run_nl, T, g, diff=True,
                sp='EGW', col='blue', toff=True):
    """
    Function that plots each of the runs shown in plot_OmGW_vs_f.

    Arguments:
        run_l -- run with the linear spectra
        run_nl -- run with the nonlinear spectra
        T -- temperature scale at the end of reheating
        g -- number of relativistic degrees of freedom
        diff -- option to plot the difference between the nonlinear and
                linear spectra (positive values dash-dotted, negative
                values dotted); otherwise the linear spectrum is plotted
                with a dash-dotted line (default True)
        sp -- name of the spectrum to read ('EGW' by default; spectra
              containing 'hel' are plotted in absolute value)
        col -- color of the plotted lines (default 'blue')
        toff -- option to use the saturated '_stat' spectra instead of
                the spectra at t = 1 (default True)

    Returns:
        f_l, f_nl -- present-time frequency arrays of the linear and
                     nonlinear runs
        OmGW_l, OmGW_nl -- present-time GW energy density spectra of the
                           linear and nonlinear runs
    """
    t_l = run_l.spectra.get('t_' + sp)
    ind_tl = np.argmin(abs(t_l - 1.))
    if abs(t_l[ind_tl] - 1) > 1e-2:
        # fixed broken %-formatting: the original applied % to a string
        # with no conversion specifiers, raising TypeError at runtime
        print('The time t = 1 is not available in the spectra of the '
              'run %s, so t = %.2f has been taken'
              % (run_l.name_run, t_l[ind_tl]))
    EGW_l = run_l.spectra.get(sp)[ind_tl, 1:]
    if toff:
        EGW_l = run_l.spectra.get(sp + '_stat')[1:]
    k_l = run_l.spectra.get('k')[1:]
    f_l, OmGW_l = cosmoGW.shift_OmGW_today(k_l, EGW_l*k_l, T, g)
    t_nl = run_nl.spectra.get('t_' + sp)
    ind_tnl = np.argmin(abs(t_nl - 1.))
    if abs(t_nl[ind_tnl] - 1) > 1e-2:
        print('The time t = 1 is not available in the spectra of the '
              'run %s, so t = %.2f has been taken'
              % (run_nl.name_run, t_nl[ind_tnl]))
    EGW_nl = run_nl.spectra.get(sp)[ind_tnl, 1:]
    if toff:
        EGW_nl = run_nl.spectra.get(sp + '_stat')[1:]
    k_nl = run_nl.spectra.get('k')[1:]
    f_nl, OmGW_nl = cosmoGW.shift_OmGW_today(k_nl, EGW_nl*k_nl, T, g)
    if 'hel' in sp:
        # helical spectra can be negative; plot their absolute values
        OmGW_nl = abs(OmGW_nl)
        OmGW_l = abs(OmGW_l)
    alp = 1.
    if '1' in run_l.name_run: alp = .6
    plt.plot(f_nl, OmGW_nl, color=col, alpha=alp)
    if not diff:
        plt.plot(f_l, OmGW_l, color=col, ls='-.')
    else:
        # plot positive and negative values with different line styles
        diff_OmGW = OmGW_nl - OmGW_l
        sgn = np.sign(diff_OmGW)
        converge = False
        sgn0 = sgn[0]
        i = 0
        lw = 1
        # walk through the sign array, plotting each constant-sign
        # segment with the style associated to its sign
        while not converge:
            sign = False
            i0 = i
            while not sign and not converge:
                if sgn0 == 1:
                    ls = '-.'
                    lw = 1
                else:
                    ls = 'dotted'
                    lw = .6
                if i==len(sgn) - 2: converge=True
                if sgn[i] != sgn0:
                    sign = True
                    sgn0 = sgn[i]
                i += 1
            plt.plot(f_nl[i0:i+1], abs(diff_OmGW[i0:i+1]),
                     color=col, ls=ls, lw=lw, alpha=alp)
    return (np.array(f_l, dtype='float'), np.array(f_nl, dtype='float'),
            np.array(OmGW_l, dtype='float'), np.array(OmGW_nl, dtype='float'))
def plot_OmGW_vs_f(runs, save=True):
    """
    Function that plots the resulting GW energy density spectrum at the end
    of inflation (reheating) as an observable at the present time and compares
    it with LISA sensitivity, NANOGrav 12.5 yr results, and other GW detectors.
    It plots the leading-order nonlinear term as a GW spectrum separately
    for comparison.
    It generates the plot corresponding to figure 7 (left panel) of
    <NAME>, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        save -- option to save the plot in plots/OmGW_f_detectors.pdf
                (default True)
    """
    plt.figure(figsize=(12, 8))
    CWD = os.getcwd()
    os.chdir('..')
    # renamed from 'dir' to avoid shadowing the builtin dir()
    dir_sens = 'detector_sensitivity'
    # read LISA PLS
    fs, LISA_Om, LISA_OmPLS = inte.read_sens(SNR=10, T=4)
    fs = fs*u.Hz
    # read SKA
    f_SKA, hc_SKA = inte.read_csv(dir_sens, 'SKA', b='hc')
    f_SKA = f_SKA*u.Hz
    Om_SKA = cosmoGW.hc_OmGW(f_SKA, hc_SKA, d=-1)
    # read Gaia and Theia
    f_Gaia, Om_Gaia = inte.read_csv(dir_sens, 'Gaia_PLS_SNR10')
    # resample the Gaia PLS on a regular logarithmic frequency grid
    ff = np.logspace(np.log10(f_Gaia[0]), np.log10(f_Gaia[-1]), 100)
    Om_Gaia = 10**np.interp(ff, f_Gaia, np.log10(Om_Gaia))
    f_Gaia = ff*u.Hz
    f_Theia, Om_Theia = inte.read_csv(dir_sens, 'Theia_PLS_SNR10')
    f_Theia = f_Theia*u.Hz
    # read DECIGO
    f_DECIGO, Om_DECIGO = inte.read_csv(dir_sens, 'DECIGO_PLS_SNR10')
    ff = np.logspace(np.log10(f_DECIGO[0]), np.log10(f_DECIGO[-1]), 100)
    Om_DECIGO = 10**np.interp(ff, f_DECIGO, np.log10(Om_DECIGO))
    f_DECIGO = ff*u.Hz
    # read AEDGE and AION
    f_AEDGE, Om_AEDGE = inte.read_csv(dir_sens, 'AEDGE')
    f_AION, Om_AION = inte.read_csv(dir_sens, 'AION')
    # read constrains from binary resonance
    f_MSP, Om_MSP = inte.read_csv(dir_sens, 'binaries_MSPs_2038')
    f_LLR, Om_LLR = inte.read_csv(dir_sens, 'binaries_LLR_2038')
    f_SLR, Om_SLR = inte.read_csv(dir_sens, 'binaries_SLR_2038')
    # read muAres
    fs_mu, muAres_Om, muAres_OmPLS = inte.read_sens(interf='muAres',
                                                    SNR=10, T=4)
    # read NANOGrav data
    os.chdir('runs_nonhelical_ini')
    _ = pta.read_PTA_data(beta_b=False, Omega_b=False, return_all=True)
    gamma_NG_sPL_1s, A1_NG_sPL_1s, A2_NG_sPL_1s = [_[3], _[4], _[5]]
    betas = np.linspace(-2, 5, 100)
    colors = ['blue']*len(betas)
    _ = pta.CP_delay(betas, colors, obs='NANOGrav_singlePL_1s',
                     plot=False)
    fNG = _[0]
    OmGW_NG_a = _[3]
    OmGW_NG_b = _[6]
    _ = pta.CP_delay(betas, colors, obs='NANOGrav_brokenPL_1s',
                     plot=False)
    fNGb = _[0]
    OmGW_NGb_a = _[3]
    OmGW_NGb_b = _[6]
    os.chdir(CWD)
    lww = .6
    plt.plot(fs, LISA_OmPLS, color='limegreen', lw=lww)
    plt.plot(fs, LISA_Om, color='limegreen', ls='-.', lw=lww)
    plt.plot(f_SKA, Om_SKA, color='black', lw=lww)
    plt.plot(f_Gaia, Om_Gaia, color='navy', lw=lww)
    plt.plot(f_Theia, Om_Theia, color='navy', lw=lww)
    plt.plot(f_DECIGO, Om_DECIGO, color='darkred', lw=lww)
    plt.plot(f_AEDGE, Om_AEDGE, color='peru', lw=lww)
    plt.plot(f_AION, Om_AION, color='peru', lw=lww)
    plt.plot(f_MSP, Om_MSP, color='darkviolet', lw=lww)
    plt.plot(f_LLR, Om_LLR, color='darkviolet', lw=lww)
    plt.plot(fs_mu, muAres_OmPLS, color='cyan', lw=lww)
    #plt.plot(f_SLR, Om_SLR, color='darkviolet', lw=.8)
    minOm, maxOm = pta.get_min_max(fNG, OmGW_NG_a, OmGW_NG_b)
    plt.fill_between(fNG, minOm, maxOm, color='blue', alpha=.3)
    # detector labels
    plt.text(6e-9, 1.3e-5, 'NANOGrav 12.5yr', color='blue',
             fontsize=14, alpha=.7)
    plt.text(2e-4, 2e-13, 'LISA', color='limegreen',
             fontsize=16, alpha=.7)
    plt.text(6e-8, 2e-10, 'SKA', color='black',
             fontsize=16, alpha=.7)
    plt.text(2e-7, 5e-10, 'Theia', color='navy',
             fontsize=16, alpha=.7)
    plt.text(1.8e-9, 4e-9, 'Gaia', color='navy',
             fontsize=16, alpha=.7)
    plt.text(3e-2, 3e-17, 'DECIGO', color='darkred',
             fontsize=16, alpha=.7)
    plt.text(3e-2, 7e-15, 'AEDGE', color='peru',
             fontsize=16, alpha=.7)
    plt.text(2.8e-2, 7e-12, 'AION', color='peru',
             fontsize=16, alpha=.7)
    plt.text(2e-5, 5e-6, 'MSPs', color='darkviolet',
             fontsize=16, alpha=.7)
    plt.text(1.7e-6, 1e-7, 'LLR', color='darkviolet',
             fontsize=16, alpha=.7)
    plt.text(2e-4, 3e-17, r'$\mu$Ares', color='cyan',
             fontsize=16)
    # choose toff runs
    runA_l = runs.get('A2_l_toff')
    runA_nl = runs.get('A2_nl_toff')
    runB_l = runs.get('B2_l_toff')
    runB_nl = runs.get('B2_nl_toff')
    runC_l = runs.get('C2_l_toff')
    runC_nl = runs.get('C2_nl_toff')
    runD_l = runs.get('D2_l_toff')
    runD_nl = runs.get('D2_nl_toff')
    runE_l = runs.get('E2_l_toff')
    runE_nl = runs.get('E2_nl_toff')
    runE1_l = runs.get('E1_l_toff')
    runE1_nl = runs.get('E1_nl_toff')
    col_A = 'blue'
    col_B = 'darkgreen'
    col_C = 'orange'
    col_D = 'red'
    col_E = 'purple'
    # Note that T and g are different for every run
    # pars[0] is the temperature in GeV and pars[1] is g
    _ = plot_OmGW_f(runA_l, runA_nl, runA_l.pars[0]*u.GeV,
                    runA_l.pars[1], col=col_A, toff=True)
    _ = plot_OmGW_f(runB_l, runB_nl, runB_l.pars[0]*u.GeV,
                    runB_l.pars[1], col=col_B, toff=True)
    _ = plot_OmGW_f(runC_l, runC_nl, runC_l.pars[0]*u.GeV,
                    runC_l.pars[1], col=col_C, toff=True)
    _ = plot_OmGW_f(runD_l, runD_nl, runD_l.pars[0]*u.GeV,
                    runD_l.pars[1], col=col_D, toff=True)
    fE2_l, fE2_nl, OmGWE2_l, OmGWE2_nl = \
            plot_OmGW_f(runE_l, runE_nl, runE_l.pars[0]*u.GeV,
                        runE_l.pars[1], col=col_E, toff=True)
    fE1_l, fE1_nl, OmGWE1_l, OmGWE1_nl = \
            plot_OmGW_f(runE1_l, runE1_nl, runE1_l.pars[0]*u.GeV,
                        runE1_l.pars[1], col=col_E, toff=True)
    # NOTE(review): fills between E1 and E2 amplitudes over the E1
    # frequencies; assumes both runs share the same frequency grid -- confirm
    plt.fill_between(fE1_l, OmGWE1_nl, OmGWE2_nl, color=col_E,
                     alpha=.05)
    plot_sets.axes_lines()
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('$f$ [Hz]')
    plt.ylabel(r'$h_0^2 \Omega_{\rm GW} (f)$')
    plt.xticks(np.logspace(-9, 0, 10))
    plt.xlim(1e-9, 1e0)
    plt.yticks(np.logspace(-18, -4, 15))
    plt.ylim(1e-18, 1e-4)
    plt.text(6e-5, 1e-14, "A2'", color='blue')
    plt.text(1.5e-7, 2e-17, "B2'", color='darkgreen')
    plt.text(3e-6, 1e-16, "C2'", color='orange')
    plt.text(1e-7, 8e-14, "D2'", color='red')
    plt.text(2.5e-3, 1e-9, "E2'", color='purple')
    plt.text(6e-4, 1.5e-12, "E1'", color='purple')
    if save: plt.savefig('plots/' + 'OmGW_f_detectors.pdf',
                         bbox_inches='tight')
def plot_XiGW_vs_f(runs, diff=False, save=True):
    """
    Function that plots the resulting helical GW energy density spectrum at
    the end of inflation (reheating) as an observable at the present time
    and compares it with LISA sensitivity to polarization and that of the
    LISA-Taiji network.
    It plots the leading-order nonlinear term as a GW spectrum separately
    for comparison.
    It generates the plot corresponding to figure 7 (right panel) of
    Y. He, <NAME>, and <NAME>, "Leading-order nonlinear
    gravitational waves from reheating magnetogeneses".
    Arguments:
        runs -- variable that contains the memory project runs with the
                stored spectra
        diff -- option passed to plot_OmGW_f to plot the difference
                between the nonlinear and linear spectra (default False)
        save -- option to save the plot in plots/XiGW_f_detectors.pdf
                (default True)
    """
    plt.figure(figsize=(12, 8))
    CWD = os.getcwd()
    os.chdir('..')
    # read LISA and Taiji PLS
    fs, LISA_Om, LISA_OmPLS, LISA_Xi, LISA_XiPLS = \
            inte.read_sens(SNR=10, T=4, interf='LISA', Xi=True)
    fs_comb, LISA_Taiji_Xi, LISA_Taiji_XiPLS = \
            inte.read_sens(SNR=10, T=4, interf='comb')
    fs = fs*u.Hz
    fs_comb = fs_comb*u.Hz
    os.chdir(CWD)
    lww = .6
    plt.plot(fs, LISA_XiPLS, color='limegreen', lw=lww)
    plt.plot(fs, LISA_Xi, color='limegreen', ls='-.', lw=lww)
    plt.plot(fs_comb, LISA_Taiji_XiPLS, color='darkred', lw=lww)
    plt.text(7e-4, 1e-9, 'LISA', color='limegreen',
             fontsize=16, alpha=.7)
    plt.text(1e-5, 1e-9, r'LISA--Taiji', color='darkred',
             fontsize=16, alpha=.7)
    # choose toff runs
    runA_l = runs.get('A2_l_toff')
    runA_nl = runs.get('A2_nl_toff')
    runB_l = runs.get('B2_l_toff')
    runB_nl = runs.get('B2_nl_toff')
    runC_l = runs.get('C2_l_toff')
    runC_nl = runs.get('C2_nl_toff')
    runD_l = runs.get('D2_l_toff')
    runD_nl = runs.get('D2_nl_toff')
    runE_l = runs.get('E2_l_toff')
    runE_nl = runs.get('E2_nl_toff')
    runE1_l = runs.get('E1_l_toff')
    runE1_nl = runs.get('E1_nl_toff')
    col_A = 'blue'
    col_B = 'darkgreen'
    col_C = 'orange'
    col_D = 'red'
    col_E = 'purple'
    # Note that T and g are different for every run
    # pars[0] is the temperature in GeV and pars[1] is g
    _ = plot_OmGW_f(runA_l, runA_nl, runA_l.pars[0]*u.GeV,
                    runA_l.pars[1], col=col_A, sp='helEGW',
                    diff=diff)
    _ = plot_OmGW_f(runB_l, runB_nl, runB_l.pars[0]*u.GeV,
                    runB_l.pars[1], col=col_B, sp='helEGW',
                    diff=diff)
    _ = plot_OmGW_f(runC_l, runC_nl, runC_l.pars[0]*u.GeV,
                    runC_l.pars[1], col=col_C, sp='helEGW',
                    diff=diff)
    _ = plot_OmGW_f(runD_l, runD_nl, runD_l.pars[0]*u.GeV,
                    runD_l.pars[1], col=col_D, sp='helEGW',
                    diff=diff)
    fE2_l, fE2_nl, OmGWE2_l, OmGWE2_nl = \
            plot_OmGW_f(runE_l, runE_nl, runE_l.pars[0]*u.GeV,
                        runE_l.pars[1], col=col_E, sp='helEGW',
                        diff=diff)
    fE1_l, fE1_nl, OmGWE1_l, OmGWE1_nl = \
            plot_OmGW_f(runE1_l, runE1_nl, runE1_l.pars[0]*u.GeV,
                        runE1_l.pars[1], col=col_E, sp='helEGW',
                        diff=diff)
    # NOTE(review): fills between E1 and E2 amplitudes over the E1
    # frequencies; assumes both runs share the same frequency grid -- confirm
    plt.fill_between(fE1_l, OmGWE1_nl, OmGWE2_nl, color=col_E,
                     alpha=.05)
    plot_sets.axes_lines()
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('$f$ [Hz]')
    plt.ylabel(r'$h_0^2\,\Xi_{\rm GW} (f)$')
    plt.xticks(np.logspace(-9, 0, 10))
    plt.xlim(1e-9, 1e0)
    plt.yticks(np.logspace(-14, -4, 11))
    plt.ylim(1e-14, 1e-6)
    plt.text(3e-5, 1e-11, "A2'", color='blue')
    plt.text(8e-9, 2e-13, "B2'", color='darkgreen')
    plt.text(5e-7, 1e-10, "C2'", color='orange')
    plt.text(4.5e-8, 1e-9, "D2'", color='red')
    plt.text(1.5e-1, 1e-8, "E2'", color='purple')
    plt.text(5e-4, 1e-13, "E1'", color='purple')
    if save: plt.savefig('plots/' + 'XiGW_f_detectors.pdf',
                         bbox_inches='tight')
| [
"matplotlib.pyplot.title",
"cosmoGW.ks_infla",
"matplotlib.pyplot.yscale",
"spectra.plot_neg_pos",
"numpy.logspace",
"numpy.argsort",
"matplotlib.pyplot.figure",
"pta.read_PTA_data",
"dirs.read_dirs",
"matplotlib.pyplot.gca",
"cosmoGW.as_a0_rat",
"matplotlib.pyplot.fill_between",
"matplotlib... | [((538, 552), 'os.chdir', 'os.chdir', (['HOME'], {}), '(HOME)\n', (546, 552), False, 'import os\n'), ((692, 706), 'os.chdir', 'os.chdir', (['dir0'], {}), '(dir0)\n', (700, 706), False, 'import os\n'), ((500, 511), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (509, 511), False, 'import os\n'), ((724, 738), 'os.chdir', 'os.chdir', (['HOME'], {}), '(HOME)\n', (732, 738), False, 'import os\n'), ((860, 887), 'dirs.read_dirs', 'rd', (['"""memory_nonhelical_b73"""'], {}), "('memory_nonhelical_b73')\n", (862, 887), True, 'from dirs import read_dirs as rd\n'), ((899, 926), 'dirs.read_dirs', 'rd', (['"""memory_nonhelical_b27"""'], {}), "('memory_nonhelical_b27')\n", (901, 926), True, 'from dirs import read_dirs as rd\n'), ((938, 962), 'dirs.read_dirs', 'rd', (['"""memory_helical_b73"""'], {}), "('memory_helical_b73')\n", (940, 962), True, 'from dirs import read_dirs as rd\n'), ((974, 998), 'dirs.read_dirs', 'rd', (['"""memory_helical_b27"""'], {}), "('memory_helical_b27')\n", (976, 998), True, 'from dirs import read_dirs as rd\n'), ((1010, 1034), 'dirs.read_dirs', 'rd', (['"""memory_helical_b17"""'], {}), "('memory_helical_b17')\n", (1012, 1034), True, 'from dirs import read_dirs as rd\n'), ((1046, 1071), 'dirs.read_dirs', 'rd', (['"""memory_helical_toff"""'], {}), "('memory_helical_toff')\n", (1048, 1071), True, 'from dirs import read_dirs as rd\n'), ((1083, 1111), 'dirs.read_dirs', 'rd', (['"""memory_nonhelical_toff"""'], {}), "('memory_nonhelical_toff')\n", (1085, 1111), True, 'from dirs import read_dirs as rd\n'), ((1201, 1240), 'run.load_runs', 'r.load_runs', (['R', 'dir0', 'dirs'], {'quiet': '(False)'}), '(R, dir0, dirs, quiet=False)\n', (1212, 1240), True, 'import run as r\n'), ((1245, 1259), 'os.chdir', 'os.chdir', (['dir0'], {}), '(dir0)\n', (1253, 1259), False, 'import os\n'), ((1869, 1904), 'numpy.array', 'np.array', (["['A', 'B', 'C', 'D', 'E']"], {}), "(['A', 'B', 'C', 'D', 'E'])\n", (1877, 1904), True, 'import numpy as np\n'), ((2744, 2756), 
'numpy.array', 'np.array', (['Ts'], {}), '(Ts)\n', (2752, 2756), True, 'import numpy as np\n'), ((2770, 2786), 'numpy.array', 'np.array', (['gammas'], {}), '(gammas)\n', (2778, 2786), True, 'import numpy as np\n'), ((2799, 2814), 'numpy.array', 'np.array', (['betas'], {}), '(betas)\n', (2807, 2814), True, 'import numpy as np\n'), ((2824, 2836), 'numpy.array', 'np.array', (['gs'], {}), '(gs)\n', (2832, 2836), True, 'import numpy as np\n'), ((2850, 2866), 'numpy.array', 'np.array', (['rat_Hs'], {}), '(rat_Hs)\n', (2858, 2866), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.array', 'np.array', (['rat_as'], {}), '(rat_as)\n', (2888, 2896), True, 'import numpy as np\n'), ((2911, 2928), 'numpy.array', 'np.array', (['rat_Has'], {}), '(rat_Has)\n', (2919, 2928), True, 'import numpy as np\n'), ((2939, 3061), 'pandas.DataFrame', 'pd.DataFrame', (["{'name': names, 'Tr [GeV]': Ts, 'gamma': gammas, 'beta': betas, 'g': gs,\n '(Hs/H0)^2 (as/a0)^4': rat_Has}"], {}), "({'name': names, 'Tr [GeV]': Ts, 'gamma': gammas, 'beta': betas,\n 'g': gs, '(Hs/H0)^2 (as/a0)^4': rat_Has})\n", (2951, 3061), True, 'import pandas as pd\n'), ((6608, 6622), 'numpy.array', 'np.array', (['name'], {}), '(name)\n', (6616, 6622), True, 'import numpy as np\n'), ((6634, 6650), 'numpy.argsort', 'np.argsort', (['name'], {}), '(name)\n', (6644, 6650), True, 'import numpy as np\n'), ((7137, 7346), 'pandas.DataFrame', 'pd.DataFrame', (["{'name': name, 'B0': B0s, 'EM': EM_ar, 'k_* (1)': kstars, 'EGW': EGW_ar,\n 'Del EGW': DEGW_ar, 'ratio Del EGW': rat_DEGW_ar, 'pol': pol_ar,\n 'Del pol': Dpol_ar, 'ratio Del pol': rat_Dpol_ar}"], {}), "({'name': name, 'B0': B0s, 'EM': EM_ar, 'k_* (1)': kstars,\n 'EGW': EGW_ar, 'Del EGW': DEGW_ar, 'ratio Del EGW': rat_DEGW_ar, 'pol':\n pol_ar, 'Del pol': Dpol_ar, 'ratio Del pol': rat_Dpol_ar})\n", (7149, 7346), True, 'import pandas as pd\n'), ((13902, 13931), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (13914, 13931), 
True, 'import matplotlib.pyplot as plt\n'), ((13936, 13958), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (13956, 13958), False, 'import plot_sets\n'), ((13993, 14025), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {'both': '(False)'}), '(both=False)\n', (14013, 14025), False, 'import plot_sets\n'), ((20903, 20930), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (20913, 20930), True, 'import matplotlib.pyplot as plt\n'), ((22494, 22566), 'matplotlib.pyplot.title', 'plt.title', (["('Series %s: %s runs with $\\\\beta = %.1f$' % (A, h, b))"], {'pad': '(15)'}), "('Series %s: %s runs with $\\\\beta = %.1f$' % (A, h, b), pad=15)\n", (22503, 22566), True, 'import matplotlib.pyplot as plt\n'), ((22570, 22587), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (22580, 22587), True, 'import matplotlib.pyplot as plt\n'), ((22592, 22608), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(300)'], {}), '(1, 300)\n', (22600, 22608), True, 'import matplotlib.pyplot as plt\n'), ((22613, 22632), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.2)', '(1.1)'], {}), '(-0.2, 1.1)\n', (22621, 22632), True, 'import matplotlib.pyplot as plt\n'), ((22737, 22785), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'locc', 'fontsize': '(20)', 'frameon': '(False)'}), '(loc=locc, fontsize=20, frameon=False)\n', (22747, 22785), True, 'import matplotlib.pyplot as plt\n'), ((22825, 22847), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (22845, 22847), False, 'import plot_sets\n'), ((22852, 22869), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (22862, 22869), True, 'import matplotlib.pyplot as plt\n'), ((22874, 22913), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal P}_{\\\\rm GW} (k)$"""'], {}), "('${\\\\cal P}_{\\\\rm GW} (k)$')\n", (22884, 22913), True, 'import matplotlib.pyplot as plt\n'), ((22922, 22931), 'matplotlib.pyplot.gca', 
'plt.gca', ([], {}), '()\n', (22929, 22931), True, 'import matplotlib.pyplot as plt\n'), ((23982, 24015), 'numpy.array', 'np.array', (["df['EM']"], {'dtype': '"""float"""'}), "(df['EM'], dtype='float')\n", (23990, 24015), True, 'import numpy as np\n'), ((24026, 24060), 'numpy.array', 'np.array', (["df['EGW']"], {'dtype': '"""float"""'}), "(df['EGW'], dtype='float')\n", (24034, 24060), True, 'import numpy as np\n'), ((24074, 24112), 'numpy.array', 'np.array', (["df['Del EGW']"], {'dtype': '"""float"""'}), "(df['Del EGW'], dtype='float')\n", (24082, 24112), True, 'import numpy as np\n'), ((24124, 24144), 'numpy.array', 'np.array', (["df['name']"], {}), "(df['name'])\n", (24132, 24144), True, 'import numpy as np\n'), ((24158, 24202), 'numpy.array', 'np.array', (["df['ratio Del pol']"], {'dtype': '"""float"""'}), "(df['ratio Del pol'], dtype='float')\n", (24166, 24202), True, 'import numpy as np\n'), ((24215, 24255), 'numpy.array', 'np.array', (["df_pars['beta']"], {'dtype': '"""float"""'}), "(df_pars['beta'], dtype='float')\n", (24223, 24255), True, 'import numpy as np\n'), ((24269, 24310), 'numpy.array', 'np.array', (["df_pars['gamma']"], {'dtype': '"""float"""'}), "(df_pars['gamma'], dtype='float')\n", (24277, 24310), True, 'import numpy as np\n'), ((27704, 27722), 'numpy.logspace', 'np.logspace', (['(-1)', '(2)'], {}), '(-1, 2)\n', (27715, 27722), True, 'import numpy as np\n'), ((27732, 27750), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)'], {}), '(-3, 3)\n', (27743, 27750), True, 'import numpy as np\n'), ((27761, 27784), 'numpy.logspace', 'np.logspace', (['(-1.8)', '(-0.9)'], {}), '(-1.8, -0.9)\n', (27772, 27784), True, 'import numpy as np\n'), ((28149, 28160), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (28157, 28160), True, 'import numpy as np\n'), ((28170, 28182), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (28178, 28182), True, 'import numpy as np\n'), ((29345, 29356), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (29353, 29356), True, 
'import numpy as np\n'), ((29366, 29378), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (29374, 29378), True, 'import numpy as np\n'), ((30676, 30687), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (30684, 30687), True, 'import numpy as np\n'), ((30697, 30709), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (30705, 30709), True, 'import numpy as np\n'), ((31649, 31728), 'pandas.DataFrame', 'pd.DataFrame', (["{'run': AA, 'q': q, 'qt': qt, 'p': p, 'pt': pt, 'r': r, 'rt': rt}"], {}), "({'run': AA, 'q': q, 'qt': qt, 'p': p, 'pt': pt, 'r': r, 'rt': rt})\n", (31661, 31728), True, 'import pandas as pd\n'), ((39423, 39453), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 8)'}), '(1, figsize=(12, 8))\n', (39433, 39453), True, 'import matplotlib.pyplot as plt\n'), ((40288, 40301), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (40298, 40301), True, 'import matplotlib.pyplot as plt\n'), ((40306, 40328), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (40326, 40328), False, 'import plot_sets\n'), ((40430, 40475), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(1)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(1, y0, y1, color='black', ls='-.')\n", (40440, 40475), True, 'import matplotlib.pyplot as plt\n'), ((40480, 40525), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(2)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(2, y0, y1, color='black', ls='-.')\n", (40490, 40525), True, 'import matplotlib.pyplot as plt\n'), ((40530, 40547), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (40540, 40547), True, 'import matplotlib.pyplot as plt\n'), ((40552, 40573), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\eta$"""'], {}), "('$\\\\eta$')\n", (40562, 40573), True, 'import matplotlib.pyplot as plt\n'), ((40689, 40705), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y0', 'y1'], {}), '(y0, y1)\n', (40697, 40705), True, 'import matplotlib.pyplot as plt\n'), 
((40710, 40725), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (40718, 40725), True, 'import matplotlib.pyplot as plt\n'), ((46761, 46790), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (46773, 46790), True, 'import matplotlib.pyplot as plt\n'), ((47245, 47267), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (47265, 47267), False, 'import plot_sets\n'), ((47408, 47426), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.2)', '(300)'], {}), '(0.2, 300)\n', (47416, 47426), True, 'import matplotlib.pyplot as plt\n'), ((50118, 50208), 'matplotlib.pyplot.plot', 'plt.plot', (['k_nl', '(XiGW_nl[indt_nl, :] / EGW_nl[indt_nl, :])'], {'color': 'col', 'lw': '(0.7)', 'alpha': '(0.6)'}), '(k_nl, XiGW_nl[indt_nl, :] / EGW_nl[indt_nl, :], color=col, lw=0.7,\n alpha=0.6)\n', (50126, 50208), True, 'import matplotlib.pyplot as plt\n'), ((50218, 50280), 'matplotlib.pyplot.plot', 'plt.plot', (['k_nl', '(XiGW_nl_stat / EGW_nl_stat)'], {'ls': '"""--"""', 'color': 'col'}), "(k_nl, XiGW_nl_stat / EGW_nl_stat, ls='--', color=col)\n", (50226, 50280), True, 'import matplotlib.pyplot as plt\n'), ((50283, 50346), 'matplotlib.pyplot.plot', 'plt.plot', (['k_l', '(XiGW_l_stat / EGW_l_stat)'], {'ls': '"""dotted"""', 'color': 'col'}), "(k_l, XiGW_l_stat / EGW_l_stat, ls='dotted', color=col)\n", (50291, 50346), True, 'import matplotlib.pyplot as plt\n'), ((50999, 51026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (51009, 51026), True, 'import matplotlib.pyplot as plt\n'), ((51031, 51053), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (51051, 51053), False, 'import plot_sets\n'), ((51172, 51189), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (51182, 51189), True, 'import matplotlib.pyplot as plt\n'), ((51194, 51213), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.2)', '(1.1)'], {}), '(-0.2, 1.1)\n', (51202, 
51213), True, 'import matplotlib.pyplot as plt\n'), ((51217, 51234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (51227, 51234), True, 'import matplotlib.pyplot as plt\n'), ((51239, 51286), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal P}_{\\\\rm GW}^{\\\\rm nlin}$"""'], {}), "('${\\\\cal P}_{\\\\rm GW}^{\\\\rm nlin}$')\n", (51249, 51286), True, 'import matplotlib.pyplot as plt\n'), ((51289, 51330), 'matplotlib.pyplot.text', 'plt.text', (['(70)', '(0.05)', '"""C2\'"""'], {'color': '"""orange"""'}), '(70, 0.05, "C2\'", color=\'orange\')\n', (51297, 51330), True, 'import matplotlib.pyplot as plt\n'), ((51334, 51373), 'matplotlib.pyplot.text', 'plt.text', (['(70)', '(0.5)', '"""C2"""'], {'color': '"""orange"""'}), "(70, 0.5, 'C2', color='orange')\n", (51342, 51373), True, 'import matplotlib.pyplot as plt\n'), ((51377, 51430), 'matplotlib.pyplot.text', 'plt.text', (['(180)', '(0.5)', '"""C$2^{\\\\rm lin}$"""'], {'color': '"""orange"""'}), "(180, 0.5, 'C$2^{\\\\rm lin}$', color='orange')\n", (51385, 51430), True, 'import matplotlib.pyplot as plt\n'), ((51434, 51476), 'matplotlib.pyplot.text', 'plt.text', (['(0.35)', '(0.2)', '"""E2\'"""'], {'color': '"""purple"""'}), '(0.35, 0.2, "E2\'", color=\'purple\')\n', (51442, 51476), True, 'import matplotlib.pyplot as plt\n'), ((51479, 51515), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(0.7)', '"""D2\'"""'], {'color': '"""red"""'}), '(1, 0.7, "D2\'", color=\'red\')\n', (51487, 51515), True, 'import matplotlib.pyplot as plt\n'), ((52280, 52328), 'cosmoGW.shift_OmGW_today', 'cosmoGW.shift_OmGW_today', (['k_l', '(EGW_l * k_l)', 'T', 'g'], {}), '(k_l, EGW_l * k_l, T, g)\n', (52304, 52328), False, 'import cosmoGW\n'), ((52796, 52847), 'cosmoGW.shift_OmGW_today', 'cosmoGW.shift_OmGW_today', (['k_nl', '(EGW_nl * k_nl)', 'T', 'g'], {}), '(k_nl, EGW_nl * k_nl, T, g)\n', (52820, 52847), False, 'import cosmoGW\n'), ((52986, 53031), 'matplotlib.pyplot.plot', 'plt.plot', (['f_nl', 'OmGW_nl'], 
{'color': 'col', 'alpha': 'alp'}), '(f_nl, OmGW_nl, color=col, alpha=alp)\n', (52994, 53031), True, 'import matplotlib.pyplot as plt\n'), ((54849, 54876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (54859, 54876), True, 'import matplotlib.pyplot as plt\n'), ((54888, 54899), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (54897, 54899), False, 'import os\n'), ((54904, 54918), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (54912, 54918), False, 'import os\n'), ((55003, 55030), 'interferometry.read_sens', 'inte.read_sens', ([], {'SNR': '(10)', 'T': '(4)'}), '(SNR=10, T=4)\n', (55017, 55030), True, 'import interferometry as inte\n'), ((55084, 55117), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""SKA"""'], {'b': '"""hc"""'}), "(dir, 'SKA', b='hc')\n", (55097, 55117), True, 'import interferometry as inte\n'), ((55154, 55190), 'cosmoGW.hc_OmGW', 'cosmoGW.hc_OmGW', (['f_SKA', 'hc_SKA'], {'d': '(-1)'}), '(f_SKA, hc_SKA, d=-1)\n', (55169, 55190), False, 'import cosmoGW\n'), ((55240, 55276), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""Gaia_PLS_SNR10"""'], {}), "(dir, 'Gaia_PLS_SNR10')\n", (55253, 55276), True, 'import interferometry as inte\n'), ((55450, 55487), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""Theia_PLS_SNR10"""'], {}), "(dir, 'Theia_PLS_SNR10')\n", (55463, 55487), True, 'import interferometry as inte\n'), ((55560, 55598), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""DECIGO_PLS_SNR10"""'], {}), "(dir, 'DECIGO_PLS_SNR10')\n", (55573, 55598), True, 'import interferometry as inte\n'), ((55811, 55838), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""AEDGE"""'], {}), "(dir, 'AEDGE')\n", (55824, 55838), True, 'import interferometry as inte\n'), ((55861, 55887), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""AION"""'], {}), "(dir, 'AION')\n", (55874, 55887), True, 'import interferometry as inte\n'), ((55953, 55993), 'interferometry.read_csv', 
'inte.read_csv', (['dir', '"""binaries_MSPs_2038"""'], {}), "(dir, 'binaries_MSPs_2038')\n", (55966, 55993), True, 'import interferometry as inte\n'), ((56014, 56053), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""binaries_LLR_2038"""'], {}), "(dir, 'binaries_LLR_2038')\n", (56027, 56053), True, 'import interferometry as inte\n'), ((56074, 56113), 'interferometry.read_csv', 'inte.read_csv', (['dir', '"""binaries_SLR_2038"""'], {}), "(dir, 'binaries_SLR_2038')\n", (56087, 56113), True, 'import interferometry as inte\n'), ((56170, 56214), 'interferometry.read_sens', 'inte.read_sens', ([], {'interf': '"""muAres"""', 'SNR': '(10)', 'T': '(4)'}), "(interf='muAres', SNR=10, T=4)\n", (56184, 56214), True, 'import interferometry as inte\n'), ((56294, 56325), 'os.chdir', 'os.chdir', (['"""runs_nonhelical_ini"""'], {}), "('runs_nonhelical_ini')\n", (56302, 56325), False, 'import os\n'), ((56334, 56397), 'pta.read_PTA_data', 'pta.read_PTA_data', ([], {'beta_b': '(False)', 'Omega_b': '(False)', 'return_all': '(True)'}), '(beta_b=False, Omega_b=False, return_all=True)\n', (56351, 56397), False, 'import pta\n'), ((56479, 56502), 'numpy.linspace', 'np.linspace', (['(-2)', '(5)', '(100)'], {}), '(-2, 5, 100)\n', (56490, 56502), True, 'import numpy as np\n'), ((56544, 56611), 'pta.CP_delay', 'pta.CP_delay', (['betas', 'colors'], {'obs': '"""NANOGrav_singlePL_1s"""', 'plot': '(False)'}), "(betas, colors, obs='NANOGrav_singlePL_1s', plot=False)\n", (56556, 56611), False, 'import pta\n'), ((56690, 56757), 'pta.CP_delay', 'pta.CP_delay', (['betas', 'colors'], {'obs': '"""NANOGrav_brokenPL_1s"""', 'plot': '(False)'}), "(betas, colors, obs='NANOGrav_brokenPL_1s', plot=False)\n", (56702, 56757), False, 'import pta\n'), ((56835, 56848), 'os.chdir', 'os.chdir', (['CWD'], {}), '(CWD)\n', (56843, 56848), False, 'import os\n'), ((56867, 56918), 'matplotlib.pyplot.plot', 'plt.plot', (['fs', 'LISA_OmPLS'], {'color': '"""limegreen"""', 'lw': 'lww'}), "(fs, LISA_OmPLS, 
color='limegreen', lw=lww)\n", (56875, 56918), True, 'import matplotlib.pyplot as plt\n'), ((56923, 56980), 'matplotlib.pyplot.plot', 'plt.plot', (['fs', 'LISA_Om'], {'color': '"""limegreen"""', 'ls': '"""-."""', 'lw': 'lww'}), "(fs, LISA_Om, color='limegreen', ls='-.', lw=lww)\n", (56931, 56980), True, 'import matplotlib.pyplot as plt\n'), ((56985, 57031), 'matplotlib.pyplot.plot', 'plt.plot', (['f_SKA', 'Om_SKA'], {'color': '"""black"""', 'lw': 'lww'}), "(f_SKA, Om_SKA, color='black', lw=lww)\n", (56993, 57031), True, 'import matplotlib.pyplot as plt\n'), ((57036, 57083), 'matplotlib.pyplot.plot', 'plt.plot', (['f_Gaia', 'Om_Gaia'], {'color': '"""navy"""', 'lw': 'lww'}), "(f_Gaia, Om_Gaia, color='navy', lw=lww)\n", (57044, 57083), True, 'import matplotlib.pyplot as plt\n'), ((57088, 57137), 'matplotlib.pyplot.plot', 'plt.plot', (['f_Theia', 'Om_Theia'], {'color': '"""navy"""', 'lw': 'lww'}), "(f_Theia, Om_Theia, color='navy', lw=lww)\n", (57096, 57137), True, 'import matplotlib.pyplot as plt\n'), ((57142, 57196), 'matplotlib.pyplot.plot', 'plt.plot', (['f_DECIGO', 'Om_DECIGO'], {'color': '"""darkred"""', 'lw': 'lww'}), "(f_DECIGO, Om_DECIGO, color='darkred', lw=lww)\n", (57150, 57196), True, 'import matplotlib.pyplot as plt\n'), ((57201, 57250), 'matplotlib.pyplot.plot', 'plt.plot', (['f_AEDGE', 'Om_AEDGE'], {'color': '"""peru"""', 'lw': 'lww'}), "(f_AEDGE, Om_AEDGE, color='peru', lw=lww)\n", (57209, 57250), True, 'import matplotlib.pyplot as plt\n'), ((57255, 57302), 'matplotlib.pyplot.plot', 'plt.plot', (['f_AION', 'Om_AION'], {'color': '"""peru"""', 'lw': 'lww'}), "(f_AION, Om_AION, color='peru', lw=lww)\n", (57263, 57302), True, 'import matplotlib.pyplot as plt\n'), ((57307, 57358), 'matplotlib.pyplot.plot', 'plt.plot', (['f_MSP', 'Om_MSP'], {'color': '"""darkviolet"""', 'lw': 'lww'}), "(f_MSP, Om_MSP, color='darkviolet', lw=lww)\n", (57315, 57358), True, 'import matplotlib.pyplot as plt\n'), ((57363, 57414), 'matplotlib.pyplot.plot', 'plt.plot', (['f_LLR', 
'Om_LLR'], {'color': '"""darkviolet"""', 'lw': 'lww'}), "(f_LLR, Om_LLR, color='darkviolet', lw=lww)\n", (57371, 57414), True, 'import matplotlib.pyplot as plt\n'), ((57419, 57470), 'matplotlib.pyplot.plot', 'plt.plot', (['fs_mu', 'muAres_OmPLS'], {'color': '"""cyan"""', 'lw': 'lww'}), "(fs_mu, muAres_OmPLS, color='cyan', lw=lww)\n", (57427, 57470), True, 'import matplotlib.pyplot as plt\n'), ((57546, 57588), 'pta.get_min_max', 'pta.get_min_max', (['fNG', 'OmGW_NG_a', 'OmGW_NG_b'], {}), '(fNG, OmGW_NG_a, OmGW_NG_b)\n', (57561, 57588), False, 'import pta\n'), ((57593, 57653), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['fNG', 'minOm', 'maxOm'], {'color': '"""blue"""', 'alpha': '(0.3)'}), "(fNG, minOm, maxOm, color='blue', alpha=0.3)\n", (57609, 57653), True, 'import matplotlib.pyplot as plt\n'), ((57657, 57742), 'matplotlib.pyplot.text', 'plt.text', (['(6e-09)', '(1.3e-05)', '"""NANOGrav 12.5yr"""'], {'color': '"""blue"""', 'fontsize': '(14)', 'alpha': '(0.7)'}), "(6e-09, 1.3e-05, 'NANOGrav 12.5yr', color='blue', fontsize=14,\n alpha=0.7)\n", (57665, 57742), True, 'import matplotlib.pyplot as plt\n'), ((57753, 57827), 'matplotlib.pyplot.text', 'plt.text', (['(0.0002)', '(2e-13)', '"""LISA"""'], {'color': '"""limegreen"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(0.0002, 2e-13, 'LISA', color='limegreen', fontsize=16, alpha=0.7)\n", (57761, 57827), True, 'import matplotlib.pyplot as plt\n'), ((57842, 57910), 'matplotlib.pyplot.text', 'plt.text', (['(6e-08)', '(2e-10)', '"""SKA"""'], {'color': '"""black"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(6e-08, 2e-10, 'SKA', color='black', fontsize=16, alpha=0.7)\n", (57850, 57910), True, 'import matplotlib.pyplot as plt\n'), ((57926, 57995), 'matplotlib.pyplot.text', 'plt.text', (['(2e-07)', '(5e-10)', '"""Theia"""'], {'color': '"""navy"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(2e-07, 5e-10, 'Theia', color='navy', fontsize=16, alpha=0.7)\n", (57934, 57995), True, 'import matplotlib.pyplot as plt\n'), 
((58011, 58081), 'matplotlib.pyplot.text', 'plt.text', (['(1.8e-09)', '(4e-09)', '"""Gaia"""'], {'color': '"""navy"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(1.8e-09, 4e-09, 'Gaia', color='navy', fontsize=16, alpha=0.7)\n", (58019, 58081), True, 'import matplotlib.pyplot as plt\n'), ((58096, 58168), 'matplotlib.pyplot.text', 'plt.text', (['(0.03)', '(3e-17)', '"""DECIGO"""'], {'color': '"""darkred"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(0.03, 3e-17, 'DECIGO', color='darkred', fontsize=16, alpha=0.7)\n", (58104, 58168), True, 'import matplotlib.pyplot as plt\n'), ((58185, 58253), 'matplotlib.pyplot.text', 'plt.text', (['(0.03)', '(7e-15)', '"""AEDGE"""'], {'color': '"""peru"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(0.03, 7e-15, 'AEDGE', color='peru', fontsize=16, alpha=0.7)\n", (58193, 58253), True, 'import matplotlib.pyplot as plt\n'), ((58270, 58338), 'matplotlib.pyplot.text', 'plt.text', (['(0.028)', '(7e-12)', '"""AION"""'], {'color': '"""peru"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(0.028, 7e-12, 'AION', color='peru', fontsize=16, alpha=0.7)\n", (58278, 58338), True, 'import matplotlib.pyplot as plt\n'), ((58356, 58430), 'matplotlib.pyplot.text', 'plt.text', (['(2e-05)', '(5e-06)', '"""MSPs"""'], {'color': '"""darkviolet"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(2e-05, 5e-06, 'MSPs', color='darkviolet', fontsize=16, alpha=0.7)\n", (58364, 58430), True, 'import matplotlib.pyplot as plt\n'), ((58445, 58520), 'matplotlib.pyplot.text', 'plt.text', (['(1.7e-06)', '(1e-07)', '"""LLR"""'], {'color': '"""darkviolet"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(1.7e-06, 1e-07, 'LLR', color='darkviolet', fontsize=16, alpha=0.7)\n", (58453, 58520), True, 'import matplotlib.pyplot as plt\n'), ((58535, 58599), 'matplotlib.pyplot.text', 'plt.text', (['(0.0002)', '(3e-17)', '"""$\\\\mu$Ares"""'], {'color': '"""cyan"""', 'fontsize': '(16)'}), "(0.0002, 3e-17, '$\\\\mu$Ares', color='cyan', fontsize=16)\n", (58543, 58599), True, 'import 
matplotlib.pyplot as plt\n'), ((60079, 60149), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['fE1_l', 'OmGWE1_nl', 'OmGWE2_nl'], {'color': 'col_E', 'alpha': '(0.05)'}), '(fE1_l, OmGWE1_nl, OmGWE2_nl, color=col_E, alpha=0.05)\n', (60095, 60149), True, 'import matplotlib.pyplot as plt\n'), ((60175, 60197), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (60195, 60197), False, 'import plot_sets\n'), ((60202, 60219), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (60212, 60219), True, 'import matplotlib.pyplot as plt\n'), ((60224, 60241), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (60234, 60241), True, 'import matplotlib.pyplot as plt\n'), ((60246, 60268), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ [Hz]"""'], {}), "('$f$ [Hz]')\n", (60256, 60268), True, 'import matplotlib.pyplot as plt\n'), ((60273, 60316), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$h_0^2 \\\\Omega_{\\\\rm GW} (f)$"""'], {}), "('$h_0^2 \\\\Omega_{\\\\rm GW} (f)$')\n", (60283, 60316), True, 'import matplotlib.pyplot as plt\n'), ((60359, 60379), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1e-09)', '(1.0)'], {}), '(1e-09, 1.0)\n', (60367, 60379), True, 'import matplotlib.pyplot as plt\n'), ((60424, 60447), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-18)', '(0.0001)'], {}), '(1e-18, 0.0001)\n', (60432, 60447), True, 'import matplotlib.pyplot as plt\n'), ((60451, 60494), 'matplotlib.pyplot.text', 'plt.text', (['(6e-05)', '(1e-14)', '"""A2\'"""'], {'color': '"""blue"""'}), '(6e-05, 1e-14, "A2\'", color=\'blue\')\n', (60459, 60494), True, 'import matplotlib.pyplot as plt\n'), ((60498, 60548), 'matplotlib.pyplot.text', 'plt.text', (['(1.5e-07)', '(2e-17)', '"""B2\'"""'], {'color': '"""darkgreen"""'}), '(1.5e-07, 2e-17, "B2\'", color=\'darkgreen\')\n', (60506, 60548), True, 'import matplotlib.pyplot as plt\n'), ((60552, 60597), 'matplotlib.pyplot.text', 'plt.text', (['(3e-06)', '(1e-16)', '"""C2\'"""'], 
{'color': '"""orange"""'}), '(3e-06, 1e-16, "C2\'", color=\'orange\')\n', (60560, 60597), True, 'import matplotlib.pyplot as plt\n'), ((60601, 60643), 'matplotlib.pyplot.text', 'plt.text', (['(1e-07)', '(8e-14)', '"""D2\'"""'], {'color': '"""red"""'}), '(1e-07, 8e-14, "D2\'", color=\'red\')\n', (60609, 60643), True, 'import matplotlib.pyplot as plt\n'), ((60647, 60693), 'matplotlib.pyplot.text', 'plt.text', (['(0.0025)', '(1e-09)', '"""E2\'"""'], {'color': '"""purple"""'}), '(0.0025, 1e-09, "E2\'", color=\'purple\')\n', (60655, 60693), True, 'import matplotlib.pyplot as plt\n'), ((60697, 60745), 'matplotlib.pyplot.text', 'plt.text', (['(0.0006)', '(1.5e-12)', '"""E1\'"""'], {'color': '"""purple"""'}), '(0.0006, 1.5e-12, "E1\'", color=\'purple\')\n', (60705, 60745), True, 'import matplotlib.pyplot as plt\n'), ((61667, 61694), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (61677, 61694), True, 'import matplotlib.pyplot as plt\n'), ((61706, 61717), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (61715, 61717), False, 'import os\n'), ((61722, 61736), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (61730, 61736), False, 'import os\n'), ((61865, 61916), 'interferometry.read_sens', 'inte.read_sens', ([], {'SNR': '(10)', 'T': '(4)', 'interf': '"""LISA"""', 'Xi': '(True)'}), "(SNR=10, T=4, interf='LISA', Xi=True)\n", (61879, 61916), True, 'import interferometry as inte\n'), ((61978, 62020), 'interferometry.read_sens', 'inte.read_sens', ([], {'SNR': '(10)', 'T': '(4)', 'interf': '"""comb"""'}), "(SNR=10, T=4, interf='comb')\n", (61992, 62020), True, 'import interferometry as inte\n'), ((62069, 62082), 'os.chdir', 'os.chdir', (['CWD'], {}), '(CWD)\n', (62077, 62082), False, 'import os\n'), ((62101, 62152), 'matplotlib.pyplot.plot', 'plt.plot', (['fs', 'LISA_XiPLS'], {'color': '"""limegreen"""', 'lw': 'lww'}), "(fs, LISA_XiPLS, color='limegreen', lw=lww)\n", (62109, 62152), True, 'import matplotlib.pyplot as plt\n'), 
((62157, 62214), 'matplotlib.pyplot.plot', 'plt.plot', (['fs', 'LISA_Xi'], {'color': '"""limegreen"""', 'ls': '"""-."""', 'lw': 'lww'}), "(fs, LISA_Xi, color='limegreen', ls='-.', lw=lww)\n", (62165, 62214), True, 'import matplotlib.pyplot as plt\n'), ((62219, 62279), 'matplotlib.pyplot.plot', 'plt.plot', (['fs_comb', 'LISA_Taiji_XiPLS'], {'color': '"""darkred"""', 'lw': 'lww'}), "(fs_comb, LISA_Taiji_XiPLS, color='darkred', lw=lww)\n", (62227, 62279), True, 'import matplotlib.pyplot as plt\n'), ((62284, 62358), 'matplotlib.pyplot.text', 'plt.text', (['(0.0007)', '(1e-09)', '"""LISA"""'], {'color': '"""limegreen"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(0.0007, 1e-09, 'LISA', color='limegreen', fontsize=16, alpha=0.7)\n", (62292, 62358), True, 'import matplotlib.pyplot as plt\n'), ((62372, 62450), 'matplotlib.pyplot.text', 'plt.text', (['(1e-05)', '(1e-09)', '"""LISA--Taiji"""'], {'color': '"""darkred"""', 'fontsize': '(16)', 'alpha': '(0.7)'}), "(1e-05, 1e-09, 'LISA--Taiji', color='darkred', fontsize=16, alpha=0.7)\n", (62380, 62450), True, 'import matplotlib.pyplot as plt\n'), ((64120, 64190), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['fE1_l', 'OmGWE1_nl', 'OmGWE2_nl'], {'color': 'col_E', 'alpha': '(0.05)'}), '(fE1_l, OmGWE1_nl, OmGWE2_nl, color=col_E, alpha=0.05)\n', (64136, 64190), True, 'import matplotlib.pyplot as plt\n'), ((64216, 64238), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (64236, 64238), False, 'import plot_sets\n'), ((64243, 64260), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (64253, 64260), True, 'import matplotlib.pyplot as plt\n'), ((64265, 64282), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (64275, 64282), True, 'import matplotlib.pyplot as plt\n'), ((64287, 64309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ [Hz]"""'], {}), "('$f$ [Hz]')\n", (64297, 64309), True, 'import matplotlib.pyplot as plt\n'), ((64314, 64356), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$h_0^2\\\\,\\\\Xi_{\\\\rm GW} (f)$"""'], {}), "('$h_0^2\\\\,\\\\Xi_{\\\\rm GW} (f)$')\n", (64324, 64356), True, 'import matplotlib.pyplot as plt\n'), ((64398, 64418), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1e-09)', '(1.0)'], {}), '(1e-09, 1.0)\n', (64406, 64418), True, 'import matplotlib.pyplot as plt\n'), ((64463, 64485), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-14)', '(1e-06)'], {}), '(1e-14, 1e-06)\n', (64471, 64485), True, 'import matplotlib.pyplot as plt\n'), ((64490, 64533), 'matplotlib.pyplot.text', 'plt.text', (['(3e-05)', '(1e-11)', '"""A2\'"""'], {'color': '"""blue"""'}), '(3e-05, 1e-11, "A2\'", color=\'blue\')\n', (64498, 64533), True, 'import matplotlib.pyplot as plt\n'), ((64537, 64585), 'matplotlib.pyplot.text', 'plt.text', (['(8e-09)', '(2e-13)', '"""B2\'"""'], {'color': '"""darkgreen"""'}), '(8e-09, 2e-13, "B2\'", color=\'darkgreen\')\n', (64545, 64585), True, 'import matplotlib.pyplot as plt\n'), ((64589, 64634), 'matplotlib.pyplot.text', 'plt.text', (['(5e-07)', '(1e-10)', '"""C2\'"""'], {'color': '"""orange"""'}), '(5e-07, 1e-10, "C2\'", color=\'orange\')\n', (64597, 64634), True, 'import matplotlib.pyplot as plt\n'), ((64638, 64682), 'matplotlib.pyplot.text', 'plt.text', (['(4.5e-08)', '(1e-09)', '"""D2\'"""'], {'color': '"""red"""'}), '(4.5e-08, 1e-09, "D2\'", color=\'red\')\n', (64646, 64682), True, 'import matplotlib.pyplot as plt\n'), ((64685, 64729), 'matplotlib.pyplot.text', 'plt.text', (['(0.15)', '(1e-08)', '"""E2\'"""'], {'color': '"""purple"""'}), '(0.15, 1e-08, "E2\'", color=\'purple\')\n', (64693, 64729), True, 'import matplotlib.pyplot as plt\n'), ((64735, 64781), 'matplotlib.pyplot.text', 'plt.text', (['(0.0005)', '(1e-13)', '"""E1\'"""'], {'color': '"""purple"""'}), '(0.0005, 1e-13, "E1\'", color=\'purple\')\n', (64743, 64781), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2308), 'cosmoGW.H0_val', 'cosmoGW.H0_val', ([], {'h0': '(1.0)'}), '(h0=1.0)\n', (2300, 2308), 
False, 'import cosmoGW\n'), ((2321, 2349), 'cosmoGW.Hs_val', 'cosmoGW.Hs_val', (['g', '(T * u.GeV)'], {}), '(g, T * u.GeV)\n', (2335, 2349), False, 'import cosmoGW\n'), ((2386, 2417), 'cosmoGW.as_a0_rat', 'cosmoGW.as_a0_rat', (['g', '(T * u.GeV)'], {}), '(g, T * u.GeV)\n', (2403, 2417), False, 'import cosmoGW\n'), ((6683, 6696), 'numpy.array', 'np.array', (['B0s'], {}), '(B0s)\n', (6691, 6696), True, 'import numpy as np\n'), ((6716, 6732), 'numpy.array', 'np.array', (['kstars'], {}), '(kstars)\n', (6724, 6732), True, 'import numpy as np\n'), ((6751, 6766), 'numpy.array', 'np.array', (['EM_ar'], {}), '(EM_ar)\n', (6759, 6766), True, 'import numpy as np\n'), ((6786, 6802), 'numpy.array', 'np.array', (['EGW_ar'], {}), '(EGW_ar)\n', (6794, 6802), True, 'import numpy as np\n'), ((6823, 6840), 'numpy.array', 'np.array', (['DEGW_ar'], {}), '(DEGW_ar)\n', (6831, 6840), True, 'import numpy as np\n'), ((6865, 6886), 'numpy.array', 'np.array', (['rat_DEGW_ar'], {}), '(rat_DEGW_ar)\n', (6873, 6886), True, 'import numpy as np\n'), ((6905, 6920), 'numpy.array', 'np.array', (['hr_ar'], {}), '(hr_ar)\n', (6913, 6920), True, 'import numpy as np\n'), ((6940, 6956), 'numpy.array', 'np.array', (['Dhr_ar'], {}), '(Dhr_ar)\n', (6948, 6956), True, 'import numpy as np\n'), ((6980, 7000), 'numpy.array', 'np.array', (['rat_Dhr_ar'], {}), '(rat_Dhr_ar)\n', (6988, 7000), True, 'import numpy as np\n'), ((7020, 7036), 'numpy.array', 'np.array', (['pol_ar'], {}), '(pol_ar)\n', (7028, 7036), True, 'import numpy as np\n'), ((7057, 7074), 'numpy.array', 'np.array', (['Dpol_ar'], {}), '(Dpol_ar)\n', (7065, 7074), True, 'import numpy as np\n'), ((7099, 7120), 'numpy.array', 'np.array', (['rat_Dpol_ar'], {}), '(rat_Dpol_ar)\n', (7107, 7120), True, 'import numpy as np\n'), ((7503, 7536), 'numpy.array', 'np.array', (["df['EM']"], {'dtype': '"""float"""'}), "(df['EM'], dtype='float')\n", (7511, 7536), True, 'import numpy as np\n'), ((7551, 7585), 'numpy.array', 'np.array', (["df['EGW']"], {'dtype': 
'"""float"""'}), "(df['EGW'], dtype='float')\n", (7559, 7585), True, 'import numpy as np\n'), ((7601, 7639), 'numpy.array', 'np.array', (["df['Del EGW']"], {'dtype': '"""float"""'}), "(df['Del EGW'], dtype='float')\n", (7609, 7639), True, 'import numpy as np\n'), ((7656, 7700), 'numpy.array', 'np.array', (["df['ratio Del EGW']"], {'dtype': '"""float"""'}), "(df['ratio Del EGW'], dtype='float')\n", (7664, 7700), True, 'import numpy as np\n'), ((7715, 7749), 'numpy.array', 'np.array', (["df['pol']"], {'dtype': '"""float"""'}), "(df['pol'], dtype='float')\n", (7723, 7749), True, 'import numpy as np\n'), ((7765, 7803), 'numpy.array', 'np.array', (["df['Del pol']"], {'dtype': '"""float"""'}), "(df['Del pol'], dtype='float')\n", (7773, 7803), True, 'import numpy as np\n'), ((7820, 7864), 'numpy.array', 'np.array', (["df['ratio Del pol']"], {'dtype': '"""float"""'}), "(df['ratio Del pol'], dtype='float')\n", (7828, 7864), True, 'import numpy as np\n'), ((20029, 20101), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'EGW_k_' + A + dff + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'EGW_k_' + A + dff + '.pdf', bbox_inches='tight')\n", (20040, 20101), True, 'import matplotlib.pyplot as plt\n'), ((21551, 21571), 'numpy.where', 'np.where', (['(EGW_l != 0)'], {}), '(EGW_l != 0)\n', (21559, 21571), True, 'import numpy as np\n'), ((21580, 21709), 'matplotlib.pyplot.plot', 'plt.plot', (['k[good]', '(XiGW_l[good] / EGW_l[good])'], {'color': 'col', 'alpha': '(0.1 + j * 0.3)', 'label': "('${\\\\cal E}_{\\\\rm EM} = %.2f$' % EEM[j])"}), "(k[good], XiGW_l[good] / EGW_l[good], color=col, alpha=0.1 + j * \n 0.3, label='${\\\\cal E}_{\\\\rm EM} = %.2f$' % EEM[j])\n", (21588, 21709), True, 'import matplotlib.pyplot as plt\n'), ((22240, 22261), 'numpy.where', 'np.where', (['(EGW_nl != 0)'], {}), '(EGW_nl != 0)\n', (22248, 22261), True, 'import numpy as np\n'), ((22270, 22359), 'matplotlib.pyplot.plot', 'plt.plot', (['k[good]', '(XiGW_nl[good] / EGW_nl[good])', 
'"""--"""'], {'color': 'col', 'alpha': '(0.1 + j * 0.3)'}), "(k[good], XiGW_nl[good] / EGW_nl[good], '--', color=col, alpha=0.1 +\n j * 0.3)\n", (22278, 22359), True, 'import matplotlib.pyplot as plt\n'), ((22803, 22821), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.2)', '(100)'], {}), '(0.2, 100)\n', (22811, 22821), True, 'import matplotlib.pyplot as plt\n'), ((22983, 23049), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'PGW_k_' + A + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'PGW_k_' + A + '.pdf', bbox_inches='tight')\n", (22994, 23049), True, 'import matplotlib.pyplot as plt\n'), ((24780, 24806), 'cosmoGW.ks_infla', 'cosmoGW.ks_infla', (['bet', 'gam'], {}), '(bet, gam)\n', (24796, 24806), False, 'import cosmoGW\n'), ((25368, 25398), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 8)'}), '(1, figsize=(12, 8))\n', (25378, 25398), True, 'import matplotlib.pyplot as plt\n'), ((25407, 25437), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(12, 8)'}), '(2, figsize=(12, 8))\n', (25417, 25437), True, 'import matplotlib.pyplot as plt\n'), ((25446, 25476), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {'figsize': '(12, 8)'}), '(3, figsize=(12, 8))\n', (25456, 25476), True, 'import matplotlib.pyplot as plt\n'), ((27876, 27889), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (27886, 27889), True, 'import matplotlib.pyplot as plt\n'), ((28463, 28519), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(20)', 'frameon': '(False)'}), "(loc='upper left', fontsize=20, frameon=False)\n", (28473, 28519), True, 'import matplotlib.pyplot as plt\n'), ((28528, 28617), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(5e-05)', '"""${\\\\cal E}_{\\\\rm GW} = (q {\\\\cal E}_{\\\\rm EM})^2$"""'], {'fontsize': '(26)'}), "(1, 5e-05, '${\\\\cal E}_{\\\\rm GW} = (q {\\\\cal E}_{\\\\rm EM})^2$',\n fontsize=26)\n", (28536, 28617), True, 'import matplotlib.pyplot as 
plt\n'), ((28635, 28657), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (28655, 28657), False, 'import plot_sets\n'), ((28666, 28701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\cal E}_{\\\\rm EM}$"""'], {}), "('${\\\\cal E}_{\\\\rm EM}$')\n", (28676, 28701), True, 'import matplotlib.pyplot as plt\n'), ((28709, 28744), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal E}_{\\\\rm GW}$"""'], {}), "('${\\\\cal E}_{\\\\rm GW}$')\n", (28719, 28744), True, 'import matplotlib.pyplot as plt\n'), ((28752, 28769), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (28762, 28769), True, 'import matplotlib.pyplot as plt\n'), ((28778, 28795), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (28788, 28795), True, 'import matplotlib.pyplot as plt\n'), ((28804, 28822), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.01)', '(20)'], {}), '(0.01, 20)\n', (28812, 28822), True, 'import matplotlib.pyplot as plt\n'), ((28873, 28895), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-06)', '(100.0)'], {}), '(1e-06, 100.0)\n', (28881, 28895), True, 'import matplotlib.pyplot as plt\n'), ((28901, 28988), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xx2', '(xx2 ** 0 * 1e-07)', '(xx2 ** 0 * 1000.0)'], {'color': '"""gray"""', 'alpha': '(0.1)'}), "(xx2, xx2 ** 0 * 1e-07, xx2 ** 0 * 1000.0, color='gray',\n alpha=0.1)\n", (28917, 28988), True, 'import matplotlib.pyplot as plt\n'), ((28995, 29049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/EEGW_EEM.pdf"""'], {'bbox_inches': '"""tight"""'}), "('plots/EEGW_EEM.pdf', bbox_inches='tight')\n", (29006, 29049), True, 'import matplotlib.pyplot as plt\n'), ((29175, 29188), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (29185, 29188), True, 'import matplotlib.pyplot as plt\n'), ((29711, 29767), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(20)', 'frameon': '(False)'}), "(loc='upper left', 
fontsize=20, frameon=False)\n", (29721, 29767), True, 'import matplotlib.pyplot as plt\n'), ((29776, 29873), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(1e-07)', '"""$\\\\Delta {\\\\cal E}_{\\\\rm GW} = (p {\\\\cal E}_{\\\\rm EM})^3$"""'], {'fontsize': '(26)'}), "(1, 1e-07,\n '$\\\\Delta {\\\\cal E}_{\\\\rm GW} = (p {\\\\cal E}_{\\\\rm EM})^3$', fontsize=26)\n", (29784, 29873), True, 'import matplotlib.pyplot as plt\n'), ((29907, 29929), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (29927, 29929), False, 'import plot_sets\n'), ((29938, 29973), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\cal E}_{\\\\rm EM}$"""'], {}), "('${\\\\cal E}_{\\\\rm EM}$')\n", (29948, 29973), True, 'import matplotlib.pyplot as plt\n'), ((29981, 30024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta {\\\\cal E}_{\\\\rm GW}$"""'], {}), "('$\\\\Delta {\\\\cal E}_{\\\\rm GW}$')\n", (29991, 30024), True, 'import matplotlib.pyplot as plt\n'), ((30031, 30048), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (30041, 30048), True, 'import matplotlib.pyplot as plt\n'), ((30057, 30074), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (30067, 30074), True, 'import matplotlib.pyplot as plt\n'), ((30083, 30101), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.01)', '(20)'], {}), '(0.01, 20)\n', (30091, 30101), True, 'import matplotlib.pyplot as plt\n'), ((30154, 30176), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(3e-11)', '(100.0)'], {}), '(3e-11, 100.0)\n', (30162, 30176), True, 'import matplotlib.pyplot as plt\n'), ((30183, 30270), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xx2', '(xx2 ** 0 * 1e-12)', '(xx2 ** 0 * 1000.0)'], {'color': '"""gray"""', 'alpha': '(0.1)'}), "(xx2, xx2 ** 0 * 1e-12, xx2 ** 0 * 1000.0, color='gray',\n alpha=0.1)\n", (30199, 30270), True, 'import matplotlib.pyplot as plt\n'), ((30278, 30333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/DEEGW_EEM.pdf"""'], 
{'bbox_inches': '"""tight"""'}), "('plots/DEEGW_EEM.pdf', bbox_inches='tight')\n", (30289, 30333), True, 'import matplotlib.pyplot as plt\n'), ((30464, 30477), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (30474, 30477), True, 'import matplotlib.pyplot as plt\n'), ((31046, 31102), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(20)', 'frameon': '(False)'}), "(loc='upper left', fontsize=20, frameon=False)\n", (31056, 31102), True, 'import matplotlib.pyplot as plt\n'), ((31111, 31240), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(1e-05)', "('$|\\\\Delta {\\\\cal P}_{\\\\rm GW}| = $' +\n ' $ r |{\\\\cal P}_{\\\\rm GW}| \\\\, {\\\\cal E}_{\\\\rm EM}$')"], {'fontsize': '(26)'}), "(1, 1e-05, '$|\\\\Delta {\\\\cal P}_{\\\\rm GW}| = $' +\n ' $ r |{\\\\cal P}_{\\\\rm GW}| \\\\, {\\\\cal E}_{\\\\rm EM}$', fontsize=26)\n", (31119, 31240), True, 'import matplotlib.pyplot as plt\n'), ((31274, 31296), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (31294, 31296), False, 'import plot_sets\n'), ((31305, 31340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\cal E}_{\\\\rm EM}$"""'], {}), "('${\\\\cal E}_{\\\\rm EM}$')\n", (31315, 31340), True, 'import matplotlib.pyplot as plt\n'), ((31348, 31413), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|\\\\Delta {\\\\cal P}_{\\\\rm GW}/{\\\\cal P}_{\\\\rm GW}|$"""'], {}), "('$|\\\\Delta {\\\\cal P}_{\\\\rm GW}/{\\\\cal P}_{\\\\rm GW}|$')\n", (31358, 31413), True, 'import matplotlib.pyplot as plt\n'), ((31418, 31435), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (31428, 31435), True, 'import matplotlib.pyplot as plt\n'), ((31444, 31461), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (31454, 31461), True, 'import matplotlib.pyplot as plt\n'), ((31470, 31488), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.01)', '(20)'], {}), '(0.01, 20)\n', (31478, 31488), True, 'import matplotlib.pyplot as 
plt\n'), ((31539, 31560), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(3e-07)', '(10.0)'], {}), '(3e-07, 10.0)\n', (31547, 31560), True, 'import matplotlib.pyplot as plt\n'), ((31567, 31652), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xx2', '(xx2 ** 0 * 1e-07)', '(xx2 ** 0 * 10.0)'], {'color': '"""gray"""', 'alpha': '(0.1)'}), "(xx2, xx2 ** 0 * 1e-07, xx2 ** 0 * 10.0, color='gray',\n alpha=0.1)\n", (31583, 31652), True, 'import matplotlib.pyplot as plt\n'), ((31970, 31990), 'numpy.array', 'np.array', (["df2['run']"], {}), "(df2['run'])\n", (31978, 31990), True, 'import numpy as np\n'), ((32003, 32021), 'numpy.array', 'np.array', (["df2['q']"], {}), "(df2['q'])\n", (32011, 32021), True, 'import numpy as np\n'), ((32035, 32054), 'numpy.array', 'np.array', (["df2['qt']"], {}), "(df2['qt'])\n", (32043, 32054), True, 'import numpy as np\n'), ((32067, 32085), 'numpy.array', 'np.array', (["df2['p']"], {}), "(df2['p'])\n", (32075, 32085), True, 'import numpy as np\n'), ((32099, 32118), 'numpy.array', 'np.array', (["df2['pt']"], {}), "(df2['pt'])\n", (32107, 32118), True, 'import numpy as np\n'), ((32131, 32149), 'numpy.array', 'np.array', (["df2['r']"], {}), "(df2['r'])\n", (32139, 32149), True, 'import numpy as np\n'), ((32163, 32182), 'numpy.array', 'np.array', (["df2['rt']"], {}), "(df2['rt'])\n", (32171, 32182), True, 'import numpy as np\n'), ((32199, 32214), 'numpy.argsort', 'np.argsort', (['nms'], {}), '(nms)\n', (32209, 32214), True, 'import numpy as np\n'), ((34123, 34166), 'run.interpolate_ts', 'r.interpolate_ts', (['t_nl2', 't_l', 'EGW_nl', 'EGW_l'], {}), '(t_nl2, t_l, EGW_nl, EGW_l)\n', (34139, 34166), True, 'import run as r\n'), ((34374, 34387), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (34384, 34387), True, 'import matplotlib.pyplot as plt\n'), ((34490, 34579), 'spectra.plot_neg_pos', 'spectra.plot_neg_pos', (['t_lnl', 'diff_EGW_nl'], {'ls1': '"""solid"""', 'lw1': '(1)', 'ls2': '""":"""', 'lw2': '(2)', 'col': 'col'}), 
"(t_lnl, diff_EGW_nl, ls1='solid', lw1=1, ls2=':', lw2=2,\n col=col)\n", (34510, 34579), False, 'import spectra\n'), ((34613, 34665), 'matplotlib.pyplot.hlines', 'plt.hlines', (['diffEGW_stat', '(-2)', '(20)'], {'color': 'col', 'ls': '"""-."""'}), "(diffEGW_stat, -2, 20, color=col, ls='-.')\n", (34623, 34665), True, 'import matplotlib.pyplot as plt\n'), ((34718, 34778), 'matplotlib.pyplot.plot', 'plt.plot', (['t_lnl[indt_1]', 'diff_EGW_nl[indt_1]', '"""o"""'], {'color': 'col'}), "(t_lnl[indt_1], diff_EGW_nl[indt_1], 'o', color=col)\n", (34726, 34778), True, 'import matplotlib.pyplot as plt\n'), ((34787, 34853), 'matplotlib.pyplot.hlines', 'plt.hlines', (['diff_EGW_nl[indt_1]', '(-2)', '(2)'], {'ls': '"""-."""', 'color': 'col', 'lw': '(0.7)'}), "(diff_EGW_nl[indt_1], -2, 2, ls='-.', color=col, lw=0.7)\n", (34797, 34853), True, 'import matplotlib.pyplot as plt\n'), ((36798, 36811), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (36808, 36811), True, 'import matplotlib.pyplot as plt\n'), ((36820, 36853), 'matplotlib.pyplot.plot', 'plt.plot', (['t_nl', 'EGW_nl'], {'color': 'col'}), '(t_nl, EGW_nl, color=col)\n', (36828, 36853), True, 'import matplotlib.pyplot as plt\n'), ((36862, 36910), 'matplotlib.pyplot.hlines', 'plt.hlines', (['EGW_stat', '(-2)', '(20)'], {'color': 'col', 'ls': '"""-."""'}), "(EGW_stat, -2, 20, color=col, ls='-.')\n", (36872, 36910), True, 'import matplotlib.pyplot as plt\n'), ((37030, 37084), 'matplotlib.pyplot.plot', 'plt.plot', (['t_nl[indt_1]', 'EGW_nl[indt_1]', '"""o"""'], {'color': 'col'}), "(t_nl[indt_1], EGW_nl[indt_1], 'o', color=col)\n", (37038, 37084), True, 'import matplotlib.pyplot as plt\n'), ((37110, 37171), 'matplotlib.pyplot.hlines', 'plt.hlines', (['EGW_nl[indt_1]', '(-2)', '(2)'], {'color': 'col', 'ls': '"""-."""', 'lw': '(0.7)'}), "(EGW_nl[indt_1], -2, 2, color=col, ls='-.', lw=0.7)\n", (37120, 37171), True, 'import matplotlib.pyplot as plt\n'), ((39483, 39514), 'matplotlib.pyplot.figure', 'plt.figure', 
(['jf'], {'figsize': '(12, 8)'}), '(jf, figsize=(12, 8))\n', (39493, 39514), True, 'import matplotlib.pyplot as plt\n'), ((39551, 39582), 'matplotlib.pyplot.figure', 'plt.figure', (['jf'], {'figsize': '(12, 8)'}), '(jf, figsize=(12, 8))\n', (39561, 39582), True, 'import matplotlib.pyplot as plt\n'), ((40587, 40630), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta {\\\\cal E}_{\\\\rm GW}$"""'], {}), "('$\\\\Delta {\\\\cal E}_{\\\\rm GW}$')\n", (40597, 40630), True, 'import matplotlib.pyplot as plt\n'), ((40639, 40686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal E}_{\\\\rm GW}^{\\\\rm nlin}$"""'], {}), "('${\\\\cal E}_{\\\\rm GW}^{\\\\rm nlin}$')\n", (40649, 40686), True, 'import matplotlib.pyplot as plt\n'), ((40741, 40763), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (40752, 40763), True, 'import numpy as np\n'), ((40801, 40859), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(0.000125)', '"""A2\'"""'], {'color': '"""blue"""', 'fontsize': 'fs'}), '(5.25, 0.000125, "A2\'", color=\'blue\', fontsize=fs)\n', (40809, 40859), True, 'import matplotlib.pyplot as plt\n'), ((40867, 40923), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(0.001)', '"""B2\'"""'], {'color': '"""green"""', 'fontsize': 'fs'}), '(5.25, 0.001, "B2\'", color=\'green\', fontsize=fs)\n', (40875, 40923), True, 'import matplotlib.pyplot as plt\n'), ((40931, 40988), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(7e-05)', '"""C2\'"""'], {'color': '"""orange"""', 'fontsize': 'fs'}), '(5.25, 7e-05, "C2\'", color=\'orange\', fontsize=fs)\n', (40939, 40988), True, 'import matplotlib.pyplot as plt\n'), ((40996, 41051), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(0.0004)', '"""D2\'"""'], {'color': '"""red"""', 'fontsize': 'fs'}), '(5.25, 0.0004, "D2\'", color=\'red\', fontsize=fs)\n', (41004, 41051), True, 'import matplotlib.pyplot as plt\n'), ((41058, 41116), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(0.0028)', 
'"""E2\'"""'], {'color': '"""purple"""', 'fontsize': 'fs'}), '(5.25, 0.0028, "E2\'", color=\'purple\', fontsize=fs)\n', (41066, 41116), True, 'import matplotlib.pyplot as plt\n'), ((42602, 42659), 'matplotlib.pyplot.text', 'plt.text', (['(5.75)', '(3.5e-07)', '"""A2\'"""'], {'color': '"""blue"""', 'fontsize': 'fs'}), '(5.75, 3.5e-07, "A2\'", color=\'blue\', fontsize=fs)\n', (42610, 42659), True, 'import matplotlib.pyplot as plt\n'), ((42667, 42722), 'matplotlib.pyplot.text', 'plt.text', (['(5.1)', '(9e-06)', '"""B2\'"""'], {'color': '"""green"""', 'fontsize': 'fs'}), '(5.1, 9e-06, "B2\'", color=\'green\', fontsize=fs)\n', (42675, 42722), True, 'import matplotlib.pyplot as plt\n'), ((42730, 42787), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(6e-09)', '"""C2\'"""'], {'color': '"""orange"""', 'fontsize': 'fs'}), '(5.25, 6e-09, "C2\'", color=\'orange\', fontsize=fs)\n', (42738, 42787), True, 'import matplotlib.pyplot as plt\n'), ((42795, 42850), 'matplotlib.pyplot.text', 'plt.text', (['(6.3)', '(3.5e-06)', '"""D2\'"""'], {'color': '"""red"""', 'fontsize': 'fs'}), '(6.3, 3.5e-06, "D2\'", color=\'red\', fontsize=fs)\n', (42803, 42850), True, 'import matplotlib.pyplot as plt\n'), ((42858, 42915), 'matplotlib.pyplot.text', 'plt.text', (['(5.25)', '(6e-05)', '"""E2\'"""'], {'color': '"""purple"""', 'fontsize': 'fs'}), '(5.25, 6e-05, "E2\'", color=\'purple\', fontsize=fs)\n', (42866, 42915), True, 'import matplotlib.pyplot as plt\n'), ((42959, 43033), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'overshoot_ts' + dff + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'overshoot_ts' + dff + '.pdf', bbox_inches='tight')\n", (42970, 43033), True, 'import matplotlib.pyplot as plt\n'), ((43099, 43113), 'matplotlib.pyplot.figure', 'plt.figure', (['jf'], {}), '(jf)\n', (43109, 43113), True, 'import matplotlib.pyplot as plt\n'), ((43138, 43160), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (43158, 43160), False, 'import 
plot_sets\n'), ((43290, 43335), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(1)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(1, y0, y1, color='black', ls='-.')\n", (43300, 43335), True, 'import matplotlib.pyplot as plt\n'), ((43344, 43389), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(2)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(2, y0, y1, color='black', ls='-.')\n", (43354, 43389), True, 'import matplotlib.pyplot as plt\n'), ((43398, 43415), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (43408, 43415), True, 'import matplotlib.pyplot as plt\n'), ((43424, 43445), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\eta$"""'], {}), "('$\\\\eta$')\n", (43434, 43445), True, 'import matplotlib.pyplot as plt\n'), ((43454, 43489), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal E}_{\\\\rm GW}$"""'], {}), "('${\\\\cal E}_{\\\\rm GW}$')\n", (43464, 43489), True, 'import matplotlib.pyplot as plt\n'), ((43497, 43513), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y0', 'y1'], {}), '(y0, y1)\n', (43505, 43513), True, 'import matplotlib.pyplot as plt\n'), ((43522, 43537), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (43530, 43537), True, 'import matplotlib.pyplot as plt\n'), ((43724, 43738), 'matplotlib.pyplot.figure', 'plt.figure', (['jf'], {}), '(jf)\n', (43734, 43738), True, 'import matplotlib.pyplot as plt\n'), ((43747, 43769), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (43767, 43769), False, 'import plot_sets\n'), ((43924, 43969), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(1)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(1, y0, y1, color='black', ls='-.')\n", (43934, 43969), True, 'import matplotlib.pyplot as plt\n'), ((43978, 44023), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(2)', 'y0', 'y1'], {'color': '"""black"""', 'ls': '"""-."""'}), "(2, y0, y1, color='black', ls='-.')\n", (43988, 44023), True, 'import matplotlib.pyplot 
as plt\n'), ((44032, 44053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\eta$"""'], {}), "('$\\\\eta$')\n", (44042, 44053), True, 'import matplotlib.pyplot as plt\n'), ((44211, 44226), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (44219, 44226), True, 'import matplotlib.pyplot as plt\n'), ((44278, 44294), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y0', 'y1'], {}), '(y0, y1)\n', (44286, 44294), True, 'import matplotlib.pyplot as plt\n'), ((44870, 44934), 'matplotlib.pyplot.plot', 'plt.plot', (['k_nl', 'EGW_nl[indt_nl, :]'], {'color': 'col', 'lw': '(0.7)', 'alpha': '(0.6)'}), '(k_nl, EGW_nl[indt_nl, :], color=col, lw=0.7, alpha=0.6)\n', (44878, 44934), True, 'import matplotlib.pyplot as plt\n'), ((44945, 44989), 'matplotlib.pyplot.plot', 'plt.plot', (['k_nl', 'EGW_nl_stat', '"""--"""'], {'color': 'col'}), "(k_nl, EGW_nl_stat, '--', color=col)\n", (44953, 44989), True, 'import matplotlib.pyplot as plt\n'), ((47451, 47472), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-38)', '(0.01)'], {}), '(1e-38, 0.01)\n', (47459, 47472), True, 'import matplotlib.pyplot as plt\n'), ((47526, 47568), 'matplotlib.pyplot.text', 'plt.text', (['(36.0)', '(1e-32)', '"""A2\'"""'], {'color': '"""blue"""'}), '(36.0, 1e-32, "A2\'", color=\'blue\')\n', (47534, 47568), True, 'import matplotlib.pyplot as plt\n'), ((47578, 47622), 'matplotlib.pyplot.text', 'plt.text', (['(150.0)', '(1e-19)', '"""B2\'"""'], {'color': '"""green"""'}), '(150.0, 1e-19, "B2\'", color=\'green\')\n', (47586, 47622), True, 'import matplotlib.pyplot as plt\n'), ((47631, 47675), 'matplotlib.pyplot.text', 'plt.text', (['(45.0)', '(1e-10)', '"""C2\'"""'], {'color': '"""orange"""'}), '(45.0, 1e-10, "C2\'", color=\'orange\')\n', (47639, 47675), True, 'import matplotlib.pyplot as plt\n'), ((47685, 47727), 'matplotlib.pyplot.text', 'plt.text', (['(180.0)', '(1e-25)', '"""D2\'"""'], {'color': '"""red"""'}), '(180.0, 1e-25, "D2\'", color=\'red\')\n', (47693, 47727), True, 'import matplotlib.pyplot 
as plt\n'), ((47736, 47780), 'matplotlib.pyplot.text', 'plt.text', (['(50.0)', '(1e-06)', '"""E2\'"""'], {'color': '"""purple"""'}), '(50.0, 1e-06, "E2\'", color=\'purple\')\n', (47744, 47780), True, 'import matplotlib.pyplot as plt\n'), ((47852, 47898), 'mpl_toolkits.axes_grid.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '"""50%"""', 'height': '(4.0)', 'loc': '(3)'}), "(ax, width='50%', height=4.0, loc=3)\n", (47862, 47898), False, 'from mpl_toolkits.axes_grid.inset_locator import inset_axes\n'), ((48276, 48293), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (48286, 48293), True, 'import matplotlib.pyplot as plt\n'), ((48302, 48319), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (48312, 48319), True, 'import matplotlib.pyplot as plt\n'), ((48328, 48350), 'plot_sets.axes_lines', 'plot_sets.axes_lines', ([], {}), '()\n', (48348, 48350), False, 'import plot_sets\n'), ((48359, 48376), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.6)', '(50)'], {}), '(0.6, 50)\n', (48367, 48376), True, 'import matplotlib.pyplot as plt\n'), ((48384, 48406), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(2e-09)', '(0.002)'], {}), '(2e-09, 0.002)\n', (48392, 48406), True, 'import matplotlib.pyplot as plt\n'), ((48418, 48427), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (48425, 48427), True, 'import matplotlib.pyplot as plt\n'), ((48686, 48740), 'matplotlib.pyplot.text', 'plt.text', (['(0.8)', '(6e-09)', '"""$E_{\\\\rm GW} (k)$"""'], {'fontsize': '(24)'}), "(0.8, 6e-09, '$E_{\\\\rm GW} (k)$', fontsize=24)\n", (48694, 48740), True, 'import matplotlib.pyplot as plt\n'), ((48774, 48793), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0001)', '(2)'], {}), '(0.0001, 2)\n', (48782, 48793), True, 'import matplotlib.pyplot as plt\n'), ((48835, 48912), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'overshoot_EGW_k' + dff + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'overshoot_EGW_k' + dff + '.pdf', 
bbox_inches='tight')\n", (48846, 48912), True, 'import matplotlib.pyplot as plt\n'), ((49577, 49595), 'numpy.where', 'np.where', (['(t_nl > 2)'], {}), '(t_nl > 2)\n', (49585, 49595), True, 'import numpy as np\n'), ((49617, 49662), 'numpy.trapz', 'np.trapz', (['EGW_nl[good, :]', 't_nl[good]'], {'axis': '(0)'}), '(EGW_nl[good, :], t_nl[good], axis=0)\n', (49625, 49662), True, 'import numpy as np\n'), ((49740, 49786), 'numpy.trapz', 'np.trapz', (['XiGW_nl[good, :]', 't_nl[good]'], {'axis': '(0)'}), '(XiGW_nl[good, :], t_nl[good], axis=0)\n', (49748, 49786), True, 'import numpy as np\n'), ((49856, 49873), 'numpy.where', 'np.where', (['(t_l > 2)'], {}), '(t_l > 2)\n', (49864, 49873), True, 'import numpy as np\n'), ((49894, 49937), 'numpy.trapz', 'np.trapz', (['EGW_l[good, :]', 't_l[good]'], {'axis': '(0)'}), '(EGW_l[good, :], t_l[good], axis=0)\n', (49902, 49937), True, 'import numpy as np\n'), ((50012, 50056), 'numpy.trapz', 'np.trapz', (['XiGW_l[good, :]', 't_l[good]'], {'axis': '(0)'}), '(XiGW_l[good, :], t_l[good], axis=0)\n', (50020, 50056), True, 'import numpy as np\n'), ((51549, 51626), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'overshoot_PGW_k' + dff + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'overshoot_PGW_k' + dff + '.pdf', bbox_inches='tight')\n", (51560, 51626), True, 'import matplotlib.pyplot as plt\n'), ((53058, 53099), 'matplotlib.pyplot.plot', 'plt.plot', (['f_l', 'OmGW_l'], {'color': 'col', 'ls': '"""-."""'}), "(f_l, OmGW_l, color=col, ls='-.')\n", (53066, 53099), True, 'import matplotlib.pyplot as plt\n'), ((53232, 53250), 'numpy.sign', 'np.sign', (['diff_OmGW'], {}), '(diff_OmGW)\n', (53239, 53250), True, 'import numpy as np\n'), ((53918, 53946), 'numpy.array', 'np.array', (['f_l'], {'dtype': '"""float"""'}), "(f_l, dtype='float')\n", (53926, 53946), True, 'import numpy as np\n'), ((53948, 53977), 'numpy.array', 'np.array', (['f_nl'], {'dtype': '"""float"""'}), "(f_nl, dtype='float')\n", (53956, 53977), True, 'import 
numpy as np\n'), ((53991, 54022), 'numpy.array', 'np.array', (['OmGW_l'], {'dtype': '"""float"""'}), "(OmGW_l, dtype='float')\n", (53999, 54022), True, 'import numpy as np\n'), ((54024, 54056), 'numpy.array', 'np.array', (['OmGW_nl'], {'dtype': '"""float"""'}), "(OmGW_nl, dtype='float')\n", (54032, 54056), True, 'import numpy as np\n'), ((55298, 55317), 'numpy.log10', 'np.log10', (['f_Gaia[0]'], {}), '(f_Gaia[0])\n', (55306, 55317), True, 'import numpy as np\n'), ((55319, 55339), 'numpy.log10', 'np.log10', (['f_Gaia[-1]'], {}), '(f_Gaia[-1])\n', (55327, 55339), True, 'import numpy as np\n'), ((55620, 55641), 'numpy.log10', 'np.log10', (['f_DECIGO[0]'], {}), '(f_DECIGO[0])\n', (55628, 55641), True, 'import numpy as np\n'), ((55643, 55665), 'numpy.log10', 'np.log10', (['f_DECIGO[-1]'], {}), '(f_DECIGO[-1])\n', (55651, 55665), True, 'import numpy as np\n'), ((60331, 60353), 'numpy.logspace', 'np.logspace', (['(-9)', '(0)', '(10)'], {}), '(-9, 0, 10)\n', (60342, 60353), True, 'import numpy as np\n'), ((60394, 60418), 'numpy.logspace', 'np.logspace', (['(-18)', '(-4)', '(15)'], {}), '(-18, -4, 15)\n', (60405, 60418), True, 'import numpy as np\n'), ((60758, 60825), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'OmGW_f_detectors.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'OmGW_f_detectors.pdf', bbox_inches='tight')\n", (60769, 60825), True, 'import matplotlib.pyplot as plt\n'), ((64370, 64392), 'numpy.logspace', 'np.logspace', (['(-9)', '(0)', '(10)'], {}), '(-9, 0, 10)\n', (64381, 64392), True, 'import numpy as np\n'), ((64433, 64457), 'numpy.logspace', 'np.logspace', (['(-14)', '(-4)', '(11)'], {}), '(-14, -4, 11)\n', (64444, 64457), True, 'import numpy as np\n'), ((64794, 64861), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'XiGW_f_detectors.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'XiGW_f_detectors.pdf', bbox_inches='tight')\n", (64805, 64861), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5045), 
'cosmoGW.ks_infla', 'cosmoGW.ks_infla', (['run.pars[3]', 'run.pars[2]'], {'eta': '(1)'}), '(run.pars[3], run.pars[2], eta=1)\n', (5012, 5045), False, 'import cosmoGW\n'), ((9605, 9646), 'cosmoGW.ks_infla', 'cosmoGW.ks_infla', (['pars[3]', 'pars[2]'], {'eta': '(1)'}), '(pars[3], pars[2], eta=1)\n', (9621, 9646), False, 'import cosmoGW\n'), ((15289, 15309), 'numpy.where', 'np.where', (['(EGW_l != 0)'], {}), '(EGW_l != 0)\n', (15297, 15309), True, 'import numpy as np\n'), ((16659, 16678), 'numpy.linspace', 'np.linspace', (['(1.2)', '(5)'], {}), '(1.2, 5)\n', (16670, 16678), True, 'import numpy as np\n'), ((16826, 16845), 'numpy.linspace', 'np.linspace', (['(15)', '(60)'], {}), '(15, 60)\n', (16837, 16845), True, 'import numpy as np\n'), ((17032, 17052), 'numpy.linspace', 'np.linspace', (['(1.15)', '(3)'], {}), '(1.15, 3)\n', (17043, 17052), True, 'import numpy as np\n'), ((17199, 17219), 'numpy.linspace', 'np.linspace', (['(10)', '(100)'], {}), '(10, 100)\n', (17210, 17219), True, 'import numpy as np\n'), ((17404, 17424), 'numpy.linspace', 'np.linspace', (['(1.4)', '(20)'], {}), '(1.4, 20)\n', (17415, 17424), True, 'import numpy as np\n'), ((17584, 17604), 'numpy.linspace', 'np.linspace', (['(30)', '(100)'], {}), '(30, 100)\n', (17595, 17604), True, 'import numpy as np\n'), ((17790, 17810), 'numpy.linspace', 'np.linspace', (['(1.25)', '(8)'], {}), '(1.25, 8)\n', (17801, 17810), True, 'import numpy as np\n'), ((17968, 17987), 'numpy.linspace', 'np.linspace', (['(11)', '(50)'], {}), '(11, 50)\n', (17979, 17987), True, 'import numpy as np\n'), ((18173, 18194), 'numpy.linspace', 'np.linspace', (['(0.2)', '(2.5)'], {}), '(0.2, 2.5)\n', (18184, 18194), True, 'import numpy as np\n'), ((18352, 18370), 'numpy.linspace', 'np.linspace', (['(8)', '(50)'], {}), '(8, 50)\n', (18363, 18370), True, 'import numpy as np\n'), ((25246, 25305), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', '((q * xx) ** exp)'], {'color': 'col', 'lw': '(0.8)', 'label': 'lbl'}), '(xx, (q * xx) ** exp, 
color=col, lw=0.8, label=lbl)\n', (25254, 25305), True, 'import matplotlib.pyplot as plt\n'), ((28352, 28420), 'matplotlib.pyplot.plot', 'plt.plot', (['xx3', '((q[l + 5] * xx3) ** 2)'], {'ls': '"""-."""', 'color': 'cols[l]', 'lw': '(0.8)'}), "(xx3, (q[l + 5] * xx3) ** 2, ls='-.', color=cols[l], lw=0.8)\n", (28360, 28420), True, 'import matplotlib.pyplot as plt\n'), ((28842, 28863), 'numpy.logspace', 'np.logspace', (['(-6)', '(2)', '(9)'], {}), '(-6, 2, 9)\n', (28853, 28863), True, 'import numpy as np\n'), ((29600, 29668), 'matplotlib.pyplot.plot', 'plt.plot', (['xx3', '((p[l + 5] * xx3) ** 3)'], {'ls': '"""-."""', 'color': 'cols[l]', 'lw': '(0.8)'}), "(xx3, (p[l + 5] * xx3) ** 3, ls='-.', color=cols[l], lw=0.8)\n", (29608, 29668), True, 'import matplotlib.pyplot as plt\n'), ((30121, 30144), 'numpy.logspace', 'np.logspace', (['(-10)', '(2)', '(13)'], {}), '(-10, 2, 13)\n', (30132, 30144), True, 'import numpy as np\n'), ((30940, 31001), 'matplotlib.pyplot.plot', 'plt.plot', (['xx3', '(r[l + 5] * xx3)'], {'ls': '"""-."""', 'color': 'cols[l]', 'lw': '(0.8)'}), "(xx3, r[l + 5] * xx3, ls='-.', color=cols[l], lw=0.8)\n", (30948, 31001), True, 'import matplotlib.pyplot as plt\n'), ((31508, 31529), 'numpy.logspace', 'np.logspace', (['(-6)', '(0)', '(7)'], {}), '(-6, 0, 7)\n', (31519, 31529), True, 'import numpy as np\n'), ((31854, 31911), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/ratDPGW_EEM.pdf"""'], {'bbox_inches': '"""tight"""'}), "('plots/ratDPGW_EEM.pdf', bbox_inches='tight')\n", (31865, 31911), True, 'import matplotlib.pyplot as plt\n'), ((34219, 34238), 'numpy.where', 'np.where', (['(t_lnl > 2)'], {}), '(t_lnl > 2)\n', (34227, 34238), True, 'import numpy as np\n'), ((34265, 34305), 'numpy.trapz', 'np.trapz', (['diff_EGW_nl[good]', 't_lnl[good]'], {}), '(diff_EGW_nl[good], t_lnl[good])\n', (34273, 34305), True, 'import numpy as np\n'), ((35169, 35208), 'run.interpolate_ts', 'r.interpolate_ts', (['t_nl2', 't_l', 'P_nl', 'P_l'], {}), '(t_nl2, t_l, P_nl, 
P_l)\n', (35185, 35208), True, 'import run as r\n'), ((35431, 35444), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (35441, 35444), True, 'import matplotlib.pyplot as plt\n'), ((35925, 36011), 'spectra.plot_neg_pos', 'spectra.plot_neg_pos', (['t_lnl', 'diff_PGW'], {'ls1': '"""solid"""', 'lw1': '(1)', 'ls2': '""":"""', 'lw2': '(2)', 'col': 'col'}), "(t_lnl, diff_PGW, ls1='solid', lw1=1, ls2=':', lw2=2,\n col=col)\n", (35945, 36011), False, 'import spectra\n'), ((36159, 36212), 'run.interpolate_ts', 'r.interpolate_ts', (['t_old2', 't_old_l', 'EGW_old', 'EGW_old_l'], {}), '(t_old2, t_old_l, EGW_old, EGW_old_l)\n', (36175, 36212), True, 'import run as r\n'), ((36268, 36281), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (36278, 36281), True, 'import matplotlib.pyplot as plt\n'), ((36294, 36339), 'matplotlib.pyplot.plot', 'plt.plot', (['t_old_lnl', 'diff_old', '"""."""'], {'color': 'col'}), "(t_old_lnl, diff_old, '.', color=col)\n", (36302, 36339), True, 'import matplotlib.pyplot as plt\n'), ((36939, 36979), 'matplotlib.pyplot.plot', 'plt.plot', (['t_old', 'EGW_old', '"""."""'], {'color': 'col'}), "(t_old, EGW_old, '.', color=col)\n", (36947, 36979), True, 'import matplotlib.pyplot as plt\n'), ((37374, 37388), 'matplotlib.pyplot.figure', 'plt.figure', (['jf'], {}), '(jf)\n', (37384, 37388), True, 'import matplotlib.pyplot as plt\n'), ((37401, 37432), 'matplotlib.pyplot.plot', 'plt.plot', (['t_l', 'EGW_l'], {'color': 'col'}), '(t_l, EGW_l, color=col)\n', (37409, 37432), True, 'import matplotlib.pyplot as plt\n'), ((37445, 37503), 'matplotlib.pyplot.plot', 'plt.plot', (['(t_nl - shift)', 'EGW_nl'], {'lw': '(0.7)', 'ls': '"""--"""', 'color': 'col'}), "(t_nl - shift, EGW_nl, lw=0.7, ls='--', color=col)\n", (37453, 37503), True, 'import matplotlib.pyplot as plt\n'), ((37658, 37672), 'matplotlib.pyplot.figure', 'plt.figure', (['jf'], {}), '(jf)\n', (37668, 37672), True, 'import matplotlib.pyplot as plt\n'), ((37685, 37716), 
'matplotlib.pyplot.plot', 'plt.plot', (['t_nl', 'P_nl'], {'color': 'col'}), '(t_nl, P_nl, color=col)\n', (37693, 37716), True, 'import matplotlib.pyplot as plt\n'), ((37729, 37776), 'matplotlib.pyplot.plot', 'plt.plot', (['t_nl[indt_1]', 'P_nl[indt_1]'], {'color': 'col'}), '(t_nl[indt_1], P_nl[indt_1], color=col)\n', (37737, 37776), True, 'import matplotlib.pyplot as plt\n'), ((41224, 41233), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41231, 41233), True, 'import matplotlib.pyplot as plt\n'), ((41462, 41508), 'mpl_toolkits.axes_grid.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '"""40%"""', 'height': '(1.8)', 'loc': '(8)'}), "(ax, width='40%', height=1.8, loc=8)\n", (41472, 41508), False, 'from mpl_toolkits.axes_grid.inset_locator import inset_axes\n'), ((41652, 41666), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (41662, 41666), True, 'import matplotlib.pyplot as plt\n'), ((41679, 41693), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (41689, 41693), True, 'import matplotlib.pyplot as plt\n'), ((41986, 42036), 'matplotlib.pyplot.plot', 'plt.plot', (['t_l', 'EGW_l'], {'color': '"""darkgreen"""', 'alpha': '(0.6)'}), "(t_l, EGW_l, color='darkgreen', alpha=0.6)\n", (41994, 42036), True, 'import matplotlib.pyplot as plt\n'), ((42048, 42098), 'matplotlib.pyplot.plot', 'plt.plot', (['t_nl', 'EGW_nl'], {'color': '"""darkgreen"""', 'ls': '"""--"""'}), "(t_nl, EGW_nl, color='darkgreen', ls='--')\n", (42056, 42098), True, 'import matplotlib.pyplot as plt\n'), ((42532, 42546), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(2)', '(4)'], {}), '(2, 4)\n', (42540, 42546), True, 'import matplotlib.pyplot as plt\n'), ((42559, 42583), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0014)', '(0.0022)'], {}), '(0.0014, 0.0022)\n', (42567, 42583), True, 'import matplotlib.pyplot as plt\n'), ((43557, 43579), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (43568, 43579), True, 'import numpy as 
np\n'), ((43611, 43678), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'overshoot_ts_lin.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'overshoot_ts_lin.pdf', bbox_inches='tight')\n", (43622, 43678), True, 'import matplotlib.pyplot as plt\n'), ((43799, 43816), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (43809, 43816), True, 'import matplotlib.pyplot as plt\n'), ((44071, 44134), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta {\\\\cal P}_{\\\\rm GW}/{\\\\cal P}_{\\\\rm GW}$"""'], {}), "('$\\\\Delta {\\\\cal P}_{\\\\rm GW}/{\\\\cal P}_{\\\\rm GW}$')\n", (44081, 44134), True, 'import matplotlib.pyplot as plt\n'), ((44157, 44204), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\cal P}_{\\\\rm GW}^{\\\\rm nlin}$"""'], {}), "('${\\\\cal P}_{\\\\rm GW}^{\\\\rm nlin}$')\n", (44167, 44204), True, 'import matplotlib.pyplot as plt\n'), ((44246, 44268), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (44257, 44268), True, 'import numpy as np\n'), ((44324, 44402), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + 'overshoot_ts_PGW' + dff + '.pdf')"], {'bbox_inches': '"""tight"""'}), "('plots/' + 'overshoot_ts_PGW' + dff + '.pdf', bbox_inches='tight')\n", (44335, 44402), True, 'import matplotlib.pyplot as plt\n'), ((45633, 45672), 'matplotlib.pyplot.plot', 'plt.plot', (['k_l', 'EGW_l[indt_l]'], {'color': 'col'}), '(k_l, EGW_l[indt_l], color=col)\n', (45641, 45672), True, 'import matplotlib.pyplot as plt\n'), ((45685, 45727), 'matplotlib.pyplot.plot', 'plt.plot', (['k_l', 'EGW_l_stat', '"""-."""'], {'color': 'col'}), "(k_l, EGW_l_stat, '-.', color=col)\n", (45693, 45727), True, 'import matplotlib.pyplot as plt\n'), ((47492, 47516), 'numpy.logspace', 'np.logspace', (['(-38)', '(-2)', '(10)'], {}), '(-38, -2, 10)\n', (47503, 47516), True, 'import numpy as np\n'), ((48610, 48630), 'numpy.logspace', 'np.logspace', (['(0)', '(1)', '(2)'], {}), '(0, 1, 2)\n', (48621, 
48630), True, 'import numpy as np\n'), ((48654, 48676), 'numpy.logspace', 'np.logspace', (['(-8)', '(-3)', '(6)'], {}), '(-8, -3, 6)\n', (48665, 48676), True, 'import numpy as np\n'), ((55386, 55403), 'numpy.log10', 'np.log10', (['Om_Gaia'], {}), '(Om_Gaia)\n', (55394, 55403), True, 'import numpy as np\n'), ((55716, 55735), 'numpy.log10', 'np.log10', (['Om_DECIGO'], {}), '(Om_DECIGO)\n', (55724, 55735), True, 'import numpy as np\n'), ((8572, 8588), 'numpy.log10', 'np.log10', (['run.B0'], {}), '(run.B0)\n', (8580, 8588), True, 'import numpy as np\n'), ((8660, 8676), 'numpy.log10', 'np.log10', (['EGW[i]'], {}), '(EGW[i])\n', (8668, 8676), True, 'import numpy as np\n'), ((8751, 8768), 'numpy.log10', 'np.log10', (['DEGW[i]'], {}), '(DEGW[i])\n', (8759, 8768), True, 'import numpy as np\n'), ((8847, 8865), 'numpy.log10', 'np.log10', (['rDEGW[i]'], {}), '(rDEGW[i])\n', (8855, 8865), True, 'import numpy as np\n'), ((19479, 19502), 'numpy.logspace', 'np.logspace', (['(-34)', '(2)', '(10)'], {}), '(-34, 2, 10)\n', (19490, 19502), True, 'import numpy as np\n'), ((19580, 19602), 'numpy.logspace', 'np.logspace', (['(-30)', '(2)', '(9)'], {}), '(-30, 2, 9)\n', (19591, 19602), True, 'import numpy as np\n'), ((27072, 27085), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (27082, 27085), True, 'import matplotlib.pyplot as plt\n'), ((27102, 27158), 'matplotlib.pyplot.scatter', 'plt.scatter', (['EEM[i]', 'EGW[i]'], {'facecolors': 'f_col', 'color': 'col'}), '(EEM[i], EGW[i], facecolors=f_col, color=col)\n', (27113, 27158), True, 'import matplotlib.pyplot as plt\n'), ((27175, 27188), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (27185, 27188), True, 'import matplotlib.pyplot as plt\n'), ((27205, 27264), 'matplotlib.pyplot.scatter', 'plt.scatter', (['EEM[i]', 'delEGW[i]'], {'facecolors': 'f_col', 'color': 'col'}), '(EEM[i], delEGW[i], facecolors=f_col, color=col)\n', (27216, 27264), True, 'import matplotlib.pyplot as plt\n'), ((27281, 27294), 
'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (27291, 27294), True, 'import matplotlib.pyplot as plt\n'), ((27438, 27451), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (27448, 27451), True, 'import matplotlib.pyplot as plt\n'), ((27468, 27508), 'matplotlib.pyplot.plot', 'plt.plot', (['EEM[i]', 'EGW[i]', '"""x"""'], {'color': 'col'}), "(EEM[i], EGW[i], 'x', color=col)\n", (27476, 27508), True, 'import matplotlib.pyplot as plt\n'), ((27525, 27538), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (27535, 27538), True, 'import matplotlib.pyplot as plt\n'), ((27555, 27598), 'matplotlib.pyplot.plot', 'plt.plot', (['EEM[i]', 'delEGW[i]', '"""x"""'], {'color': 'col'}), "(EEM[i], delEGW[i], 'x', color=col)\n", (27563, 27598), True, 'import matplotlib.pyplot as plt\n'), ((27615, 27628), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (27625, 27628), True, 'import matplotlib.pyplot as plt\n'), ((35268, 35287), 'numpy.where', 'np.where', (['(t_lnl > 2)'], {}), '(t_lnl > 2)\n', (35276, 35287), True, 'import numpy as np\n'), ((35318, 35355), 'numpy.trapz', 'np.trapz', (['diff_PGW[good]', 't_lnl[good]'], {}), '(diff_PGW[good], t_lnl[good])\n', (35326, 35355), True, 'import numpy as np\n'), ((35494, 35546), 'matplotlib.pyplot.hlines', 'plt.hlines', (['diffPGW_stat', '(-2)', '(20)'], {'color': 'col', 'ls': '"""-."""'}), "(diffPGW_stat, -2, 20, color=col, ls='-.')\n", (35504, 35546), True, 'import matplotlib.pyplot as plt\n'), ((35581, 35638), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(-diffPGW_stat)', '(-2)', '(20)'], {'color': 'col', 'ls': '"""dotted"""'}), "(-diffPGW_stat, -2, 20, color=col, ls='dotted')\n", (35591, 35638), True, 'import matplotlib.pyplot as plt\n'), ((36376, 36389), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (36386, 36389), True, 'import matplotlib.pyplot as plt\n'), ((36460, 36508), 'run.interpolate_ts', 'r.interpolate_ts', (['t_old', 't_old_l', 'P_old', 
'P_old_l'], {}), '(t_old, t_old_l, P_old, P_old_l)\n', (36476, 36508), True, 'import run as r\n'), ((36582, 36631), 'matplotlib.pyplot.plot', 'plt.plot', (['t_old_lnl', 'diff_PGW_old', '"""."""'], {'color': 'col'}), "(t_old_lnl, diff_PGW_old, '.', color=col)\n", (36590, 36631), True, 'import matplotlib.pyplot as plt\n'), ((37539, 37583), 'matplotlib.pyplot.plot', 'plt.plot', (['t_old_l', 'EGW_old_l', '"""."""'], {'color': 'col'}), "(t_old_l, EGW_old_l, '.', color=col)\n", (37547, 37583), True, 'import matplotlib.pyplot as plt\n'), ((37813, 37851), 'matplotlib.pyplot.plot', 'plt.plot', (['t_old', 'P_old', '"""."""'], {'color': 'col'}), "(t_old, P_old, '.', color=col)\n", (37821, 37851), True, 'import matplotlib.pyplot as plt\n'), ((42216, 42237), 'numpy.trapz', 'np.trapz', (['EGW_stat', 'k'], {}), '(EGW_stat, k)\n', (42224, 42237), True, 'import numpy as np\n'), ((42331, 42439), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(2, 0.0014)', '(2)', '(0.0008)'], {'edgecolor': '"""black"""', 'alpha': '(0.7)', 'linewidth': '(1.5)', 'facecolor': '"""none"""'}), "((2, 0.0014), 2, 0.0008, edgecolor='black', alpha=0.7,\n linewidth=1.5, facecolor='none')\n", (42348, 42439), True, 'import matplotlib.patches as patches\n'), ((9183, 9200), 'numpy.log10', 'np.log10', (['pars[0]'], {}), '(pars[0])\n', (9191, 9200), True, 'import numpy as np\n'), ((19006, 19029), 'numpy.logspace', 'np.logspace', (['(-34)', '(2)', '(10)'], {}), '(-34, 2, 10)\n', (19017, 19029), True, 'import numpy as np\n'), ((19119, 19142), 'numpy.logspace', 'np.logspace', (['(-46)', '(2)', '(13)'], {}), '(-46, 2, 13)\n', (19130, 19142), True, 'import numpy as np\n'), ((19253, 19275), 'numpy.logspace', 'np.logspace', (['(-14)', '(2)', '(9)'], {}), '(-14, 2, 9)\n', (19264, 19275), True, 'import numpy as np\n'), ((19365, 19388), 'numpy.logspace', 'np.logspace', (['(-42)', '(2)', '(12)'], {}), '(-42, 2, 12)\n', (19376, 19388), True, 'import numpy as np\n'), ((24957, 24968), 'numpy.log10', 'np.log10', (['q'], 
{}), '(q)\n', (24965, 24968), True, 'import numpy as np\n'), ((24570, 24600), 'numpy.where', 'np.where', (["(df_pars['name'] == A)"], {}), "(df_pars['name'] == A)\n", (24578, 24600), True, 'import numpy as np\n'), ((24687, 24717), 'numpy.where', 'np.where', (["(df_pars['name'] == A)"], {}), "(df_pars['name'] == A)\n", (24695, 24717), True, 'import numpy as np\n'), ((25742, 25757), 'numpy.sqrt', 'np.sqrt', (['EGW[i]'], {}), '(EGW[i])\n', (25749, 25757), True, 'import numpy as np\n'), ((26008, 26023), 'numpy.sqrt', 'np.sqrt', (['EGW[i]'], {}), '(EGW[i])\n', (26015, 26023), True, 'import numpy as np\n'), ((26298, 26313), 'numpy.sqrt', 'np.sqrt', (['EGW[i]'], {}), '(EGW[i])\n', (26305, 26313), True, 'import numpy as np\n'), ((26590, 26605), 'numpy.sqrt', 'np.sqrt', (['EGW[i]'], {}), '(EGW[i])\n', (26597, 26605), True, 'import numpy as np\n'), ((26882, 26897), 'numpy.sqrt', 'np.sqrt', (['EGW[i]'], {}), '(EGW[i])\n', (26889, 26897), True, 'import numpy as np\n'), ((4453, 4469), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (4461, 4469), True, 'import numpy as np\n'), ((4475, 4491), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (4483, 4491), True, 'import numpy as np\n'), ((4546, 4562), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (4554, 4562), True, 'import numpy as np\n'), ((6290, 6306), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (6298, 6306), True, 'import numpy as np\n'), ((6312, 6328), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (6320, 6328), True, 'import numpy as np\n'), ((6422, 6438), 'numpy.where', 'np.where', (['(tk > 2)'], {}), '(tk > 2)\n', (6430, 6438), True, 'import numpy as np\n')] |
"""Utility methods managing inference based on a trained model
"""
import os
import sys
import warnings
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from scipy import stats, special
import emcee
import matplotlib.pyplot as plt
DEBUG = False
def get_normal_logpdf(mu, log_sigma, x,
                      bounds_lower=-np.inf,
                      bounds_upper=np.inf):
    """Evaluate the exact log normal likelihood log p(k_j|Omega) of the
    test set, parameterized by mean `mu` and log standard deviation
    `log_sigma`, with an optional box prior on (mu, log_sigma)

    Parameters
    ----------
    mu : float
        mean of the normal
    log_sigma : float
        log of the standard deviation of the normal
    x : np.ndarray or float
        points at which to evaluate the log PDF
    bounds_lower : np.ndarray or float
        lower bound on (mu, log_sigma); -inf outside
    bounds_upper : np.ndarray or float
        upper bound on (mu, log_sigma); -inf outside

    Returns
    -------
    np.ndarray or float
        log PDF broadcasted across mu, log_sigma, and x, or -inf if
        (mu, log_sigma) falls outside the bounds
    """
    params = np.array([mu, log_sigma])
    # Hard box prior on the hyperparameters
    if np.any(params < bounds_lower) or np.any(params > bounds_upper):
        return -np.inf
    # log N(x; mu, sigma) = -log sigma - (x - mu)^2/(2 sigma^2) - log sqrt(2 pi)
    quad_term = 0.5*(x - mu)**2.0/np.exp(2.0*log_sigma)
    logpdf = -log_sigma - quad_term - 0.5*np.log(2*np.pi)
    assert not (np.isnan(logpdf).any() or np.isinf(logpdf).any())
    return logpdf
def run_mcmc(log_prob, log_prob_kwargs, p0, n_run, n_burn, chain_path,
             run_name='mcmc',
             n_walkers=100,
             plot_chain=True,
             clear=False,
             n_cores=None):
    """Run MCMC sampling with emcee, persisting the chain to an HDF5 backend

    Parameters
    ----------
    log_prob : callable
        log-probability function passed to `emcee.EnsembleSampler`
    log_prob_kwargs : dict
        keyword arguments forwarded to `log_prob` at each evaluation
    p0 : np.array of shape `[n_walkers, n_dim]`
        initial walker positions; unused when `n_burn` is 0, in which
        case sampling resumes from the state stored in the backend
    n_run : int
        number of production steps
    n_burn : int
        number of burn-in steps; the burn-in samples are discarded
    chain_path : os.path or str
        path to the HDF5 file backing the chain
    run_name : str
        name of the HDF5 group for this run inside the backend file
    n_walkers : int
    plot_chain : bool
        whether to save a trace plot of the flattened chain
    clear : bool
        whether to reset the backend, discarding any stored chain
    n_cores : int or None
        number of worker processes; defaults to all cores minus two
    """
    n_dim = p0.shape[1]
    # Leave two cores free by default so the host stays responsive
    n_cores = cpu_count() - 2 if n_cores is None else n_cores
    # Set up the backend
    backend = emcee.backends.HDFBackend(chain_path, name=run_name)
    if clear:
        backend.reset(n_walkers, n_dim) # clear it in case the file already exists
    with Pool(n_cores) as pool:
        sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_prob,
                                        kwargs=log_prob_kwargs,
                                        pool=pool, backend=backend)
        if n_burn > 0:
            print("Running MCMC...")
            # Burn in from p0, discard those samples, then continue the
            # production run from the final burn-in walker state
            state = sampler.run_mcmc(p0, n_burn)
            sampler.reset()
            sampler.run_mcmc(state, n_run, progress=True)
        else:
            # Passing None resumes from the last state in the backend
            sampler.run_mcmc(None, n_run, progress=True)
    if plot_chain:
        samples = sampler.get_chain(flat=True)
        get_chain_plot(samples, os.path.join(os.path.dirname(chain_path),
                                             f'mcmc_chain_{os.path.split(chain_path)[1]}.png'))
def get_chain_plot(samples, out_path='mcmc_chain.png'):
    """Plot the trace of a flattened MCMC chain, one panel per parameter

    Note
    ----
    Borrowed from https://emcee.readthedocs.io/en/stable/tutorials/line/
    """
    fig, axes = plt.subplots(2, figsize=(10, 7), sharex=True)
    n_chain, n_dim = samples.shape
    param_names = ["mean", "log_sigma"]
    for panel, (ax, name) in enumerate(zip(axes, param_names)):
        ax.plot(samples[:, panel], "k", alpha=0.3)
        ax.set_xlim(0, len(samples))
        ax.set_ylabel(name)
        ax.yaxis.set_label_coords(-0.1, 0.5)
    axes[-1].set_xlabel("step number")
    # out_path=None means display interactively instead of saving
    if out_path is None:
        plt.show()
    else:
        fig.savefig(out_path)
def get_log_p_k_given_omega_int_kde(k_train, k_bnn, kwargs=None):
    """Evaluate the log likelihood, log p(k|Omega_int),
    using kernel density estimation (KDE) on training kappa,
    on the BNN kappa samples of test sightlines

    Parameters
    ----------
    k_train : np.array of shape `[n_train]`
        kappa in the training set
    k_bnn : np.array of shape `[n_test, n_samples]`
    kwargs : dict
        currently unused, placeholder for symmetry with analytic version

    Returns
    -------
    np.array of shape `[n_test, n_samples]`
        log p(k|Omega_int)
    """
    kde = stats.gaussian_kde(k_train, bw_method='scott')
    # BUGFIX: evaluate in log space directly. np.log(kde.pdf(...))
    # underflows to -inf for samples far in the tails of the training
    # distribution, which then trips the isinf assertion below;
    # gaussian_kde.logpdf computes the log density stably.
    log_p_k_given_omega_int = kde.logpdf(k_bnn.reshape(-1)).reshape(k_bnn.shape)
    assert not np.isnan(log_p_k_given_omega_int).any()
    assert not np.isinf(log_p_k_given_omega_int).any()
    return log_p_k_given_omega_int
def get_log_p_k_given_omega_int_analytic(k_train, k_bnn, interim_pdf_func):
    """Evaluate the log likelihood, log p(k|Omega_int), by applying an
    analytic interim-prior PDF to the BNN kappa samples of test sightlines

    Parameters
    ----------
    k_train : np.array of shape `[n_train]`
        kappa in the training set. Unused.
    k_bnn : np.array of shape `[n_test, n_samples]`
    interim_pdf_func : callable
        function that evaluates the PDF of the interim prior

    Returns
    -------
    np.array of shape `[n_test, n_samples]`
        log p(k|Omega_int)
    """
    pdf_vals = interim_pdf_func(k_bnn)
    log_pdf_vals = np.log(pdf_vals)
    # The interim prior must put finite, nonzero density on every sample
    assert not np.isnan(log_pdf_vals).any()
    assert not np.isinf(log_pdf_vals).any()
    return log_pdf_vals
def get_omega_post(k_bnn, log_p_k_given_omega_int, mcmc_kwargs,
                   bounds_lower, bounds_upper):
    """Sample from p(Omega|{d}) using MCMC

    Parameters
    ----------
    k_bnn : np.array of shape `[n_test, n_samples]`
        BNN samples for `n_test` sightlines
    log_p_k_given_omega_int : np.array of shape `[n_test, n_samples]`
        log p(k_bnn|Omega_int)
    mcmc_kwargs : dict
        Keyword arguments forwarded to `run_mcmc`
    bounds_lower, bounds_upper
        Hard bounds on [mu, log_sigma] applied inside the likelihood
    """
    np.random.seed(42)
    samples = k_bnn.squeeze(1)  # pop the lone Y_dim dimension
    # Freeze the data and the parameter bounds into the per-omega likelihood
    likelihood_func = partial(get_normal_logpdf, x=samples,
                              bounds_lower=bounds_lower,
                              bounds_upper=bounds_upper)
    mcmc_kwargs['log_prob_kwargs'] = dict(
        log_p_k_given_omega_func=likelihood_func,
        log_p_k_given_omega_int=log_p_k_given_omega_int
    )
    if DEBUG:
        print("int", log_p_k_given_omega_int.shape)
        print("kbnn", samples.shape)
        np.save('num.npy', likelihood_func(0.01, np.log(0.04)))
        np.save('denom.npy', log_p_k_given_omega_int)
        # Evaluate the objective on a (mu, log_sigma) grid for inspection
        grid_vals = np.zeros([200, 100])
        mu_grid, sig_grid = np.meshgrid(np.linspace(0, 0.05, 200),
                                        np.linspace(-7, -3, 100), indexing='ij')
        for r in range(200):
            for c in range(100):
                grid_vals[r, c] = log_prob_mcmc([mu_grid[r, c], sig_grid[r, c]],
                                                **mcmc_kwargs['log_prob_kwargs'])
        np.save('p_look.npy', grid_vals)
    run_mcmc(log_prob_mcmc, **mcmc_kwargs)
def get_omega_post_loop(k_samples_list, log_p_k_given_omega_int_list,
                        mcmc_kwargs,
                        bounds_lower, bounds_upper):
    """Sample from p(Omega|{d}) using MCMC, where each sightline carries
    its own array of posterior samples

    Parameters
    ----------
    k_samples_list : list
        Each element is the array of samples for a sightline, so the
        list has length `n_test`
    log_p_k_given_omega_int_list : list
        Each element is the array of log p(k_samples|Omega_int) for a
        sightline, so the list has length `n_test`
    mcmc_kwargs : dict
        Keyword arguments forwarded to `run_mcmc`
    bounds_lower, bounds_upper
        Hard bounds on [mu, log_sigma] applied inside the likelihood
    """
    np.random.seed(42)
    # One frozen likelihood per sightline, keyed to its own samples
    log_p_k_given_omega_func_list = [
        partial(get_normal_logpdf,
                x=k_samples,
                bounds_lower=bounds_lower,
                bounds_upper=bounds_upper)
        for k_samples in k_samples_list
    ]
    mcmc_kwargs['log_prob_kwargs'] = dict(
        log_p_k_given_omega_func_list=log_p_k_given_omega_func_list,
        log_p_k_given_omega_int_list=log_p_k_given_omega_int_list
    )
    if DEBUG:
        print("n_test")
        print(len(k_samples_list), len(log_p_k_given_omega_int_list))
        print(len(log_p_k_given_omega_func_list))
        print("n_samples of first sightline")
        print(len(k_samples_list[0]), len(log_p_k_given_omega_int_list[0]))
        np.save('func_list_num', log_p_k_given_omega_func_list)
        np.save('arr_list_denom', log_p_k_given_omega_int_list)
    run_mcmc(log_prob_mcmc_loop, **mcmc_kwargs)
def log_prob_mcmc(omega, log_p_k_given_omega_func, log_p_k_given_omega_int):
    """Evaluate the MCMC objective

    Parameters
    ----------
    omega : list
        Current MCMC sample of [mu, log_sigma] = Omega
    log_p_k_given_omega_func : callable
        function that returns p(k|Omega) of shape [n_test, n_samples]
        for given omega and k fixed to be the BNN samples
    log_p_k_given_omega_int : np.ndarray
        Values of p(k|Omega_int) of shape [n_test, n_samples] for k
        fixed to be the BNN samples

    Returns
    -------
    float
        Log posterior probability of omega, summed over sightlines
    """
    mu, log_sigma = omega[0], omega[1]
    log_num = log_p_k_given_omega_func(mu, log_sigma)
    log_ratio = log_num - log_p_k_given_omega_int  # [n_test, n_samples]
    # Per-sightline Monte Carlo average of the importance ratio, in log space
    per_los = special.logsumexp(log_ratio,
                                # normalization
                                b=1.0/log_ratio.shape[-1],
                                axis=1)  # [n_test,]
    assert not np.isnan(log_p_k_given_omega_int).any()
    assert not np.isinf(log_p_k_given_omega_int).any()
    # log_prob = log_prob - n_test*np.log(n_samples) # normalization
    return per_los.sum()  # summed over sightlines
def log_prob_mcmc_loop(omega, log_p_k_given_omega_func_list,
                       log_p_k_given_omega_int_list):
    """Evaluate the MCMC objective when each sightline has its own
    set of posterior samples

    Parameters
    ----------
    omega : list
        Current MCMC sample of [mu, log_sigma] = Omega
    log_p_k_given_omega_func_list : list of callable
        List of functions that returns p(k|Omega) of shape [n_samples]
        for given omega and k fixed to be the posterior samples
    log_p_k_given_omega_int_list : np.ndarray
        List of values of p(k|Omega_int) of shape [n_samples] for k
        fixed to be the posterior samples

    Returns
    -------
    float
        Log posterior probability of omega, or -inf if any sightline
        contributes a non-finite term (e.g. omega out of bounds)
    """
    n_test = len(log_p_k_given_omega_int_list)
    per_los = np.empty(n_test)
    for los_i in range(n_test):
        # log_num ~ [n_samples]
        log_num = log_p_k_given_omega_func_list[los_i](omega[0], omega[1])
        log_ratio = log_num - log_p_k_given_omega_int_list[los_i]  # [n_samples]
        # Monte Carlo average of the importance ratio, in log space
        los_term = special.logsumexp(log_ratio,
                                     # normalization
                                     b=1.0/len(log_ratio))  # scalar
        if DEBUG:
            if np.sum(~np.isfinite(log_ratio)) > 0:
                print("has nan in log_ratio")
                print("number of nans: ", np.sum(~np.isfinite(log_ratio)))
                print("los:", los_i)
                print("omega:", omega)
                print("num:", log_num)
                print("denom:", log_p_k_given_omega_int_list[los_i])
            if np.isnan(los_term):
                print("NaN mean over samples")
                print("los:", los_i)
                print("omega:", omega)
                print("num:", log_num)
                print("denom:", log_p_k_given_omega_int_list[los_i])
        per_los[los_i] = los_term
    finite_mask = np.isfinite(per_los)
    # Reject the proposal outright when any sightline term is non-finite;
    # this happens e.g. when the numerator is -inf for an out-of-bounds omega
    if np.sum(finite_mask) < n_test:
        return -np.inf
    # log_prob = log_prob - n_test*np.log(n_samples) # normalization
    return per_los[finite_mask].sum()  # summed over sightlines
def get_mcmc_samples(chain_path, chain_kwargs):
    """Load the samples from a saved MCMC run

    Parameters
    ----------
    chain_path : str
        Path to the stored chain
    chain_kwargs : dict
        Options for chain postprocessing, including flat, thin, discard

    Returns
    -------
    np.array of shape `[n_omega, 2]`
        omega samples from MCMC chain
    """
    reader = emcee.backends.HDFBackend(chain_path)
    return reader.get_chain(**chain_kwargs)
def get_kappa_log_weights(k_bnn, log_p_k_given_omega_int,
                          omega_post_samples=None):
    """Evaluate the log weights used to reweight individual kappa posteriors

    Parameters
    ----------
    k_bnn : np.ndarray of shape `[n_samples]`
        BNN posterior samples
    log_p_k_given_omega_int : np.ndarray of shape `[n_samples]`
        Likelihood of BNN kappa given the interim prior.
    omega_post_samples : np.ndarray, optional
        Omega posterior samples used as the prior to apply.
        Should be np.array of shape `[n_omega, 2]`.
        If None, only division by the interim prior will be done.

    Returns
    -------
    np.ndarray
        log weights evaluated at k_bnn
    """
    samples_col = k_bnn.reshape(-1, 1)  # [n_samples, 1]
    if omega_post_samples is None:
        # No new prior to apply; only divide out the interim prior
        log_num = 0.0
    else:
        # Broadcast every omega draw across the kappa samples
        mu_row = omega_post_samples[:, 0].reshape([1, -1])  # [1, n_omega]
        log_sigma_row = omega_post_samples[:, 1].reshape([1, -1])  # [1, n_omega]
        log_num = get_normal_logpdf(x=samples_col,
                                    mu=mu_row,
                                    log_sigma=log_sigma_row)  # [n_samples, n_omega]
    log_denom = log_p_k_given_omega_int[:, np.newaxis]  # [n_samples, 1]
    # Marginalize over the omega draws in log space
    return special.logsumexp(log_num - log_denom, axis=-1)  # [n_samples]
def get_kappa_log_weights_vectorized(k_bnn, omega_post_samples, log_p_k_given_omega_int):
    """Evaluate the log weights used to reweight individual kappa posteriors,
    for all test sightlines at once

    Parameters
    ----------
    k_bnn : np.array of shape `[n_test, n_samples]`
    omega_post_samples : np.array of shape `[n_omega, 2]`
        Columns are the [mu, log_sigma] draws from the omega posterior
    log_p_k_given_omega_int : np.array of shape `[n_test, n_samples]`

    Returns
    -------
    np.array of shape `[n_test, n_samples]`
        log weights evaluated at each BNN sample
    """
    k_bnn = k_bnn[:, :, np.newaxis]  # [n_test, n_samples, 1]
    mu = omega_post_samples[:, 0].reshape([1, 1, -1])  # [1, 1, n_omega]
    # BUGFIX: log_sigma lives in column 1; the original read column 0
    # twice, silently using the mu draws as the log-scale
    # (cf. get_kappa_log_weights, which reads columns 0 and 1).
    log_sigma = omega_post_samples[:, 1].reshape([1, 1, -1])  # [1, 1, n_omega]
    num = get_normal_logpdf(x=k_bnn,
                            mu=mu,
                            log_sigma=log_sigma)  # [n_test, n_samples, n_omega]
    denom = log_p_k_given_omega_int[:, :, np.newaxis]
    weights = special.logsumexp(num - denom, axis=-1)  # [n_test, n_samples]
    return weights
def resample_from_pdf(grid, log_pdf, n_samples):
    """Draw `n_samples` values from a PDF tabulated on `grid`.

    If more samples are requested than there are grid points, the PDF
    is first linearly interpolated onto a 5x-oversampled grid so draws
    are not restricted to the coarse support.
    """
    density = np.exp(log_pdf)
    if n_samples > len(grid):
        support = np.linspace(grid.min(), grid.max(), n_samples*5)
        density = np.interp(support, grid, density)
    else:
        support = grid
    density = density/np.sum(density)  # normalize to unity
    return np.random.choice(support,
                            size=n_samples,
                            replace=True,
                            p=density)
def fit_kde_on_weighted_samples(samples, weights=None):
    """Fit a Gaussian KDE (Scott's bandwidth) on optionally weighted samples."""
    flat_samples = samples.squeeze()
    flat_weights = None if weights is None else weights.squeeze()
    return stats.gaussian_kde(flat_samples,
                              bw_method='scott',
                              weights=flat_weights)
def resample_from_samples(samples, weights, n_resamples, plot_path=None):
    """Resample from a distribution defined by weighted samples

    Parameters
    ----------
    samples : np.ndarray
    weights : np.ndarray or None
        Sample weights; if None, samples are treated as equally weighted
    n_resamples : int
        Number of draws from the fitted KDE
    plot_path : str
        Path for the plot illustrating the KDE fit

    Returns
    -------
    np.ndarray
        Draws from the KDE fit to the weighted samples
    """
    kde = fit_kde_on_weighted_samples(samples, weights)
    samples = samples.squeeze()
    # BUGFIX: guard the squeeze. `fit_kde_on_weighted_samples` accepts
    # weights=None, but the unconditional squeeze here crashed on None.
    if weights is not None:
        weights = weights.squeeze()
    resamples = kde.resample(n_resamples).squeeze()
    if plot_path is not None:
        grid = np.linspace(-0.2, 0.2, 50)
        plt.hist(samples, density=True,
                 histtype='step', color='tab:gray', label='orig samples')
        plt.hist(samples, weights=weights, density=True,
                 alpha=0.5, color='tab:red', label='weighted samples')
        plt.plot(grid, kde.pdf(grid), color='k', label='KDE fit')
        plt.legend()
        plt.savefig(plot_path)
        plt.close()
    return resamples
| [
"numpy.random.seed",
"numpy.sum",
"numpy.empty",
"numpy.isnan",
"numpy.exp",
"scipy.special.logsumexp",
"multiprocessing.cpu_count",
"emcee.backends.HDFBackend",
"matplotlib.pyplot.close",
"os.path.dirname",
"numpy.isfinite",
"numpy.linspace",
"numpy.random.choice",
"matplotlib.pyplot.subp... | [((778, 803), 'numpy.array', 'np.array', (['[mu, log_sigma]'], {}), '([mu, log_sigma])\n', (786, 803), True, 'import numpy as np\n'), ((811, 843), 'numpy.any', 'np.any', (['(candidate < bounds_lower)'], {}), '(candidate < bounds_lower)\n', (817, 843), True, 'import numpy as np\n'), ((1713, 1765), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['chain_path'], {'name': 'run_name'}), '(chain_path, name=run_name)\n', (1738, 1765), False, 'import emcee\n'), ((2796, 2841), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(10, 7)', 'sharex': '(True)'}), '(2, figsize=(10, 7), sharex=True)\n', (2808, 2841), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3887), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['k_train'], {'bw_method': '"""scott"""'}), "(k_train, bw_method='scott')\n", (3859, 3887), False, 'from scipy import stats, special\n'), ((3996, 4027), 'numpy.log', 'np.log', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4002, 4027), True, 'import numpy as np\n'), ((4868, 4899), 'numpy.log', 'np.log', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4874, 4899), True, 'import numpy as np\n'), ((5443, 5461), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5457, 5461), True, 'import numpy as np\n'), ((5554, 5648), 'functools.partial', 'partial', (['get_normal_logpdf'], {'x': 'k_bnn', 'bounds_lower': 'bounds_lower', 'bounds_upper': 'bounds_upper'}), '(get_normal_logpdf, x=k_bnn, bounds_lower=bounds_lower, bounds_upper\n =bounds_upper)\n', (5561, 5648), False, 'from functools import partial\n'), ((7192, 7210), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7206, 7210), True, 'import numpy as np\n'), ((9119, 9184), 'scipy.special.logsumexp', 'special.logsumexp', (['log_ratio'], {'b': '(1.0 / log_ratio.shape[-1])', 'axis': '(1)'}), '(log_ratio, b=1.0 / log_ratio.shape[-1], axis=1)\n', (9136, 9184), False, 'from scipy import stats, 
special\n'), ((10345, 10361), 'numpy.empty', 'np.empty', (['n_test'], {}), '(n_test)\n', (10353, 10361), True, 'import numpy as np\n'), ((11675, 11713), 'numpy.isfinite', 'np.isfinite', (['mean_over_samples_dataset'], {}), '(mean_over_samples_dataset)\n', (11686, 11713), True, 'import numpy as np\n'), ((11734, 11751), 'numpy.sum', 'np.sum', (['is_finite'], {}), '(is_finite)\n', (11740, 11751), True, 'import numpy as np\n'), ((12830, 12867), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['chain_path'], {}), '(chain_path)\n', (12855, 12867), False, 'import emcee\n'), ((14172, 14211), 'scipy.special.logsumexp', 'special.logsumexp', (['(num - denom)'], {'axis': '(-1)'}), '(num - denom, axis=-1)\n', (14189, 14211), False, 'from scipy import stats, special\n'), ((15075, 15114), 'scipy.special.logsumexp', 'special.logsumexp', (['(num - denom)'], {'axis': '(-1)'}), '(num - denom, axis=-1)\n', (15092, 15114), False, 'from scipy import stats, special\n'), ((15497, 15561), 'numpy.random.choice', 'np.random.choice', (['fine_grid'], {'size': 'n_samples', 'replace': '(True)', 'p': 'pdf'}), '(fine_grid, size=n_samples, replace=True, p=pdf)\n', (15513, 15561), True, 'import numpy as np\n'), ((15892, 15955), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['samples'], {'bw_method': '"""scott"""', 'weights': 'weights'}), "(samples, bw_method='scott', weights=weights)\n", (15910, 15955), False, 'from scipy import stats, special\n'), ((877, 909), 'numpy.any', 'np.any', (['(candidate > bounds_upper)'], {}), '(candidate > bounds_upper)\n', (883, 909), True, 'import numpy as np\n'), ((1873, 1886), 'multiprocessing.Pool', 'Pool', (['n_cores'], {}), '(n_cores)\n', (1877, 1886), False, 'from multiprocessing import Pool, cpu_count\n'), ((1914, 2019), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['n_walkers', 'n_dim', 'log_prob'], {'kwargs': 'log_prob_kwargs', 'pool': 'pool', 'backend': 'backend'}), '(n_walkers, n_dim, log_prob, kwargs=log_prob_kwargs,\n pool=pool, 
backend=backend)\n', (1935, 2019), False, 'import emcee\n'), ((3190, 3200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3198, 3200), True, 'import matplotlib.pyplot as plt\n'), ((6174, 6219), 'numpy.save', 'np.save', (['"""denom.npy"""', 'log_p_k_given_omega_int'], {}), "('denom.npy', log_p_k_given_omega_int)\n", (6181, 6219), True, 'import numpy as np\n'), ((6232, 6252), 'numpy.zeros', 'np.zeros', (['[200, 100]'], {}), '([200, 100])\n', (6240, 6252), True, 'import numpy as np\n'), ((6585, 6609), 'numpy.save', 'np.save', (['"""p_look.npy"""', 'p'], {}), "('p_look.npy', p)\n", (6592, 6609), True, 'import numpy as np\n'), ((8168, 8223), 'numpy.save', 'np.save', (['"""func_list_num"""', 'log_p_k_given_omega_func_list'], {}), "('func_list_num', log_p_k_given_omega_func_list)\n", (8175, 8223), True, 'import numpy as np\n'), ((8232, 8287), 'numpy.save', 'np.save', (['"""arr_list_denom"""', 'log_p_k_given_omega_int_list'], {}), "('arr_list_denom', log_p_k_given_omega_int_list)\n", (8239, 8287), True, 'import numpy as np\n'), ((15414, 15429), 'numpy.exp', 'np.exp', (['log_pdf'], {}), '(log_pdf)\n', (15420, 15429), True, 'import numpy as np\n'), ((15469, 15480), 'numpy.sum', 'np.sum', (['pdf'], {}), '(pdf)\n', (15475, 15480), True, 'import numpy as np\n'), ((16510, 16536), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.2)', '(50)'], {}), '(-0.2, 0.2, 50)\n', (16521, 16536), True, 'import numpy as np\n'), ((16545, 16638), 'matplotlib.pyplot.hist', 'plt.hist', (['samples'], {'density': '(True)', 'histtype': '"""step"""', 'color': '"""tab:gray"""', 'label': '"""orig samples"""'}), "(samples, density=True, histtype='step', color='tab:gray', label=\n 'orig samples')\n", (16553, 16638), True, 'import matplotlib.pyplot as plt\n'), ((16659, 16765), 'matplotlib.pyplot.hist', 'plt.hist', (['samples'], {'weights': 'weights', 'density': '(True)', 'alpha': '(0.5)', 'color': '"""tab:red"""', 'label': '"""weighted samples"""'}), "(samples, weights=weights, density=True, 
alpha=0.5, color='tab:red',\n label='weighted samples')\n", (16667, 16765), True, 'import matplotlib.pyplot as plt\n'), ((16853, 16865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16863, 16865), True, 'import matplotlib.pyplot as plt\n'), ((16874, 16896), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_path'], {}), '(plot_path)\n', (16885, 16896), True, 'import matplotlib.pyplot as plt\n'), ((16905, 16916), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16914, 16916), True, 'import matplotlib.pyplot as plt\n'), ((978, 1001), 'numpy.exp', 'np.exp', (['(2.0 * log_sigma)'], {}), '(2.0 * log_sigma)\n', (984, 1001), True, 'import numpy as np\n'), ((1026, 1043), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1032, 1043), True, 'import numpy as np\n'), ((1626, 1637), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1635, 1637), False, 'from multiprocessing import Pool, cpu_count\n'), ((6282, 6307), 'numpy.linspace', 'np.linspace', (['(0)', '(0.05)', '(200)'], {}), '(0, 0.05, 200)\n', (6293, 6307), True, 'import numpy as np\n'), ((6338, 6362), 'numpy.linspace', 'np.linspace', (['(-7)', '(-3)', '(100)'], {}), '(-7, -3, 100)\n', (6349, 6362), True, 'import numpy as np\n'), ((7361, 7471), 'functools.partial', 'partial', (['get_normal_logpdf'], {'x': 'k_samples_list[los_i]', 'bounds_lower': 'bounds_lower', 'bounds_upper': 'bounds_upper'}), '(get_normal_logpdf, x=k_samples_list[los_i], bounds_lower=\n bounds_lower, bounds_upper=bounds_upper)\n', (7368, 7471), False, 'from functools import partial\n'), ((11326, 11353), 'numpy.isnan', 'np.isnan', (['mean_over_samples'], {}), '(mean_over_samples)\n', (11334, 11353), True, 'import numpy as np\n'), ((15348, 15363), 'numpy.exp', 'np.exp', (['log_pdf'], {}), '(log_pdf)\n', (15354, 15363), True, 'import numpy as np\n'), ((1074, 1090), 'numpy.isnan', 'np.isnan', (['logpdf'], {}), '(logpdf)\n', (1082, 1090), True, 'import numpy as np\n'), ((1112, 1128), 'numpy.isinf', 
'np.isinf', (['logpdf'], {}), '(logpdf)\n', (1120, 1128), True, 'import numpy as np\n'), ((2473, 2500), 'os.path.dirname', 'os.path.dirname', (['chain_path'], {}), '(chain_path)\n', (2488, 2500), False, 'import os\n'), ((4043, 4076), 'numpy.isnan', 'np.isnan', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4051, 4076), True, 'import numpy as np\n'), ((4098, 4131), 'numpy.isinf', 'np.isinf', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4106, 4131), True, 'import numpy as np\n'), ((4915, 4948), 'numpy.isnan', 'np.isnan', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4923, 4948), True, 'import numpy as np\n'), ((4970, 5003), 'numpy.isinf', 'np.isinf', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (4978, 5003), True, 'import numpy as np\n'), ((6151, 6163), 'numpy.log', 'np.log', (['(0.04)'], {}), '(0.04)\n', (6157, 6163), True, 'import numpy as np\n'), ((9353, 9386), 'numpy.isnan', 'np.isnan', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (9361, 9386), True, 'import numpy as np\n'), ((9408, 9441), 'numpy.isinf', 'np.isinf', (['log_p_k_given_omega_int'], {}), '(log_p_k_given_omega_int)\n', (9416, 9441), True, 'import numpy as np\n'), ((10965, 10987), 'numpy.isfinite', 'np.isfinite', (['log_ratio'], {}), '(log_ratio)\n', (10976, 10987), True, 'import numpy as np\n'), ((2561, 2586), 'os.path.split', 'os.path.split', (['chain_path'], {}), '(chain_path)\n', (2574, 2586), False, 'import os\n'), ((11090, 11112), 'numpy.isfinite', 'np.isfinite', (['log_ratio'], {}), '(log_ratio)\n', (11101, 11112), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pdb
import argparse
import os
import shutil
# finish the ntu
def getArgs():
    """Parse command-line options and return them as a plain dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, help='ori or test', default='ori')
    parser.add_argument('--data_path', type=str, help='the input data',
                        default='/home/wuqiang/Workspace/2_generative_model/3_DA_Gesture/2_ST_GCN/st-gcn-master/data/NTU-RGB-D/xsub/val_data.npy')
    parser.add_argument('--data_layout', type=str,
                        help='openpose, ntu-rgb+d, ntu_edge', default='openpose')
    parser.add_argument('--out_path', type=str,
                        help='the path where the result will be saved',
                        default='./videos/')
    return vars(parser.parse_args())
'''
basic setting
including 1) the connection of open pose --> /net/utils/graph.py
2) color for visualization
3) image size
'''
''' data loader '''
def data_loader(data_path, data_id):
    """Memory-map the skeleton dataset on disk and return one sample.

    The stored array is indexed as [N, C, T, V, M]
    (N = sample, C = channel, T = frame, V = joint, M = person);
    the returned item has shape [C, T, V, M].
    """
    # mmap_mode='r' avoids loading the whole dataset into memory
    dataset = np.load(data_path, mmap_mode='r')
    return dataset[data_id, :, :, :, :]
'''visualization for each frame'''
def data_visu(data_item, frame_id, out_path):
C, T, V, M = data_item.shape
#print(data_item.shape)
connecting_joint = np.array(
[2, 1, 21, 3, 21, 5, 6, 7, 21, 9, 10, 11, 1, 13, 14, 15, 1, 17, 18, 19, 2, 8, 8, 12, 12]) - 1
location = data_item[:, frame_id,:,:]
plt.figure()
plt.cla()
plt.xlim(-1500, 2000)
plt.ylim(-2000, 2000)
for m in range(M):
x = data_item[0,frame_id,:,m] * 1080
y = (data_item[1,frame_id,:,m] * 1080)
for v in range(V):
k = connecting_joint[v]
plt.plot([x[v],x[k]], [y[v],y[k]], '-o', c=(0.1,0.1,0.1), linewidth=0.5, markersize=0)
plt.scatter(x, y, marker='o', s=16)
#plt.show()
plt.savefig(out_path + str(t) + '.png')
plt.close()
if __name__ == '__main__':
    args = getArgs()
    data_path = args['data_path']
    out_path = args['out_path']
    mode = args['mode']
    data_id = 1
    data_item = data_loader(data_path, data_id)
    C, T, V, M = data_item.shape
    # Start every run from a clean output directory (rm and mkdir).
    # This single reset replaces the duplicated per-mode wipes: after it,
    # out_path is guaranteed fresh and empty.
    if os.path.exists(out_path):
        shutil.rmtree(out_path)
    os.makedirs(out_path)
    if mode == 'ori':
        # Render the raw sequence frame by frame
        for t in range(100):
            data_visu(data_item, t, out_path)
    if mode == 'test':
        # Side-by-side comparison of original vs generated skeletons.
        # NOTE(review): channels 0-2 feed 'ori' and channels 4: feed 'gen',
        # skipping channel 3 — confirm this split is intended.
        os.makedirs(out_path+'ori/')
        os.makedirs(out_path+'gen/')
        data_ori = data_item[:3,:,:,:]
        data_gen = data_item[4:,:,:,:]
        for t in range(64):
            data_visu(data_ori, t, out_path+'ori/')
            data_visu(data_gen, t, out_path+'gen/')
| [
"matplotlib.pyplot.xlim",
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"os.path.exists",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cla",
"numpy.array",
"shutil.rmtree"
... | [((158, 183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (181, 183), False, 'import argparse\n'), ((951, 984), 'numpy.load', 'np.load', (['data_path'], {'mmap_mode': '"""r"""'}), "(data_path, mmap_mode='r')\n", (958, 984), True, 'import numpy as np\n'), ((1477, 1489), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1487, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1503), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1501, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1529), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1500)', '(2000)'], {}), '(-1500, 2000)\n', (1516, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1555), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2000)', '(2000)'], {}), '(-2000, 2000)\n', (1542, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1953), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1951, 1953), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2285), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (2275, 2285), False, 'import os\n'), ((2323, 2344), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (2334, 2344), False, 'import os\n'), ((1318, 1420), 'numpy.array', 'np.array', (['[2, 1, 21, 3, 21, 5, 6, 7, 21, 9, 10, 11, 1, 13, 14, 15, 1, 17, 18, 19, 2, \n 8, 8, 12, 12]'], {}), '([2, 1, 21, 3, 21, 5, 6, 7, 21, 9, 10, 11, 1, 13, 14, 15, 1, 17, 18,\n 19, 2, 8, 8, 12, 12])\n', (1326, 1420), True, 'import numpy as np\n'), ((1842, 1877), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'marker': '"""o"""', 's': '(16)'}), "(x, y, marker='o', s=16)\n", (1853, 1877), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2318), 'shutil.rmtree', 'shutil.rmtree', (['out_path'], {}), '(out_path)\n', (2308, 2318), False, 'import shutil\n'), ((2379, 2403), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (2393, 2403), False, 'import os\n'), ((2449, 2470), 
'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (2460, 2470), False, 'import os\n'), ((2582, 2615), 'os.path.exists', 'os.path.exists', (["(out_path + 'ori/')"], {}), "(out_path + 'ori/')\n", (2596, 2615), False, 'import os\n'), ((2666, 2696), 'os.makedirs', 'os.makedirs', (["(out_path + 'ori/')"], {}), "(out_path + 'ori/')\n", (2677, 2696), False, 'import os\n'), ((2707, 2740), 'os.path.exists', 'os.path.exists', (["(out_path + 'gen/')"], {}), "(out_path + 'gen/')\n", (2721, 2740), False, 'import os\n'), ((2791, 2821), 'os.makedirs', 'os.makedirs', (["(out_path + 'gen/')"], {}), "(out_path + 'gen/')\n", (2802, 2821), False, 'import os\n'), ((1747, 1841), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[v], x[k]]', '[y[v], y[k]]', '"""-o"""'], {'c': '(0.1, 0.1, 0.1)', 'linewidth': '(0.5)', 'markersize': '(0)'}), "([x[v], x[k]], [y[v], y[k]], '-o', c=(0.1, 0.1, 0.1), linewidth=0.5,\n markersize=0)\n", (1755, 1841), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2440), 'shutil.rmtree', 'shutil.rmtree', (['out_path'], {}), '(out_path)\n', (2430, 2440), False, 'import shutil\n'), ((2627, 2659), 'shutil.rmtree', 'shutil.rmtree', (["(out_path + 'ori/')"], {}), "(out_path + 'ori/')\n", (2640, 2659), False, 'import shutil\n'), ((2752, 2784), 'shutil.rmtree', 'shutil.rmtree', (["(out_path + 'gen/')"], {}), "(out_path + 'gen/')\n", (2765, 2784), False, 'import shutil\n')] |
import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt
# open the xls file for reading
xlsfile = xlrd.open_workbook('fire_theft.xls', encoding_override='utf-8')
# there can be many sheets in an xls document; use the first one
sheet = xlsfile.sheet_by_index(0)
# ask the sheet for each row of data explicitly (row 0 is the header, skip it)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
# compute the number of samples
num_samples = data.shape[0]
# create a placeholder to pass in the data X values (column 0)
X = tf.placeholder(tf.float32, shape=num_samples, name='num-fire')
# create place holder to pass in the Y values (column 1)
Y = tf.placeholder(tf.float32, shape=num_samples, name='num-theft')
# compute the mean of the y-values for init intercept guess
# NOTE(review): y_mean is never used below; the intercept is initialized to 0.0
y_mean = np.mean(data[:,1])
# create tf variables for the model to be estimated
a = tf.Variable(0.0, name='slope'    , dtype=tf.float32)
b = tf.Variable(0.0, name='intercept', dtype=tf.float32)
# operation to compute model values: a*X+b
y = tf.add(tf.multiply(a, X), b)
# operation to compute the model error w.r.t. the data
error = tf.subtract(Y, y, name='error')
# operation to compute the L1 loss (mean absolute error) from the model error
# (despite what the name suggests, this is not an RMSE/L2 loss)
loss = tf.reduce_mean(tf.abs(error),name='L1-loss')
# init gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# operation to compute gradients of the loss function w.r.t. model parameters
compute_grad_op = optimizer.compute_gradients(loss=loss, var_list=[a, b])
# operation to apply gradients to model parameters
apply_grad_op = optimizer.apply_gradients(compute_grad_op, name='parameter-update')
# create summary operations to track the parameter values
a_sum_op = tf.summary.scalar('slope'    , a   )
b_sum_op = tf.summary.scalar('intercept', b   )
l_sum_op = tf.summary.scalar('loss'     , loss)
# create a merged summary operation for all summary ops
summary_op = tf.summary.merge([a_sum_op,b_sum_op,l_sum_op])
# keep the linter happy (also gives the post-loop prints defined values)
i = 0; loss_value = 0
# init session and compute stuff ...
with tf.Session() as sess:
    # init summary file writer for tensorboard (writes to the current directory)
    sfw = tf.summary.FileWriter(os.getcwd(), sess.graph)
    # init model parameters variables
    sess.run(a.initializer)
    sess.run(b.initializer)
    for i in range(3000):
        # (re)compute gradients and update model parameters for this iteration
        sess.run([apply_grad_op,loss], {X: data[:, 0], Y: data[:, 1]})
        # execute summary op for this iteration and get the latest loss value
        iter_summary,loss_value = sess.run([summary_op,loss], {X: data[:, 0], Y: data[:, 1]})
        # write loss summary for this iteration to the file writer
        sfw.add_summary(iter_summary,i)
        # blab about it on stdout
        if i%1000 == 0: print('loss at iteration {} is: {:0.2f}'.format(i,loss_value))
    # get the final model values
    slope,intercept = sess.run([a,b],{X: data[:, 0], Y: data[:, 1]})
    # clean up
    sfw.close()
# blab about final loss value
print('loss at iteration {} is: {:0.2f}'.format(i,loss_value))
# blab about the solution
print('the model values are: a={:0.3f}, b={:0.3f}'.format(slope,intercept))
# assign for readability
xdat = data[:,0]; ydat = data[:,1]
# plot the original data
plt.plot(xdat,ydat,'o',label='data', markersize=3)
# add the estimated linear model
plt.plot(xdat, slope*xdat + intercept, 'r', label='model')
# configure the plot
plt.legend(); plt.grid(True)
# we're done
plt.show()
'''
Since we're not using linear method no need for L2 loss function (!!)
Lets try out an L1 loss function and see how our model changes
''' | [
"tensorflow.abs",
"matplotlib.pyplot.show",
"tensorflow.subtract",
"tensorflow.summary.scalar",
"matplotlib.pyplot.plot",
"os.getcwd",
"xlrd.open_workbook",
"matplotlib.pyplot.legend",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.multiply",
"tensorflow.Variable",
"numpy.mean",... | [((117, 180), 'xlrd.open_workbook', 'xlrd.open_workbook', (['"""fire_theft.xls"""'], {'encoding_override': '"""utf-8"""'}), "('fire_theft.xls', encoding_override='utf-8')\n", (135, 180), False, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((498, 560), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'num_samples', 'name': '"""num-fire"""'}), "(tf.float32, shape=num_samples, name='num-fire')\n", (512, 560), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((612, 675), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'num_samples', 'name': '"""num-theft"""'}), "(tf.float32, shape=num_samples, name='num-theft')\n", (626, 675), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((746, 765), 'numpy.mean', 'np.mean', (['data[:, 1]'], {}), '(data[:, 1])\n', (753, 765), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((822, 870), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'name': '"""slope"""', 'dtype': 'tf.float32'}), "(0.0, name='slope', dtype=tf.float32)\n", (833, 870), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((879, 931), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'name': '"""intercept"""', 'dtype': 'tf.float32'}), "(0.0, name='intercept', dtype=tf.float32)\n", (890, 931), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1072, 1103), 'tensorflow.subtract', 'tf.subtract', (['Y', 'y'], {'name': '"""error"""'}), "(Y, y, name='error')\n", (1083, 1103), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1267, 1319), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1300, 1319), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as 
plt\n'), ((1679, 1708), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""slope"""', 'a'], {}), "('slope', a)\n", (1696, 1708), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1727, 1760), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""intercept"""', 'b'], {}), "('intercept', b)\n", (1744, 1760), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1775, 1806), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (1792, 1806), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1882, 1930), 'tensorflow.summary.merge', 'tf.summary.merge', (['[a_sum_op, b_sum_op, l_sum_op]'], {}), '([a_sum_op, b_sum_op, l_sum_op])\n', (1898, 1930), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((3236, 3289), 'matplotlib.pyplot.plot', 'plt.plot', (['xdat', 'ydat', '"""o"""'], {'label': '"""data"""', 'markersize': '(3)'}), "(xdat, ydat, 'o', label='data', markersize=3)\n", (3244, 3289), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((3321, 3381), 'matplotlib.pyplot.plot', 'plt.plot', (['xdat', '(slope * xdat + intercept)', '"""r"""'], {'label': '"""model"""'}), "(xdat, slope * xdat + intercept, 'r', label='model')\n", (3329, 3381), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((3402, 3414), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3412, 3414), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((3416, 3430), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3424, 3430), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((3445, 3455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3453, 3455), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((987, 
1004), 'tensorflow.multiply', 'tf.multiply', (['a', 'X'], {}), '(a, X)\n', (998, 1004), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((1191, 1204), 'tensorflow.abs', 'tf.abs', (['error'], {}), '(error)\n', (1197, 1204), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((2019, 2031), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2029, 2031), True, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n'), ((2121, 2132), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2130, 2132), False, 'import os, xlrd, numpy as np, tensorflow as tf, matplotlib.pyplot as plt\n')] |
from __future__ import absolute_import
import os
import glob
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
class TimeDataset(Dataset):
    """Sliding-window dataset over the OTB100 BlurBody ground-truth boxes.

    Each sample pairs a window of ``batch_len`` consecutive annotation rows
    with the per-step width/height ratios between consecutive rows
    (columns 2 and 3 of the annotation file).
    """
    def __init__(self):
        self.seq_dir = 'H:/datasets/OTB100/BlurBody'
        self.img_files = sorted(glob.glob(self.seq_dir + '/img/*.jpg'))
        # One row per frame; assumes columns are x, y, w, h -- TODO confirm.
        self.anno = np.loadtxt(self.seq_dir + '/groundtruth_rect.txt', delimiter=',', dtype=np.float32)
        self.total_len = self.anno.shape[0]
        self.batch_len = 5
        self.input_size = 4
        self.stride = 1
        self.seq_len = int((self.total_len - self.batch_len) / self.stride)
        windows = []
        ratios = []
        for start in range(self.seq_len):
            window = [self.anno[start + offset] for offset in range(self.batch_len)]
            # Ratio of next frame's width/height to the current frame's.
            ratio = [
                [self.anno[start + offset + 1][2] / self.anno[start + offset][2],
                 self.anno[start + offset + 1][3] / self.anno[start + offset][3]]
                for offset in range(self.batch_len)
            ]
            windows.append(window)
            ratios.append(ratio)
        self.x = np.array(windows)
        self.y = np.array(ratios)
        self.x_data = torch.from_numpy(self.x)
        self.y_data = torch.from_numpy(self.y)
        self.len = self.seq_len

    def __getitem__(self, index):
        # One (annotation window, ratio window) pair as torch tensors.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.seq_len
if __name__ == '__main__':
    # Smoke test: build the dataset and iterate one pass of batches.
    timeDataset = TimeDataset()
    train_loader = DataLoader(dataset=timeDataset,
                              batch_size=10,
                              shuffle=False)
    print(len(train_loader))  # number of batches per pass
    for i, data in enumerate(train_loader):
        inputs, labels = data
        print(labels.shape)  # (batch, batch_len, 2): per-step w/h ratios
| [
"torch.utils.data.DataLoader",
"numpy.array",
"numpy.loadtxt",
"glob.glob",
"torch.from_numpy"
] | [((1665, 1726), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'timeDataset', 'batch_size': '(10)', 'shuffle': '(False)'}), '(dataset=timeDataset, batch_size=10, shuffle=False)\n', (1675, 1726), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((342, 430), 'numpy.loadtxt', 'np.loadtxt', (["(self.seq_dir + '/groundtruth_rect.txt')"], {'delimiter': '""","""', 'dtype': 'np.float32'}), "(self.seq_dir + '/groundtruth_rect.txt', delimiter=',', dtype=np.\n float32)\n", (352, 430), True, 'import numpy as np\n'), ((1236, 1252), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (1244, 1252), True, 'import numpy as np\n'), ((1270, 1286), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (1278, 1286), True, 'import numpy as np\n'), ((1339, 1363), 'torch.from_numpy', 'torch.from_numpy', (['self.x'], {}), '(self.x)\n', (1355, 1363), False, 'import torch\n'), ((1386, 1410), 'torch.from_numpy', 'torch.from_numpy', (['self.y'], {}), '(self.y)\n', (1402, 1410), False, 'import torch\n'), ((282, 320), 'glob.glob', 'glob.glob', (["(self.seq_dir + '/img/*.jpg')"], {}), "(self.seq_dir + '/img/*.jpg')\n", (291, 320), False, 'import glob\n')] |
#!/user/bin/python
import numpy as np
from matplotlib.pylab import *
import time
# Define Parameters
N = 50             # lattice length (N x N spins)
T = 1.             # temperature
J = 2.             # interaction energy
h = 0.0            # external field strength
steps = -1         # number of total steps, runs forever if negative
update_ix = 1000   # iterations between figure updates

# Derived quantities
L = N**2    # total number of sites on lattice
B = 1./T    # inverse temperature (k_B = 1)

# initialize spin configuration as random configuration w/ value -1 or +1
spin_config = np.random.randint(low=0, high=2, size=(N, N))*2 - 1

ion()                  # interactive mode on, necessary for live figure updates
figure('Ising Model')  # create figure

ix = 0  # iteration counter
while (ix < steps) or steps < 0:
    # Select spin with selection probability = 1/L
    row = np.random.randint(N)
    column = np.random.randint(N)
    # Nearest neighbours with periodic boundary conditions
    sij = spin_config[row, column]
    s1 = spin_config[(row+1) % N, column]
    s2 = spin_config[(row-1) % N, column]
    s3 = spin_config[row, (column+1) % N]
    s4 = spin_config[row, (column-1) % N]
    # Local energy before (H1) and after (H2) flipping sij.  Only terms
    # involving sij are kept: the rest of the lattice contributes equally
    # to H1 and H2 and cancels in H2 - H1, so the full-lattice sum the
    # field term previously required is unnecessary -- this makes the
    # h != 0 case O(1) per step instead of O(N^2), with an identical
    # acceptance probability.
    H1 = -1*J*sij*(s1+s2+s3+s4) + h*sij
    H2 = J*sij*(s1+s2+s3+s4) - h*sij
    # Metropolis acceptance
    if (H2-H1) <= 0:  # lower energy: always accept
        spin_config[row, column] = -1*spin_config[row, column]
    else:             # higher energy: accept with Boltzmann probability
        probability = np.exp(-1.*B*(H2-H1))
        if np.random.random() < probability:
            spin_config[row, column] = -1*spin_config[row, column]
    # update plot every update_ix iterations
    if (ix % update_ix) == 0:
        print('steps:', ix)  # print progress
        clf()  # clear figure
        imshow(spin_config, cmap='Greys_r', interpolation='none')  # image plot
        tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
        tick_params(axis='y', which='both', right='off', left='off', labelleft='off')
        draw()         # update figure
        pause(0.0001)  # pause required to refresh the figure
    # increment index (iteration)
    ix += 1
| [
"numpy.random.randint",
"numpy.exp",
"numpy.random.random"
] | [((849, 869), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (866, 869), True, 'import numpy as np\n'), ((883, 903), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (900, 903), True, 'import numpy as np\n'), ((552, 597), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(N, N)'}), '(low=0, high=2, size=(N, N))\n', (569, 597), True, 'import numpy as np\n'), ((1691, 1719), 'numpy.exp', 'np.exp', (['(-1.0 * B * (H2 - H1))'], {}), '(-1.0 * B * (H2 - H1))\n', (1697, 1719), True, 'import numpy as np\n'), ((1724, 1742), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1740, 1742), True, 'import numpy as np\n')] |
from subprocess import call
import os
from urllib.request import urlretrieve
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
def load_data(model, test_size=40):
    """Load images from the ``Images`` directory and split them per category.

    Images are organised as ``Images/<category>/<image file>``.  Within each
    category (sorted order) the first ``test_size`` images go to the test set
    and the rest to the training set.

    Args:
        model: ``"Resnet"`` selects 224x224 input images; any other value
            selects 299x299 (Inception-style input size).
        test_size: number of images per category reserved for the test set
            (default 40, matching the original behaviour).

    Returns:
        ``((x_train, y_train), (x_test, y_test), class_names)`` where the
        x/y values are numpy arrays and ``class_names`` is the ordered list
        of category directory names.
    """
    img_size = 224 if model == "Resnet" else 299
    print(img_size)
    x_train, y_train = [], []
    x_test, y_test = [], []
    class_names = []
    for category_num, category in enumerate(sorted(os.listdir("Images"))):
        class_names.append(category)
        category_dir = os.path.join("Images", category)
        for count, img in enumerate(sorted(os.listdir(category_dir))):
            x = load_img(os.path.join(category_dir, img),
                         target_size=(img_size, img_size))
            if count < test_size:
                x_test.append(img_to_array(x))
                y_test.append(category_num)
            else:
                x_train.append(img_to_array(x))
                y_train.append(category_num)
    print("Entrenamiento:", len(x_train))
    print("Test:", len(x_test))
    return (np.array(x_train), np.array(y_train)), (np.array(x_test), np.array(y_test)), class_names
| [
"os.path.join",
"keras.preprocessing.image.img_to_array",
"numpy.array",
"os.listdir"
] | [((466, 486), 'os.listdir', 'os.listdir', (['"""Images"""'], {}), "('Images')\n", (476, 486), False, 'import os\n'), ((1039, 1056), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1047, 1056), True, 'import numpy as np\n'), ((1081, 1097), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1089, 1097), True, 'import numpy as np\n'), ((1112, 1129), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1120, 1129), True, 'import numpy as np\n'), ((1131, 1148), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1139, 1148), True, 'import numpy as np\n'), ((1152, 1168), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1160, 1168), True, 'import numpy as np\n'), ((1170, 1186), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1178, 1186), True, 'import numpy as np\n'), ((581, 613), 'os.path.join', 'os.path.join', (['"""Images"""', 'category'], {}), "('Images', category)\n", (593, 613), False, 'import os\n'), ((659, 696), 'os.path.join', 'os.path.join', (['"""Images"""', 'category', 'img'], {}), "('Images', category, img)\n", (671, 696), False, 'import os\n'), ((787, 802), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['x'], {}), '(x)\n', (799, 802), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((897, 912), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['x'], {}), '(x)\n', (909, 912), False, 'from keras.preprocessing.image import load_img, img_to_array\n')] |
from typing import Any, Protocol, TypeVar, Union, Tuple, Iterator, Optional, Iterable, Callable, overload
import numpy as np
import numpy.typing as npt
from .typing import Arr3i, Index3, Vec3i
SliceOpt = Union[int, slice, None]


def to_slice(s: SliceOpt = None) -> slice:
    """Normalise *s* to a slice.

    Slices pass through unchanged, ``None`` becomes the full slice, and an
    int ``i`` becomes the single-element slice ``i:i+1``.
    """
    if isinstance(s, slice):
        return s
    if s is None:
        return slice(s)
    return slice(s, s + 1)


class SlicedRangeIterator:
    """1D slice iterator over the integer interval [low, high), restricted by a slice."""

    @classmethod
    def _indices(cls, low: int, high: int, s: slice, clip: bool = True) -> Tuple[int, int, int]:
        """Resolve *s* against [low, high) into concrete (start, stop, step).

        When *clip* is true, start is rounded up onto the step grid anchored
        at *low* and both ends are clamped into [low, high).
        """
        step = s.step or 1
        start = low if s.start is None else s.start
        stop = high if s.stop is None else s.stop
        if clip:
            start = max(start, low + (start - low) % step)
            stop = min(stop, high)
        return start, stop, step

    def __init__(self, low: int, high: int, s: SliceOpt, clip: bool = True):
        self._low = int(low)
        self._high = int(high)
        self._slice = to_slice(s)
        self._start, self._stop, self._step = self._indices(low, high, self._slice, clip)
        self.clip = clip

    def range(self) -> range:
        return range(self._start, self._stop, self._step)

    def __contains__(self, item: Any) -> bool:
        # An int is contained iff it lies in [start, stop) and on the step grid.
        if isinstance(item, int):
            return self._start <= item < self._stop and (item % self._step) == (self._start % self._step)
        return False

    def __iter__(self) -> Iterator[int]:
        yield from range(self._start, self._stop, self._step)

    def __len__(self) -> int:
        # Delegate to range() so the reported length always matches iteration.
        # BUGFIX: the previous formula, max(0, ds // step, ds % step > 0),
        # took the max of the quotient and the remainder flag instead of
        # their sum, undercounting by one whenever step did not divide the
        # span evenly (e.g. it reported len 2 for range(0, 5, 2)).
        return len(range(self._start, self._stop, self._step))

    @property
    def low(self) -> int:
        return self._low

    @property
    def high(self) -> int:
        return self._high

    @property
    def slice(self) -> slice:
        return self._slice

    @property
    def start(self) -> int:
        return self._start

    @property
    def stop(self) -> int:
        return self._stop

    @property
    def step(self) -> int:
        return self._step

    def __floordiv__(self, other: int) -> "SlicedRangeIterator":
        """Scale the iterator down by *other*, rounding bounds up (ceil-div)."""
        high = -(-self._high // other)
        slice_stop = -(-self._stop // other)
        return SlicedRangeIterator(
            self._low // other,
            high,
            slice(self._start // other, slice_stop, max(1, self._step // other)),
            self.clip,
        )
class VoxelGridIterator:
    """3D slice iterator over the integer grid [low, high), restricted per axis."""

    @classmethod
    def require_bounded(cls, x: SliceOpt, y: SliceOpt, z: SliceOpt) -> "VoxelGridIterator":
        """Build an iterator from three slices whose start and stop must be set."""
        sx, sy, sz = to_slice(x), to_slice(y), to_slice(z)
        assert sx.start is not None and sx.stop is not None
        assert sy.start is not None and sy.stop is not None
        assert sz.start is not None and sz.stop is not None
        return cls((sx.start, sy.start, sz.start), (sx.stop, sy.stop, sz.stop), sx, sy, sz)

    @classmethod
    def empty(cls) -> "VoxelGridIterator":
        """An iterator over the empty grid [0, 0)^3."""
        return cls(np.zeros(3), np.zeros(3), 0, 0, 0)  # type: ignore

    def __init__(
        self, low: Vec3i | Index3, high: Vec3i | Index3, x: SliceOpt = None, y: SliceOpt = None, z: SliceOpt = None, clip: bool = True
    ):
        self._low: Arr3i = np.asarray(low, dtype=int)
        self._high: Arr3i = np.asarray(high, dtype=int)
        assert self._low.shape == (3,) and self._high.shape == (3,)
        # One 1D iterator per axis carries all slicing/clipping logic.
        axes = [
            SlicedRangeIterator(lo, hi, s, clip)
            for lo, hi, s in zip(self._low, self._high, (x, y, z))
        ]
        self._x, self._y, self._z = axes
        self.clip = clip

    def __contains__(self, item: Arr3i) -> bool:
        if len(item) != 3:
            return False
        return item[0] in self._x and item[1] in self._y and item[2] in self._z

    def iter_with_indices(self) -> Iterator[Tuple[Index3, Index3]]:
        """Yield ((i, j, k), (u, v, w)): enumeration index and grid point."""
        for ix, gx in enumerate(self._x.range()):
            for iy, gy in enumerate(self._y.range()):
                for iz, gz in enumerate(self._z.range()):
                    yield (ix, iy, iz), (gx, gy, gz)

    def iter(self) -> Iterator[Index3]:
        """Yield every (x, y, z) grid point in row-major order."""
        for gx in self._x.range():
            for gy in self._y.range():
                for gz in self._z.range():
                    yield gx, gy, gz

    def __iter__(self) -> Iterator[Index3]:
        return self.iter()

    def __len__(self) -> int:
        nx, ny, nz = self.shape
        return nx * ny * nz

    @property
    def shape(self) -> Tuple[int, int, int]:
        return len(self._x), len(self._y), len(self._z)

    @property
    def low(self) -> Arr3i:
        return self._low

    @property
    def high(self) -> Arr3i:
        return self._high

    @property
    def x(self) -> SlicedRangeIterator:
        return self._x

    @property
    def y(self) -> SlicedRangeIterator:
        return self._y

    @property
    def z(self) -> SlicedRangeIterator:
        return self._z

    @property
    def start(self) -> Arr3i:
        return np.asarray((self._x.start, self._y.start, self._z.start), dtype=int)

    @property
    def stop(self) -> Arr3i:
        return np.asarray((self._x.stop, self._y.stop, self._z.stop), dtype=int)

    @property
    def step(self) -> Arr3i:
        return np.asarray((self._x.step, self._y.step, self._z.step), dtype=int)

    def __floordiv__(self, other: int) -> "VoxelGridIterator":
        """Scale the grid down by *other*, rounding the upper bound up."""
        shrunk = [axis.__floordiv__(other) for axis in (self._x, self._y, self._z)]
        new_high = -(-self._high // other)
        return VoxelGridIterator(
            self._low // other, new_high,
            shrunk[0].slice, shrunk[1].slice, shrunk[2].slice, self.clip,
        )
| [
"numpy.asarray",
"numpy.zeros"
] | [((3303, 3329), 'numpy.asarray', 'np.asarray', (['low'], {'dtype': 'int'}), '(low, dtype=int)\n', (3313, 3329), True, 'import numpy as np\n'), ((3358, 3385), 'numpy.asarray', 'np.asarray', (['high'], {'dtype': 'int'}), '(high, dtype=int)\n', (3368, 3385), True, 'import numpy as np\n'), ((5059, 5127), 'numpy.asarray', 'np.asarray', (['(self._x.start, self._y.start, self._z.start)'], {'dtype': 'int'}), '((self._x.start, self._y.start, self._z.start), dtype=int)\n', (5069, 5127), True, 'import numpy as np\n'), ((5187, 5252), 'numpy.asarray', 'np.asarray', (['(self._x.stop, self._y.stop, self._z.stop)'], {'dtype': 'int'}), '((self._x.stop, self._y.stop, self._z.stop), dtype=int)\n', (5197, 5252), True, 'import numpy as np\n'), ((5312, 5377), 'numpy.asarray', 'np.asarray', (['(self._x.step, self._y.step, self._z.step)'], {'dtype': 'int'}), '((self._x.step, self._y.step, self._z.step), dtype=int)\n', (5322, 5377), True, 'import numpy as np\n'), ((3064, 3075), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3072, 3075), True, 'import numpy as np\n'), ((3077, 3088), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3085, 3088), True, 'import numpy as np\n')] |
'''
Created on 15 Aug 2013
@author: <NAME>
'''
import math
import textwrap
import tkinter
from tkinter import messagebox
import numpy as np
np.seterr(all="ignore")
from core.isopach import Isopach
from settings import Model
from desktop import helper_functions
from desktop.thread_handlers import ThreadHandler
from desktop.timing_module import createWeibullTimingEstimationFunction
from desktop.tooltip import ToolTip
from desktop.frames.model_frame import ModelFrame
from desktop.frames.isopach_frame import IsopachFrame
from desktop.frames.calculation_frame import CalculationFrame
from desktop.frames.results_frame import ResultsFrame
######### Themes ###########
# Use the first available ttk theme from the preferred (platform-native first) order.
desiredOrder = ["aqua", "vista", "xpnative", "clam"]
for theme in desiredOrder:
    style = tkinter.ttk.Style()
    if theme in style.theme_names():
        style.theme_use(theme)
        break
############################
class App(tkinter.ttk.Frame):
    """Main AshCalc window.

    Assembles the four UI frames (calculation control, isopach input, model
    selection, results), seeds the isopach table with example data, and
    mediates the background calculation thread by polling for results.
    """
    def __init__(self):
        tkinter.ttk.Frame.__init__(self)
        self.master.title("AshCalc")
        self.threadHandler = ThreadHandler()
        self.calculating = False  # True while a background calculation is in flight
        self.weibullTimingEstimationFunction = createWeibullTimingEstimationFunction()
        # Calculation control frame (start/stop buttons, progress bar)
        self.calculationFrame = CalculationFrame(self)
        self.calculationFrame.grid(row=0,column=0,sticky="NSWE",padx=10,pady=10)
        self.calculationFrame.startCalculationB.bind("<Button-1>",self.startCalculation)
        self.calculationFrame.endCalculationB.configure(state=tkinter.DISABLED)
        # Isopach data-entry frame; changes re-estimate the Weibull run time
        self.isopachFrame = IsopachFrame(self,self.estimateWeibullCalculationTime)
        self.isopachFrame.grid(row=1,column=0,padx=10,sticky="NSE",pady=10)
        # Model selection frame; keystrokes in the run/iteration fields
        # refresh the estimated calculation time
        self.modelFrame = ModelFrame(self)
        self.modelFrame.grid(row=0,column=1,sticky="NESW",padx=10,pady=10)
        self.modelFrame.weiNumberOfRuns_E.bind("<KeyRelease>",self.estimateWeibullCalculationTime)
        self.modelFrame.weiIterationsPerRun_E.bind("<KeyRelease>",self.estimateWeibullCalculationTime)
        self.estimateWeibullCalculationTime(None)
        # Results display frame
        self.resultsFrame = ResultsFrame(self)
        self.resultsFrame.grid(row=1,column=1,padx=10,sticky="NSEW",pady=10)
        # Example isopach data (thickness in metres, sqrt(area) in km)
        self.isopachFrame.loadData([Isopach(0.4, 16.25),Isopach(0.2, 30.63),Isopach(0.1, 58.87),Isopach(0.05, 95.75),Isopach(0.02, 181.56),Isopach(0.01, 275.1)])
        self.createTooltips()
        self.pack()
        self.mainloop()
    def startCalculation(self, event):
        """Validate the inputs, launch the calculation thread and start polling."""
        try:
            isopachs = self.isopachFrame.getData()
            modelDetails = self.modelFrame.getModelDetails()
            self.threadHandler.startCalculation(modelDetails[0], [isopachs] + modelDetails[1:])
        except ValueError as ve:
            messagebox.showerror("Calculation error", ve.args[0])
            return
        self.resultsFrame.clear()
        self.calculationFrame.calculationPB.start(interval=3)
        # Swap the enabled button from "start" to "end" while running
        self.calculationFrame.startCalculationB.configure(state=tkinter.DISABLED)
        self.calculationFrame.startCalculationB.unbind("<Button-1>")
        self.calculationFrame.endCalculationB.configure(state=tkinter.ACTIVE)
        self.calculationFrame.endCalculationB.bind("<Button-1>",self.finishCalculation)
        self.calculating = True
        self.poll()
    def poll(self):
        """Check the worker thread for a result; reschedule itself until one arrives."""
        result = self.threadHandler.getCurrentCalculationResult()
        if result is not None:
            modelType, results = result
            if modelType == "Error":
                messagebox.showerror("Calculation error", results.args[0])
            else:
                self.resultsFrame.displayNewModel(modelType,results)
            self.finishCalculation(None)
        elif self.calculating:
            # No result yet: poll again in 100 ms on the Tk event loop
            self.after(100, self.poll)
    def finishCalculation(self,_):
        """Stop the calculation thread and restore the start/end button states."""
        self.threadHandler.cancelLastCalculation()
        self.calculating = False
        self.calculationFrame.startCalculationB.configure(state=tkinter.ACTIVE)
        self.calculationFrame.startCalculationB.bind("<Button-1>", self.startCalculation)
        self.calculationFrame.endCalculationB.configure(state=tkinter.DISABLED)
        self.calculationFrame.endCalculationB.unbind("<Button-1>")
        self.calculationFrame.calculationPB.stop()
    def estimateWeibullCalculationTime(self,event):
        """Refresh the estimated-time field from the current run/iteration inputs.

        Any invalid or non-positive input shows "N/A" instead.
        """
        try:
            numberOfIsopachs = self.isopachFrame.getNumberOfIncludedIsopachs()
            numberOfRuns = int(self.modelFrame.weiNumberOfRuns_E.get())
            iterationsPerRun = int(self.modelFrame.weiIterationsPerRun_E.get())
            if numberOfRuns <= 0 or iterationsPerRun <= 0 or numberOfIsopachs <= 0:
                raise ValueError()
            est = self.weibullTimingEstimationFunction(numberOfIsopachs,iterationsPerRun,numberOfRuns)
            self.modelFrame.weiEstimatedTime_E.insertNew(helper_functions.roundToSF(est,2))
        except ValueError:
            self.modelFrame.weiEstimatedTime_E.insertNew("N/A")
    def createTooltips(self):
        """Attach hover tooltips to the widgets; long texts are word-wrapped."""
        statsFrame = self.resultsFrame.statsFrame
        tips = [
            (self.modelFrame.weiNumberOfRuns_E, True, "The number of possible sets of parameters that are generated. The final parameters returned are the set which best fit the data. See the instruction manual for further details."),
            (self.modelFrame.weiIterationsPerRun_E, True, "The number of times the current parameters are adjusted within each run. See the instruction manual for further details."),
            (self.modelFrame.weiEstimatedTime_E, True, "A rough estimate of the time required to execute this computation."),
            (self.resultsFrame.statsFrame.totalEstimatedVolume_E, True, "The model's estimate for the total volume of the tephra deposit."),
            (self.resultsFrame.statsFrame.relativeSquaredError_E, True, "A measure of the goodness of fit of the model. Comparisons are only valid when comparing different models for identical isopach data."),
            (self.resultsFrame.statsFrame.expSegVolume_E, True, "The model's estimate for the volume of this segment of the tephra deposit."),
            (self.resultsFrame.statsFrame.powSuggestedProximalLimit_E, True, "An estimate for the proximal limit of integration as described in Bonadonna and Houghton 2005. Requires 4 or more isopachs."),
            (self.resultsFrame.errorSurfaceFrame.errorResolutionE, True, "The resolution of the error surface, which is modelled by a grid of 'resolution' x 'resolution' points."),
            (self.isopachFrame.loadFromFileButton, False, "Load isopach data from a CSV file of the form: \n\tthickness1, \u221AArea1\n\tthickness2, \u221AArea2\n\t...\n\tthicknessN, \u221AAreaN\nwith thickness in metres and \u221AArea in kilometres"),
        ]
        for target, wrap, tip in tips:
            if wrap:
                tip = "\n".join(textwrap.wrap(tip, 60))
            ToolTip(target, tip)
| [
"desktop.frames.results_frame.ResultsFrame",
"desktop.frames.model_frame.ModelFrame",
"desktop.thread_handlers.ThreadHandler",
"desktop.helper_functions.roundToSF",
"numpy.seterr",
"textwrap.wrap",
"tkinter.messagebox.showerror",
"desktop.frames.isopach_frame.IsopachFrame",
"tkinter.ttk.Style",
"c... | [((155, 178), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (164, 178), True, 'import numpy as np\n'), ((976, 1008), 'tkinter.ttk.Frame.__init__', 'tkinter.ttk.Frame.__init__', (['self'], {}), '(self)\n', (1002, 1008), False, 'import tkinter\n'), ((1069, 1084), 'desktop.thread_handlers.ThreadHandler', 'ThreadHandler', ([], {}), '()\n', (1082, 1084), False, 'from desktop.thread_handlers import ThreadHandler\n'), ((1155, 1194), 'desktop.timing_module.createWeibullTimingEstimationFunction', 'createWeibullTimingEstimationFunction', ([], {}), '()\n', (1192, 1194), False, 'from desktop.timing_module import createWeibullTimingEstimationFunction\n'), ((1226, 1248), 'desktop.frames.calculation_frame.CalculationFrame', 'CalculationFrame', (['self'], {}), '(self)\n', (1242, 1248), False, 'from desktop.frames.calculation_frame import CalculationFrame\n'), ((1509, 1564), 'desktop.frames.isopach_frame.IsopachFrame', 'IsopachFrame', (['self', 'self.estimateWeibullCalculationTime'], {}), '(self, self.estimateWeibullCalculationTime)\n', (1521, 1564), False, 'from desktop.frames.isopach_frame import IsopachFrame\n'), ((1660, 1676), 'desktop.frames.model_frame.ModelFrame', 'ModelFrame', (['self'], {}), '(self)\n', (1670, 1676), False, 'from desktop.frames.model_frame import ModelFrame\n'), ((2009, 2027), 'desktop.frames.results_frame.ResultsFrame', 'ResultsFrame', (['self'], {}), '(self)\n', (2021, 2027), False, 'from desktop.frames.results_frame import ResultsFrame\n'), ((6400, 6420), 'desktop.tooltip.ToolTip', 'ToolTip', (['target', 'tip'], {}), '(target, tip)\n', (6407, 6420), False, 'from desktop.tooltip import ToolTip\n'), ((799, 818), 'tkinter.ttk.Style', 'tkinter.ttk.Style', ([], {}), '()\n', (816, 818), False, 'import tkinter\n'), ((837, 856), 'tkinter.ttk.Style', 'tkinter.ttk.Style', ([], {}), '()\n', (854, 856), False, 'import tkinter\n'), ((2133, 2152), 'core.isopach.Isopach', 'Isopach', (['(0.4)', '(16.25)'], {}), '(0.4, 
16.25)\n', (2140, 2152), False, 'from core.isopach import Isopach\n'), ((2153, 2172), 'core.isopach.Isopach', 'Isopach', (['(0.2)', '(30.63)'], {}), '(0.2, 30.63)\n', (2160, 2172), False, 'from core.isopach import Isopach\n'), ((2173, 2192), 'core.isopach.Isopach', 'Isopach', (['(0.1)', '(58.87)'], {}), '(0.1, 58.87)\n', (2180, 2192), False, 'from core.isopach import Isopach\n'), ((2193, 2213), 'core.isopach.Isopach', 'Isopach', (['(0.05)', '(95.75)'], {}), '(0.05, 95.75)\n', (2200, 2213), False, 'from core.isopach import Isopach\n'), ((2214, 2235), 'core.isopach.Isopach', 'Isopach', (['(0.02)', '(181.56)'], {}), '(0.02, 181.56)\n', (2221, 2235), False, 'from core.isopach import Isopach\n'), ((2236, 2256), 'core.isopach.Isopach', 'Isopach', (['(0.01)', '(275.1)'], {}), '(0.01, 275.1)\n', (2243, 2256), False, 'from core.isopach import Isopach\n'), ((2593, 2646), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Calculation error"""', 've.args[0]'], {}), "('Calculation error', ve.args[0])\n", (2613, 2646), False, 'from tkinter import messagebox\n'), ((3268, 3326), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Calculation error"""', 'results.args[0]'], {}), "('Calculation error', results.args[0])\n", (3288, 3326), False, 'from tkinter import messagebox\n'), ((4449, 4483), 'desktop.helper_functions.roundToSF', 'helper_functions.roundToSF', (['est', '(2)'], {}), '(est, 2)\n', (4475, 4483), False, 'from desktop import helper_functions\n'), ((6372, 6394), 'textwrap.wrap', 'textwrap.wrap', (['tip', '(60)'], {}), '(tip, 60)\n', (6385, 6394), False, 'import textwrap\n')] |
"""
Very simple implementation for MNIST training code with Chainer using
Multi Layer Perceptron (MLP) model
This code is to explain the basic of training procedure.
"""
from __future__ import print_function
import time
import os
import numpy as np
import six
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from chainer import serializers
class MLP(chainer.Chain):
    """Three-layer fully connected network (Multi Layer Perceptron).

    Each linear layer's input size is inferred lazily from the first call.
    """

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # `None` lets chainer infer the input size of each layer
            self.l1 = L.Linear(None, n_units)  # n_in -> n_units
            self.l2 = L.Linear(None, n_units)  # n_units -> n_units
            self.l3 = L.Linear(None, n_out)    # n_units -> n_out

    def __call__(self, x):
        # Two ReLU hidden layers followed by a linear readout.
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class SoftmaxClassifier(chainer.Chain):
    """Wraps a predictor and computes its classification loss.

    The predictor maps inputs to per-label scores; this chain turns them
    into a softmax cross-entropy loss and records accuracy as a side effect.
    """

    def __init__(self, predictor):
        super(SoftmaxClassifier, self).__init__()
        with self.init_scope():
            self.predictor = predictor

    def __call__(self, x, t):
        logits = self.predictor(x)
        # Stash loss/accuracy as attributes so the training loop can read them.
        self.loss = F.softmax_cross_entropy(logits, t)
        self.accuracy = F.accuracy(logits, t)
        return self.loss
def main():
    """Train a 3-layer MLP classifier on MNIST with a hand-written loop.

    Runs `epoch` passes of minibatch Adam over the training set, evaluates
    on the test set after every pass, and saves the trained model and
    optimizer state under `out`.
    """
    # Configuration setting
    gpu = -1  # GPU ID to be used for calculation. -1 indicates to use only CPU.
    batchsize = 100  # Minibatch size for training
    epoch = 20  # Number of training epoch
    out = 'result/1'  # Directory to save the results
    unit = 50  # Number of hidden layer units, try increasing this value and see how accuracy changes.
    print('GPU: {}'.format(gpu))
    print('# unit: {}'.format(unit))
    print('# Minibatch-size: {}'.format(batchsize))
    print('# epoch: {}'.format(epoch))
    print('out directory: {}'.format(out))

    # Set up a neural network to train
    model = MLP(unit, 10)
    # Classifier will calculate classification loss, based on the output of model
    classifier_model = SoftmaxClassifier(model)
    if gpu >= 0:
        chainer.cuda.get_device(gpu).use()  # Make a specified GPU current
        classifier_model.to_gpu()  # Copy the model to the GPU
    # xp selects the array module: numpy on CPU, cupy on GPU
    xp = np if gpu < 0 else cuda.cupy

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(classifier_model)

    # Load the MNIST dataset (downloads it on first use)
    train, test = chainer.datasets.get_mnist()

    n_epoch = epoch
    N = len(train)  # training data size
    N_test = len(test)  # test data size

    # Learning loop
    for epoch in range(1, n_epoch + 1):
        print('epoch', epoch)

        # training: iterate minibatches in a fresh random order each epoch
        perm = np.random.permutation(N)
        sum_accuracy = 0
        sum_loss = 0
        start = time.time()
        for i in six.moves.range(0, N, batchsize):
            x = chainer.Variable(xp.asarray(train[perm[i:i + batchsize]][0]))
            t = chainer.Variable(xp.asarray(train[perm[i:i + batchsize]][1]))

            # Pass the loss function (Classifier defines it) and its arguments
            optimizer.update(classifier_model, x, t)

            # Weight each batch's mean loss/accuracy by its size
            sum_loss += float(classifier_model.loss.data) * len(t.data)
            sum_accuracy += float(classifier_model.accuracy.data) * len(t.data)
        end = time.time()
        elapsed_time = end - start
        throughput = N / elapsed_time
        print('train mean loss={}, accuracy={}, throughput={} images/sec'.format(
            sum_loss / N, sum_accuracy / N, throughput))

        # evaluation on the held-out test set (no parameter updates)
        sum_accuracy = 0
        sum_loss = 0
        for i in six.moves.range(0, N_test, batchsize):
            index = np.asarray(list(range(i, i + batchsize)))
            x = chainer.Variable(xp.asarray(test[index][0]))
            t = chainer.Variable(xp.asarray(test[index][1]))

            loss = classifier_model(x, t)
            sum_loss += float(loss.data) * len(t.data)
            sum_accuracy += float(classifier_model.accuracy.data) * len(t.data)
        print('test mean loss={}, accuracy={}'.format(
            sum_loss / N_test, sum_accuracy / N_test))

    # Save the model and the optimizer
    if not os.path.exists(out):
        os.makedirs(out)
    print('save the model')
    serializers.save_npz('{}/classifier_mlp.model'.format(out), classifier_model)
    serializers.save_npz('{}/mlp.model'.format(out), model)
    print('save the optimizer')
    serializers.save_npz('{}/mlp.state'.format(out), optimizer)
if __name__ == '__main__':
    # Script entry point: run the training loop.
    main()
| [
"chainer.optimizers.Adam",
"chainer.functions.softmax_cross_entropy",
"six.moves.range",
"os.makedirs",
"chainer.cuda.get_device",
"os.path.exists",
"time.time",
"numpy.random.permutation",
"chainer.functions.accuracy",
"chainer.datasets.get_mnist",
"chainer.links.Linear"
] | [((2597, 2622), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {}), '()\n', (2620, 2622), False, 'import chainer\n'), ((2709, 2737), 'chainer.datasets.get_mnist', 'chainer.datasets.get_mnist', ([], {}), '()\n', (2735, 2737), False, 'import chainer\n'), ((1425, 1454), 'chainer.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['y', 't'], {}), '(y, t)\n', (1448, 1454), True, 'import chainer.functions as F\n'), ((1479, 1495), 'chainer.functions.accuracy', 'F.accuracy', (['y', 't'], {}), '(y, t)\n', (1489, 1495), True, 'import chainer.functions as F\n'), ((2972, 2996), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (2993, 2996), True, 'import numpy as np\n'), ((3059, 3070), 'time.time', 'time.time', ([], {}), '()\n', (3068, 3070), False, 'import time\n'), ((3088, 3120), 'six.moves.range', 'six.moves.range', (['(0)', 'N', 'batchsize'], {}), '(0, N, batchsize)\n', (3103, 3120), False, 'import six\n'), ((3578, 3589), 'time.time', 'time.time', ([], {}), '()\n', (3587, 3589), False, 'import time\n'), ((3887, 3924), 'six.moves.range', 'six.moves.range', (['(0)', 'N_test', 'batchsize'], {}), '(0, N_test, batchsize)\n', (3902, 3924), False, 'import six\n'), ((4451, 4470), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (4465, 4470), False, 'import os\n'), ((4480, 4496), 'os.makedirs', 'os.makedirs', (['out'], {}), '(out)\n', (4491, 4496), False, 'import os\n'), ((689, 712), 'chainer.links.Linear', 'L.Linear', (['None', 'n_units'], {}), '(None, n_units)\n', (697, 712), True, 'import chainer.links as L\n'), ((754, 777), 'chainer.links.Linear', 'L.Linear', (['None', 'n_units'], {}), '(None, n_units)\n', (762, 777), True, 'import chainer.links as L\n'), ((822, 843), 'chainer.links.Linear', 'L.Linear', (['None', 'n_out'], {}), '(None, n_out)\n', (830, 843), True, 'import chainer.links as L\n'), ((2378, 2406), 'chainer.cuda.get_device', 'chainer.cuda.get_device', (['gpu'], {}), '(gpu)\n', (2401, 2406), False, 
'import chainer\n')] |
# ## Helper classes and functions
import re
import io
from string import digits
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.pyplot import figure
import tensorflow as tf
def preprocess(sentence):
    """Normalize a raw sentence and wrap it with start/end tokens.

    Lowercases the text, collapses runs of spaces, drops apostrophes and
    digits, pads the punctuation marks ?.!,¿ with spaces, and surrounds
    the result with the 'start_' / '_end' markers used by the decoder.
    """
    digit_table = str.maketrans('', '', digits)
    cleaned = sentence.lower()
    cleaned = re.sub(" +", " ", cleaned)
    cleaned = re.sub("'", '', cleaned)
    cleaned = cleaned.translate(digit_table).strip()
    # Pad punctuation so each mark becomes its own token.
    cleaned = re.sub(r"([?.!,¿])", r" \1 ", cleaned)
    cleaned = cleaned.strip()
    return 'start_ ' + cleaned + ' _end'
def create_dataset(path, num_examples):
    """Read a tab-separated corpus file and return preprocessed columns.

    Each line holds one sentence pair separated by a tab; only the first
    two columns are kept and every sentence is run through ``preprocess``.

    Returns:
        zip: the transposed word pairs, e.g. (english_column, spanish_column).
    """
    raw_text = io.open(path, encoding='UTF-8').read()
    lines = raw_text.strip().split('\n')
    word_pairs = []
    for line in lines[:num_examples]:
        word_pairs.append([preprocess(col) for col in line.split('\t')[:2]])
    return zip(*word_pairs)
def max_length(tensor):
    """Return the length of the longest sequence in *tensor*."""
    lengths = (len(sequence) for sequence in tensor)
    return max(lengths)
def convert(lang, tensor):
    """Print the ``id ----> word`` mapping for every non-padding id.

    Ids equal to 0 are treated as padding and skipped.
    """
    for token_id in tensor:
        if token_id == 0:
            continue
        print("%d ----> %s" % (token_id, lang.index_word[token_id]))
def loss_function(real, pred):
    """Masked sparse categorical cross-entropy.

    Token ids equal to 0 are padding: their per-token losses are zeroed
    before the mean is taken over the full batch/timestep tensor.
    """
    criterion = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    per_token_loss = criterion(real, pred)
    # True where the target is a real token, False on padding.
    pad_mask = tf.math.logical_not(tf.math.equal(real, 0))
    pad_mask = tf.cast(pad_mask, dtype=per_token_loss.dtype)
    per_token_loss = per_token_loss * pad_mask
    return tf.reduce_mean(per_token_loss)
@tf.function
def train_step(inp, targ, enc_hidden, optimizer, BATCH_SIZE, target_sentence_tokenizer, encoder, decoder):
    """Run one teacher-forced training step and apply the gradient update.

    Encodes the source batch, decodes the target batch one timestep at a
    time while feeding the ground-truth token as the next decoder input
    (teacher forcing), then backpropagates the summed per-step loss over
    both encoder and decoder variables.

    Returns:
        The accumulated loss divided by the number of target timesteps.
    """
    loss = 0
    with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
        # The decoder starts from the final encoder state.
        dec_hidden = enc_hidden
        # First decoder input: the 'start_' token for every batch element.
        dec_input = tf.expand_dims([target_sentence_tokenizer.word_index['start_']] * BATCH_SIZE, 1)
        # Teacher forcing - feeding the target as the next input
        for t in range(1, targ.shape[1]):
            # passing enc_output to the decoder
            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
            # loss_function (sibling helper) masks padding ids (0) internally.
            loss += loss_function(targ[:, t], predictions)
            # using teacher forcing
            dec_input = tf.expand_dims(targ[:, t], 1)
    batch_loss = (loss / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss
def evaluate(sentence, units, max_target_length, max_source_length, encoder, decoder, source_tokenizer, target_tokenizer):
    """Greedy-decode a single sentence and collect attention weights.

    Stops predicting when the model emits the '_end' token or when
    max_target_length steps have been produced.

    Returns:
        tuple: (result, sentence, attention_plot) — the decoded string,
        the preprocessed input sentence, and a
        (max_target_length, max_source_length) array holding the
        attention weights of each decode step.
    """
    attention_plot = np.zeros((max_target_length, max_source_length))
    sentence = preprocess(sentence)
    #print(sentence)
    #print(source_tokenizer.word_index)
    # Every token must exist in source_tokenizer.word_index (no OOV handling).
    inputs = [source_tokenizer.word_index[i] for i in sentence.split(' ')]
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
                                                          maxlen=max_source_length,
                                                          padding='post')
    inputs = tf.convert_to_tensor(inputs)
    result = ''
    # Batch size is 1 at inference time.
    hidden = [tf.zeros((1, units))]
    enc_out, enc_hidden = encoder(inputs, hidden)
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims([target_tokenizer.word_index['start_']], 0)
    for t in range(max_target_length):
        predictions, dec_hidden, attention_weights = decoder(dec_input,
                                                             dec_hidden,
                                                             enc_out)
        # storing the attention weights to plot later on
        attention_weights = tf.reshape(attention_weights, (-1, ))
        attention_plot[t] = attention_weights.numpy()
        # Greedy decoding: take the highest-scoring token at each step.
        predicted_id = tf.argmax(predictions[0]).numpy()
        result += target_tokenizer.index_word[predicted_id] + ' '
        if target_tokenizer.index_word[predicted_id] == '_end':
            return result, sentence, attention_plot
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    return result, sentence, attention_plot
def plot_attention(attention, sentence, predicted_sentence):
    """Render the attention-weight matrix as a heat map.

    Source tokens label the x axis, predicted tokens the y axis.
    """
    figure_handle = plt.figure(figsize=(10, 10))
    axes = figure_handle.add_subplot(1, 1, 1)
    axes.matshow(attention, cmap='viridis')
    label_font = {'fontsize': 14}
    # The leading '' accounts for matshow's implicit origin tick.
    axes.set_xticklabels([''] + sentence, fontdict=label_font, rotation=90)
    axes.set_yticklabels([''] + predicted_sentence, fontdict=label_font)
    # One tick per token on both axes.
    axes.xaxis.set_major_locator(ticker.MultipleLocator(1))
    axes.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def translate(sentence, units, max_target_length, max_source_length, encoder, decoder, source_tokenizer, target_tokenizer):
    """Translate *sentence*, print the result, and plot its attention map."""
    result, sentence, attention_plot = evaluate(
        sentence, units, max_target_length, max_source_length,
        encoder, decoder, source_tokenizer, target_tokenizer)
    print('Input: %s' % (sentence))
    print('Predicted translation: {}'.format(result))
    result_tokens = result.split(' ')
    source_tokens = sentence.split(' ')
    # Trim the plot to the tokens actually consumed/produced.
    attention_plot = attention_plot[:len(result_tokens), :len(source_tokens)]
    plot_attention(attention_plot, source_tokens, result_tokens)
def _plot_metric_pair(train_values, val_values, title, ylabel, legend_loc):
    """Draw one train/validation curve pair on a fresh figure."""
    figure(num=None, figsize=(11, 7))
    plt.plot(train_values)
    plt.plot(val_values)
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc=legend_loc)
    plt.show()


def plot_training(history):
    """Plot loss, masked categorical accuracy and exact-match accuracy curves.

    Args:
        history: a Keras ``History`` object whose ``history`` dict holds
            the per-epoch metric lists recorded during training.
    """
    metrics = history.history
    # Plot training & validation loss values
    _plot_metric_pair(metrics['loss'], metrics['val_loss'],
                      'Model loss', 'Loss', 'upper right')
    # Plot training & validation masked_categorical_accuracy values
    _plot_metric_pair(metrics['masked_categorical_accuracy'],
                      metrics['val_masked_categorical_accuracy'],
                      'Model accuracy', 'Accuracy', 'lower right')
    # Plot training & validation exact_matched_accuracy values
    _plot_metric_pair(metrics['exact_matched_accuracy'],
                      metrics['val_exact_matched_accuracy'],
                      'Model exact match accuracy', 'Accuracy', 'lower right')
def _plot_metric_columns(history, train_col, val_col, title, ylabel, legend_loc):
    """Draw one train/validation curve pair taken from *history* columns."""
    figure(num=None, figsize=(11, 7))
    plt.plot(history[:, train_col])
    plt.plot(history[:, val_col])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc=legend_loc)
    plt.show()


def plot_training2(history):
    """Plot loss/accuracy curves from a 2-D per-epoch metrics array.

    Args:
        history: 2-D array-like where columns 1-3 hold the training loss,
            categorical accuracy and exact-match accuracy, and columns
            4-6 hold the corresponding validation metrics.
    """
    # Plot training & validation loss values
    _plot_metric_columns(history, 1, 4, 'Model loss', 'Loss', 'upper right')
    # Plot training & validation masked_categorical_accuracy values
    _plot_metric_columns(history, 2, 5, 'Model categorical accuracy',
                         'Accuracy', 'lower right')
    # Plot training & validation exact_matched_accuracy values
    _plot_metric_columns(history, 3, 6, 'Model exact match accuracy',
                         'Accuracy', 'lower right')
| [
"matplotlib.pyplot.title",
"tensorflow.reshape",
"matplotlib.pyplot.figure",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.math.equal",
"tensorflow.cast",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"io.open",
"matplotlib.ticker.MultipleLocator",
"re.sub",
"ma... | [((428, 455), 're.sub', 're.sub', (['""" +"""', '""" """', 'sentence'], {}), "(' +', ' ', sentence)\n", (434, 455), False, 'import re\n'), ((470, 495), 're.sub', 're.sub', (['"""\'"""', '""""""', 'sentence'], {}), '("\'", \'\', sentence)\n', (476, 495), False, 'import re\n'), ((586, 624), 're.sub', 're.sub', (['"""([?.!,¿])"""', '""" \\\\1 """', 'sentence'], {}), "('([?.!,¿])', ' \\\\1 ', sentence)\n", (592, 624), False, 'import re\n'), ((1455, 1541), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': '"""none"""'}), "(from_logits=True, reduction=\n 'none')\n", (1500, 1541), True, 'import tensorflow as tf\n'), ((1645, 1677), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss_.dtype'}), '(mask, dtype=loss_.dtype)\n', (1652, 1677), True, 'import tensorflow as tf\n'), ((1708, 1729), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_'], {}), '(loss_)\n', (1722, 1729), True, 'import tensorflow as tf\n'), ((2006, 2091), 'tensorflow.expand_dims', 'tf.expand_dims', (["([target_sentence_tokenizer.word_index['start_']] * BATCH_SIZE)", '(1)'], {}), "([target_sentence_tokenizer.word_index['start_']] * BATCH_SIZE, 1\n )\n", (2020, 2091), True, 'import tensorflow as tf\n'), ((2960, 3008), 'numpy.zeros', 'np.zeros', (['(max_target_length, max_source_length)'], {}), '((max_target_length, max_source_length))\n', (2968, 3008), True, 'import numpy as np\n'), ((3196, 3298), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['[inputs]'], {'maxlen': 'max_source_length', 'padding': '"""post"""'}), "([inputs], maxlen=\n max_source_length, padding='post')\n", (3241, 3298), True, 'import tensorflow as tf\n'), ((3427, 3455), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['inputs'], {}), '(inputs)\n', (3447, 3455), True, 'import tensorflow as tf\n'), ((3605, 3663), 'tensorflow.expand_dims', 'tf.expand_dims', 
(["[target_tokenizer.word_index['start_']]", '(0)'], {}), "([target_tokenizer.word_index['start_']], 0)\n", (3619, 3663), True, 'import tensorflow as tf\n'), ((4611, 4639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4621, 4639), True, 'import matplotlib.pyplot as plt\n'), ((5012, 5022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5020, 5022), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5628), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (5601, 5628), False, 'from matplotlib.pyplot import figure\n'), ((5679, 5712), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (5687, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5754), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5725, 5754), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5782), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (5768, 5782), True, 'import matplotlib.pyplot as plt\n'), ((5787, 5805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5797, 5805), True, 'import matplotlib.pyplot as plt\n'), ((5810, 5829), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5820, 5829), True, 'import matplotlib.pyplot as plt\n'), ((5834, 5888), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""upper right"""'}), "(['Train', 'Validation'], loc='upper right')\n", (5844, 5888), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5901, 5903), True, 'import matplotlib.pyplot as plt\n'), ((5909, 5942), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (5915, 5942), False, 'from 
matplotlib.pyplot import figure\n'), ((6016, 6072), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['masked_categorical_accuracy']"], {}), "(history.history['masked_categorical_accuracy'])\n", (6024, 6072), True, 'import matplotlib.pyplot as plt\n'), ((6077, 6137), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_masked_categorical_accuracy']"], {}), "(history.history['val_masked_categorical_accuracy'])\n", (6085, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6142, 6169), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (6151, 6169), True, 'import matplotlib.pyplot as plt\n'), ((6174, 6196), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6184, 6196), True, 'import matplotlib.pyplot as plt\n'), ((6201, 6220), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (6211, 6220), True, 'import matplotlib.pyplot as plt\n'), ((6225, 6279), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""lower right"""'}), "(['Train', 'Validation'], loc='lower right')\n", (6235, 6279), True, 'import matplotlib.pyplot as plt\n'), ((6284, 6294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6292, 6294), True, 'import matplotlib.pyplot as plt\n'), ((6300, 6333), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (6306, 6333), False, 'from matplotlib.pyplot import figure\n'), ((6402, 6453), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['exact_matched_accuracy']"], {}), "(history.history['exact_matched_accuracy'])\n", (6410, 6453), True, 'import matplotlib.pyplot as plt\n'), ((6458, 6513), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_exact_matched_accuracy']"], {}), "(history.history['val_exact_matched_accuracy'])\n", (6466, 6513), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6557), 'matplotlib.pyplot.title', 
'plt.title', (['"""Model exact match accuracy"""'], {}), "('Model exact match accuracy')\n", (6527, 6557), True, 'import matplotlib.pyplot as plt\n'), ((6562, 6584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6572, 6584), True, 'import matplotlib.pyplot as plt\n'), ((6589, 6608), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (6599, 6608), True, 'import matplotlib.pyplot as plt\n'), ((6613, 6667), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""lower right"""'}), "(['Train', 'Validation'], loc='lower right')\n", (6623, 6667), True, 'import matplotlib.pyplot as plt\n'), ((6672, 6682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6680, 6682), True, 'import matplotlib.pyplot as plt\n'), ((6718, 6751), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (6724, 6751), False, 'from matplotlib.pyplot import figure\n'), ((6802, 6825), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 1]'], {}), '(history[:, 1])\n', (6810, 6825), True, 'import matplotlib.pyplot as plt\n'), ((6830, 6853), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 4]'], {}), '(history[:, 4])\n', (6838, 6853), True, 'import matplotlib.pyplot as plt\n'), ((6858, 6881), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (6867, 6881), True, 'import matplotlib.pyplot as plt\n'), ((6886, 6904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (6896, 6904), True, 'import matplotlib.pyplot as plt\n'), ((6909, 6928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (6919, 6928), True, 'import matplotlib.pyplot as plt\n'), ((6933, 6987), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""upper right"""'}), "(['Train', 'Validation'], loc='upper right')\n", (6943, 6987), True, 'import 
matplotlib.pyplot as plt\n'), ((6992, 7002), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7000, 7002), True, 'import matplotlib.pyplot as plt\n'), ((7008, 7041), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (7014, 7041), False, 'from matplotlib.pyplot import figure\n'), ((7115, 7138), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 2]'], {}), '(history[:, 2])\n', (7123, 7138), True, 'import matplotlib.pyplot as plt\n'), ((7143, 7166), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 5]'], {}), '(history[:, 5])\n', (7151, 7166), True, 'import matplotlib.pyplot as plt\n'), ((7171, 7210), 'matplotlib.pyplot.title', 'plt.title', (['"""Model categorical accuracy"""'], {}), "('Model categorical accuracy')\n", (7180, 7210), True, 'import matplotlib.pyplot as plt\n'), ((7215, 7237), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (7225, 7237), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7261), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7252, 7261), True, 'import matplotlib.pyplot as plt\n'), ((7266, 7320), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""lower right"""'}), "(['Train', 'Validation'], loc='lower right')\n", (7276, 7320), True, 'import matplotlib.pyplot as plt\n'), ((7325, 7335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7333, 7335), True, 'import matplotlib.pyplot as plt\n'), ((7341, 7374), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 7)'}), '(num=None, figsize=(11, 7))\n', (7347, 7374), False, 'from matplotlib.pyplot import figure\n'), ((7443, 7466), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 3]'], {}), '(history[:, 3])\n', (7451, 7466), True, 'import matplotlib.pyplot as plt\n'), ((7471, 7494), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, 6]'], {}), '(history[:, 6])\n', (7479, 7494), True, 
'import matplotlib.pyplot as plt\n'), ((7499, 7538), 'matplotlib.pyplot.title', 'plt.title', (['"""Model exact match accuracy"""'], {}), "('Model exact match accuracy')\n", (7508, 7538), True, 'import matplotlib.pyplot as plt\n'), ((7543, 7565), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (7553, 7565), True, 'import matplotlib.pyplot as plt\n'), ((7570, 7589), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7580, 7589), True, 'import matplotlib.pyplot as plt\n'), ((7594, 7648), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""lower right"""'}), "(['Train', 'Validation'], loc='lower right')\n", (7604, 7648), True, 'import matplotlib.pyplot as plt\n'), ((7653, 7663), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7661, 7663), True, 'import matplotlib.pyplot as plt\n'), ((1573, 1595), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (1586, 1595), True, 'import tensorflow as tf\n'), ((1875, 1892), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1890, 1892), True, 'import tensorflow as tf\n'), ((2420, 2449), 'tensorflow.expand_dims', 'tf.expand_dims', (['targ[:, t]', '(1)'], {}), '(targ[:, t], 1)\n', (2434, 2449), True, 'import tensorflow as tf\n'), ((3488, 3508), 'tensorflow.zeros', 'tf.zeros', (['(1, units)'], {}), '((1, units))\n', (3496, 3508), True, 'import tensorflow as tf\n'), ((4003, 4039), 'tensorflow.reshape', 'tf.reshape', (['attention_weights', '(-1,)'], {}), '(attention_weights, (-1,))\n', (4013, 4039), True, 'import tensorflow as tf\n'), ((4412, 4445), 'tensorflow.expand_dims', 'tf.expand_dims', (['[predicted_id]', '(0)'], {}), '([predicted_id], 0)\n', (4426, 4445), True, 'import tensorflow as tf\n'), ((4922, 4947), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (4944, 4947), True, 'import matplotlib.ticker as ticker\n'), ((4980, 5005), 
'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (5002, 5005), True, 'import matplotlib.ticker as ticker\n'), ((4119, 4144), 'tensorflow.argmax', 'tf.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (4128, 4144), True, 'import tensorflow as tf\n'), ((984, 1015), 'io.open', 'io.open', (['path'], {'encoding': '"""UTF-8"""'}), "(path, encoding='UTF-8')\n", (991, 1015), False, 'import io\n')] |
import pytorch_lightning as pl
import torch
from xmuda.models.modules import Net2DFeat, Net3DFeat, FuseNet
from xmuda.models.LMSCNet import LMSCNet
from xmuda.common.utils.metrics import Metrics
import pickle
import numpy as np
import time
import os.path as osp
class RecNetLMSC(pl.LightningModule):
    """LightningModule wrapping LMSCNet for semantic scene completion.

    Trains against 1/4-scale SSC labels and tracks occupancy/semantic
    metrics both over all voxels and over a precomputed "visible" voxel
    subset loaded from *preprocess_dir*.
    """
    def __init__(self, preprocess_dir):
        """Build the LMSCNet model, metric trackers and load the voxel-visibility pickle."""
        super().__init__()
        # Per-class counts passed to LMSCNet — presumably dataset-wide voxel
        # frequencies used for loss weighting; TODO confirm against dataset stats.
        self.class_frequencies = np.array([5.41773033e+09, 1.57835390e+07, 1.25136000e+05, 1.18809000e+05,
                                          6.46799000e+05, 8.21951000e+05, 2.62978000e+05, 2.83696000e+05,
                                          2.04750000e+05, 6.16887030e+07, 4.50296100e+06, 4.48836500e+07,
                                          2.26992300e+06, 5.68402180e+07, 1.57196520e+07, 1.58442623e+08,
                                          2.06162300e+06, 3.69705220e+07, 1.15198800e+06, 3.34146000e+05])
        self.class_num = 20
        self.lmscnet = LMSCNet(
            class_num=self.class_num,
            class_frequencies=self.class_frequencies)
        # NOTE(review): the file is named 'visible_voxels.pkl' but stored as
        # 'invisible_voxels' — confirm the intended semantics.
        with open(osp.join(preprocess_dir, "visible_voxels.pkl"), 'rb') as f:
            self.invisible_voxels = pickle.load(f)
        self.train_metrics = Metrics(self.class_num)
        self.val_metrics = Metrics(self.class_num)
        self.train_metrics_visible = Metrics(self.class_num)
        self.val_metrics_visible = Metrics(self.class_num)
        # tensorboard = self.logger.experiment

    def forward(self, batch):
        """Run LMSCNet on the dense voxel-occupancy grid of *batch*."""
        occupancy = batch['voxel_occupancy'].cuda()
        # Dead code below: occupancy used to be densified from point coords.
        # n_points_3d = batch['n_points_3d']
        # img = batch['img']
        # bs = img.shape[0]
        # coords_3d = batch['coords_3d']
        # occupancy = torch.zeros(bs, 256, 256, 32, device=self.device)
        # # print(coords_3d.shape)
        # # prev = 0
        # for i in range(bs):
        #     idx = coords_3d[:, 3] == i
        #     b_coords = coords_3d[idx]
        #     occupancy[i, b_coords[:, 0], b_coords[:, 1], b_coords[:, 2]] = 1.0
        #     # prev = n_point
        # occupancy = occupancy.transpose(2, 3)
        out = self.lmscnet(occupancy)
        return out

    def training_step(self, batch, batch_idx):
        """Compute the SSC loss for one batch and log per-batch metrics."""
        pred = self(batch)
        target = batch['ssc_label_1_4'].cuda()
        loss = self.lmscnet.compute_loss(pred, target)
        self.train_metrics.add_batch(prediction=pred, target=target)
        self.train_metrics_visible.add_batch(prediction=pred,
                                             target=target,
                                             scenes=batch['scene'],
                                             invisible_data_dict=self.invisible_voxels)
        self.log('train/loss', loss.item())
        self.log('train/loss_ssc', loss.item())
        # Metrics are reset after every batch, so these logs are per-batch
        # values rather than running epoch averages.
        for metrics, suffix in [(self.train_metrics, ""), (self.train_metrics_visible, "_visible")]:
            self.log("train/mIoU" + suffix,
                     metrics.get_semantics_mIoU().item())
            self.log("train/IoU" + suffix, metrics.get_occupancy_IoU().item())
            self.log("train/Precision" + suffix,
                     metrics.get_occupancy_Precision().item())
            self.log("train/Recall" + suffix,
                     metrics.get_occupancy_Recall().item())
            self.log("train/F1" + suffix, metrics.get_occupancy_F1().item())
            metrics.reset_evaluator()
        return loss

    def validation_step(self, batch, batch_idx):
        """Accumulate validation metrics for one batch; logging happens at epoch end."""
        pred = self(batch)
        target = batch['ssc_label_1_4'].cuda()
        loss = self.lmscnet.compute_loss(pred, target)
        # loss = self.bce_logits_loss(logits, occ_labels)
        self.log('val/loss', loss.item())
        self.log('val/loss_ssc', loss.item())
        self.val_metrics.add_batch(prediction=pred, target=target)
        self.val_metrics_visible.add_batch(prediction=pred,
                                           target=target,
                                           scenes=batch['scene'],
                                           invisible_data_dict=self.invisible_voxels)
        # pred_occ_labels = (torch.sigmoid(logits) > 0.5).float()
        # acc = (pred_occ_labels == occ_labels).float().mean()
        # self.log('train/acc', acc.item())

    def validation_epoch_end(self, outputs):
        """Log the metrics accumulated over the whole validation epoch, then reset."""
        for metrics, suffix in [(self.val_metrics, ""), (self.val_metrics_visible, "_visible")]:
            self.log("val/mIoU" + suffix,
                     metrics.get_semantics_mIoU().item())
            self.log("val/IoU" + suffix, metrics.get_occupancy_IoU().item())
            self.log("val/Precision" + suffix,
                     metrics.get_occupancy_Precision().item())
            self.log("val/Recall" + suffix,
                     metrics.get_occupancy_Recall().item())
            self.log("val/F1" + suffix, metrics.get_occupancy_F1().item())
            metrics.reset_evaluator()

    def configure_optimizers(self):
        """Adam over all parameters with a fixed learning rate of 1e-4."""
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
        return optimizer
| [
"xmuda.models.LMSCNet.LMSCNet",
"xmuda.common.utils.metrics.Metrics",
"pickle.load",
"numpy.array",
"os.path.join"
] | [((402, 649), 'numpy.array', 'np.array', (['[5417730330.0, 15783539.0, 125136.0, 118809.0, 646799.0, 821951.0, 262978.0,\n 283696.0, 204750.0, 61688703.0, 4502961.0, 44883650.0, 2269923.0, \n 56840218.0, 15719652.0, 158442623.0, 2061623.0, 36970522.0, 1151988.0, \n 334146.0]'], {}), '([5417730330.0, 15783539.0, 125136.0, 118809.0, 646799.0, 821951.0,\n 262978.0, 283696.0, 204750.0, 61688703.0, 4502961.0, 44883650.0, \n 2269923.0, 56840218.0, 15719652.0, 158442623.0, 2061623.0, 36970522.0, \n 1151988.0, 334146.0])\n', (410, 649), True, 'import numpy as np\n'), ((956, 1031), 'xmuda.models.LMSCNet.LMSCNet', 'LMSCNet', ([], {'class_num': 'self.class_num', 'class_frequencies': 'self.class_frequencies'}), '(class_num=self.class_num, class_frequencies=self.class_frequencies)\n', (963, 1031), False, 'from xmuda.models.LMSCNet import LMSCNet\n'), ((1217, 1240), 'xmuda.common.utils.metrics.Metrics', 'Metrics', (['self.class_num'], {}), '(self.class_num)\n', (1224, 1240), False, 'from xmuda.common.utils.metrics import Metrics\n'), ((1268, 1291), 'xmuda.common.utils.metrics.Metrics', 'Metrics', (['self.class_num'], {}), '(self.class_num)\n', (1275, 1291), False, 'from xmuda.common.utils.metrics import Metrics\n'), ((1329, 1352), 'xmuda.common.utils.metrics.Metrics', 'Metrics', (['self.class_num'], {}), '(self.class_num)\n', (1336, 1352), False, 'from xmuda.common.utils.metrics import Metrics\n'), ((1388, 1411), 'xmuda.common.utils.metrics.Metrics', 'Metrics', (['self.class_num'], {}), '(self.class_num)\n', (1395, 1411), False, 'from xmuda.common.utils.metrics import Metrics\n'), ((1172, 1186), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1183, 1186), False, 'import pickle\n'), ((1076, 1122), 'os.path.join', 'osp.join', (['preprocess_dir', '"""visible_voxels.pkl"""'], {}), "(preprocess_dir, 'visible_voxels.pkl')\n", (1084, 1122), True, 'import os.path as osp\n')] |
import os
from joblib.parallel import Parallel, delayed
import numpy as np
from tqdm import tqdm
from lost_ds.functional.api import (remove_empty,
is_multilabel,
label_selection,
)
from lost_ds.im_util import get_imagesize, get_fs
from lost_ds.geometry.lost_geom import LOSTGeometries
def _get_and_validate_order(order, df, lbl_col, seg_lbl_col):
    """Normalize the class *order* and derive a single-label column on *df*.

    Args:
        order (dict, list): class -> priority mapping; a list is converted
            to ``{label: position}``. The returned dict is sorted ascending
            by priority so later entries overwrite earlier ones when drawn.
        df (pd.DataFrame): annotation dataframe; mutated in place by adding
            the *seg_lbl_col* column.
        lbl_col (str): column holding the (possibly multilabel) annotations.
        seg_lbl_col (str): name of the derived single-label column.

    Returns:
        tuple: (sorted order dict, the mutated dataframe)

    Raises:
        Exception: if any row carries more than one label from *order*.
    """
    if isinstance(order, list):
        order = {lbl: idx for idx, lbl in enumerate(order)}
    # Sort ascending by priority value so iteration order matches draw order.
    order = {k: v for k, v in sorted(order.items(), key=lambda item: item[1])}
    # if a multilabel column is passed we have to make sure that a maximum of
    # one label is defined in the passed 'order'. If order definition is OK the
    # multilabel column can be expressed by a single label column by only
    # picking labels from order. Otherwise an Exception will be thrown
    if is_multilabel(df, lbl_col):
        # remove dataset labels that do not occur in order
        df[seg_lbl_col] = df[lbl_col].apply(lambda x:
            [lbl for lbl in x if lbl in list(order.keys())])
        if df[seg_lbl_col].apply(lambda x: len(x) > 1).sum():
            raise Exception('Found entries where multiple labels from order ' \
                'are defined! Pass an order without multilabel classes and run again')
        else:
            # Collapse singleton lists; rows with no kept label become NaN.
            df[seg_lbl_col] = df[seg_lbl_col].apply(lambda x:
                x[0] if len(x) else np.nan)
    else:
        df[seg_lbl_col] = df[lbl_col]
    return order, df
def semantic_segmentation(order, dst_dir, fill_value, df,
                          anno_dtypes=('polygon',), lbl_col='anno_lbl',
                          dst_path_col='seg_path', dst_lbl_col='seg_lbl',
                          line_thickness=None, radius=None, filesystem=None):
    '''Create semantic segmentations from polygon-annos

    Args:
        df (pd.DataFrame): dataframe to generate the pixel maps from
        order (dict, list): order of the classes. Higher pixel
            values will overwrite lower ones.
            Example: order=['Car', 'Person', 'Glasses'] or
                     order={'Car': 0, 'Person': 1, 'Glasses': 2}
            Car will get pixel value 0, Person px value 1 and so on -
            Person overwrites Car, Glasses overwrites Person ...
        dst_dir (str): Directory to store the pixel maps.
        fill_value (int): Pixel value for not annotated areas. Usually this
            will be something like 0 for something like 'background'.
        anno_dtypes (list/tuple of string): dtypes to use for segmentation.
            Possible values are {'polygon', 'bbox', 'line', 'point'}.
            Annotations with dtypes other than anno_dtypes will be removed
            from dataframe
        lbl_col (str): Column containing the training labels
        line_thickness (int): thickness of line-segmentation when using an
            annotation of dtype line. Only takes effect when anno_dtypes
            contains 'line'
        radius (int): radius of circle-segmentation when using an annotation
            of dtype point. Only takes effect when anno_dtypes contains 'point'
        filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
            if not initialized

    Returns:
        pd.DataFrame: The original dataframe with new column (dst_path_col)
            containing the path to the according segmentation file.
            Furthermore the column dst_lbl_col contains the label the
            segmentation looked up in order for creation
    '''
    fs = get_fs(filesystem)
    df = remove_empty(df=df, col='anno_data')
    order, df = _get_and_validate_order(order, df, lbl_col, dst_lbl_col)
    # Keep only annotations whose dtype we know how to rasterize.
    df = df[df.anno_dtype.isin(anno_dtypes)]

    def generate_seg(image_path, img_df):
        # Rasterize all annotations of one image into a single pixel map.
        geom = LOSTGeometries()
        if not fs.exists(image_path):
            raise Exception('Image {} does not exist'.format(image_path))
        im_h, im_w = get_imagesize(image_path)
        # Start from the background value; later classes overwrite earlier
        # ones because 'order' is iterated ascending by priority.
        segmentation = np.full([im_h, im_w], fill_value, dtype=np.int32)
        for label, level in order.items():
            draw = label_selection(labels=[label], df=img_df, col=dst_lbl_col)
            for _, row in draw.iterrows():
                segmentation = geom.segmentation(segmentation, level,
                                                 row.anno_data, row.anno_dtype,
                                                 row.anno_format, row.anno_style,
                                                 line_thickness=line_thickness,
                                                 radius=radius)
        # NOTE: images in different folders with the same basename map to
        # the same segmentation file and would overwrite each other.
        filename = image_path.split('/')[-1].split('.')[0] + '.png'
        seg_path = os.path.join(dst_dir, filename)
        fs.write_img(segmentation, seg_path)
        return image_path, seg_path

    fs.makedirs(dst_dir, exist_ok=True)
    # Rasterize all images in parallel (one job per image).
    paths = Parallel(n_jobs=-1)(delayed(generate_seg)(path, img_df)
                                  for path, img_df in tqdm(
                                      df.groupby('img_path'),
                                      desc='segmentation'))
    path_map = {im_path: seg_path for im_path, seg_path in paths}
    df[dst_path_col] = df.img_path.apply(lambda x: path_map[x])
    return df
| [
"numpy.full",
"os.path.join",
"lost_ds.im_util.get_imagesize",
"joblib.parallel.delayed",
"lost_ds.im_util.get_fs",
"lost_ds.functional.api.label_selection",
"lost_ds.geometry.lost_geom.LOSTGeometries",
"joblib.parallel.Parallel",
"lost_ds.functional.api.is_multilabel",
"lost_ds.functional.api.rem... | [((944, 970), 'lost_ds.functional.api.is_multilabel', 'is_multilabel', (['df', 'lbl_col'], {}), '(df, lbl_col)\n', (957, 970), False, 'from lost_ds.functional.api import remove_empty, is_multilabel, label_selection\n'), ((3626, 3644), 'lost_ds.im_util.get_fs', 'get_fs', (['filesystem'], {}), '(filesystem)\n', (3632, 3644), False, 'from lost_ds.im_util import get_imagesize, get_fs\n'), ((3654, 3690), 'lost_ds.functional.api.remove_empty', 'remove_empty', ([], {'df': 'df', 'col': '"""anno_data"""'}), "(df=df, col='anno_data')\n", (3666, 3690), False, 'from lost_ds.functional.api import remove_empty, is_multilabel, label_selection\n'), ((3871, 3887), 'lost_ds.geometry.lost_geom.LOSTGeometries', 'LOSTGeometries', ([], {}), '()\n', (3885, 3887), False, 'from lost_ds.geometry.lost_geom import LOSTGeometries\n'), ((4021, 4046), 'lost_ds.im_util.get_imagesize', 'get_imagesize', (['image_path'], {}), '(image_path)\n', (4034, 4046), False, 'from lost_ds.im_util import get_imagesize, get_fs\n'), ((4070, 4119), 'numpy.full', 'np.full', (['[im_h, im_w]', 'fill_value'], {'dtype': 'np.int32'}), '([im_h, im_w], fill_value, dtype=np.int32)\n', (4077, 4119), True, 'import numpy as np\n'), ((4759, 4790), 'os.path.join', 'os.path.join', (['dst_dir', 'filename'], {}), '(dst_dir, filename)\n', (4771, 4790), False, 'import os\n'), ((5141, 5160), 'joblib.parallel.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (5149, 5160), False, 'from joblib.parallel import Parallel, delayed\n'), ((4183, 4242), 'lost_ds.functional.api.label_selection', 'label_selection', ([], {'labels': '[label]', 'df': 'img_df', 'col': 'dst_lbl_col'}), '(labels=[label], df=img_df, col=dst_lbl_col)\n', (4198, 4242), False, 'from lost_ds.functional.api import remove_empty, is_multilabel, label_selection\n'), ((5161, 5182), 'joblib.parallel.delayed', 'delayed', (['generate_seg'], {}), '(generate_seg)\n', (5168, 5182), False, 'from joblib.parallel import Parallel, 
delayed\n')] |
"""
@author: ludvigolsen
"""
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from utipy.utils.check_instance import check_instance
from utipy.utils.convert_to_type import convert_to_type
# TODO: Cythonize
def window(x: Union[list, np.ndarray, pd.Series], size: int = 2, gap: int = 1, sample_rate: int = 1,
           rolling: bool = True, reverse_direction: bool = False,
           discard_shorts: bool = True) -> Tuple[List[np.ndarray], int]:
    """
    Split an array (e.g. a time series) into windows.

    Returns the windows as a list of np.ndarrays together with the number
    of windows. With `rolling=True` consecutive windows start `gap`
    (seconds, scaled by `sample_rate`) apart and may overlap; otherwise
    windows are taken greedily with a `gap`-sized stretch skipped between
    them.

    Parameters
    ----------
    x : list, np.ndarray, pd.Series
        The time series array to window.
    size : int
        Window size.
    gap : int
        Gap size.
    sample_rate : int
        `size` and `gap` are multiplied by this value, allowing you to
        specify them in seconds instead of samples.
    rolling : bool
        Use rolling windows. If False, grab "size * sample_rate" elements
        greedily (note that `gap` still defaults to 1).
    reverse_direction : bool
        Start from the end of the array instead of the beginning.
        Element order within each window is unchanged.
    discard_shorts : bool
        When the array is shorter than size*sample_rate, return ([], 0)
        if True, or ([x], 0) if False.

    Returns
    -------
    List of np.ndarrays, number of windows
    """
    _ = check_instance(x)
    x = convert_to_type(x, 'np.ndarray')

    # Argument validation (messages preserved from the original contract)
    assert isinstance(size, int), "size must be an integer"
    assert isinstance(gap, int), "gap must be an integer"
    assert isinstance(sample_rate, int), "sample_rate must be an integer"
    assert isinstance(rolling, bool), "rolling must be a bool"
    assert isinstance(reverse_direction,
                      bool), "reverse_direction must be a bool"
    assert isinstance(discard_shorts, bool), "discard_shorts must be a bool"
    assert size >= 1, "size must be at least 1"
    assert sample_rate >= 1, "sample_rate must be at least 1"
    if rolling:
        assert gap >= 1, "gap must be at least 1 when creating rolling windows"

    # Convert size/gap from seconds to sample counts
    window_len = sample_rate * size
    step_len = gap * sample_rate

    # Input too short for even a single window
    if len(x) < window_len:
        return ([], 0) if discard_shorts else ([x], 0)

    total = len(x)
    windows = []

    if rolling:
        # Number of starting positions that leave room for a full window
        n_windows = np.int32((total - window_len) / step_len) + 1
        if reverse_direction:
            # Anchor window ends at the tail and walk backwards
            for w in range(n_windows):
                end = total - w * step_len
                windows.append(x[end - window_len:end])
        else:
            for w in range(n_windows):
                start = w * step_len
                windows.append(x[start:start + window_len])
    else:
        # Greedy layout: [window][gap][window][gap]...
        stride = window_len + step_len
        leftover = total % stride
        n_windows = np.int32((total - leftover) / stride)
        # If the remainder still holds a full window (just missing its
        # trailing gap), count one extra window
        if leftover >= window_len:
            n_windows += 1
        if reverse_direction:
            for w in range(n_windows):
                end = total - w * stride
                windows.append(x[end - window_len:end])
        else:
            for w in range(n_windows):
                start = w * stride
                windows.append(x[start:start + window_len])

    return windows, n_windows
| [
"utipy.utils.check_instance.check_instance",
"utipy.utils.convert_to_type.convert_to_type",
"numpy.int32"
] | [((1512, 1529), 'utipy.utils.check_instance.check_instance', 'check_instance', (['x'], {}), '(x)\n', (1526, 1529), False, 'from utipy.utils.check_instance import check_instance\n'), ((1538, 1570), 'utipy.utils.convert_to_type.convert_to_type', 'convert_to_type', (['x', '"""np.ndarray"""'], {}), "(x, 'np.ndarray')\n", (1553, 1570), False, 'from utipy.utils.convert_to_type import convert_to_type\n'), ((3039, 3058), 'numpy.int32', 'np.int32', (['n_windows'], {}), '(n_windows)\n', (3047, 3058), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import cv2
from skimage import transform as trans
def warping(img, landmark):
    '''
    Return warped img. Size 112x112

    Estimates a similarity transform that maps the five detected facial
    key points onto a fixed 112x112 template, then applies it to the
    full frame.

    :param np.array img: Full frame image
    :param np.array landmark: array with 5 key points coordinates of the face
    :return: warped image
    :rtype: np.array
    '''
    out_size = [112, 112]
    # Fixed destination coordinates for the five key points
    # (presumably eyes, nose, mouth corners -- order set by the detector)
    template = np.array([
        [30.2946, 51.6963],
        [65.5318, 51.5014],
        [48.0252, 71.7366],
        [33.5493, 92.3655],
        [62.7299, 92.2041]], dtype=np.float32)
    # Shift x-coordinates for the 112-wide crop
    if out_size[1] == 112:
        template[:, 0] += 8.0
    similarity = trans.SimilarityTransform()
    similarity.estimate(landmark.astype(np.float32), template)
    # Keep only the 2x3 affine part for cv2
    affine = similarity.params[0:2, :]
    assert len(out_size) == 2
    return cv2.warpAffine(img, affine, (out_size[1], out_size[0]), borderValue=0.0)
def example():
    '''
    Example of using warping function
    '''
    train_df = pd.read_csv('/path/to/train_df.csv')
    row = train_df.iloc[0]
    # cv2 reads BGR; reverse the channel axis
    img = cv2.imread(row.crop_path)[..., ::-1]
    # Landmarks are stored relative to the bounding box; shift to frame coords
    landmarks5 = np.zeros((5, 2))
    x_cols = ['x0', 'x1', 'x2', 'x3', 'x4']
    y_cols = ['y0', 'y1', 'y2', 'y3', 'y4']
    for idx, (xc, yc) in enumerate(zip(x_cols, y_cols)):
        landmarks5[idx, 0] = int(row.bbox_x + row[xc])
        landmarks5[idx, 1] = int(row.bbox_y + row[yc])
    warped_img = warping(img, landmarks5)
    cv2.imwrite('/path/to/save/warped_img.jpg', warped_img)
| [
"pandas.read_csv",
"cv2.imwrite",
"numpy.zeros",
"skimage.transform.SimilarityTransform",
"cv2.warpAffine",
"cv2.imread",
"numpy.array"
] | [((380, 513), 'numpy.array', 'np.array', (['[[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [33.5493, \n 92.3655], [62.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [\n 33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32)\n', (388, 513), True, 'import numpy as np\n'), ((656, 683), 'skimage.transform.SimilarityTransform', 'trans.SimilarityTransform', ([], {}), '()\n', (681, 683), True, 'from skimage import transform as trans\n'), ((789, 860), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(image_size[1], image_size[0])'], {'borderValue': '(0.0)'}), '(img, M, (image_size[1], image_size[0]), borderValue=0.0)\n', (803, 860), False, 'import cv2\n'), ((967, 1003), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/train_df.csv"""'], {}), "('/path/to/train_df.csv')\n", (978, 1003), True, 'import pandas as pd\n'), ((1097, 1113), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (1105, 1113), True, 'import numpy as np\n'), ((1401, 1456), 'cv2.imwrite', 'cv2.imwrite', (['"""/path/to/save/warped_img.jpg"""', 'warped_img'], {}), "('/path/to/save/warped_img.jpg', warped_img)\n", (1412, 1456), False, 'import cv2\n'), ((1042, 1067), 'cv2.imread', 'cv2.imread', (['row.crop_path'], {}), '(row.crop_path)\n', (1052, 1067), False, 'import cv2\n')] |
#
# idaho-camera-traps.py
#
# Prepare the Idaho Camera Traps dataset for release on LILA.
#
#%% Imports and constants
import json
import os
import numpy as np
import dateutil
import pandas as pd
import datetime
import shutil
from tqdm import tqdm
from bson import json_util
from collections import defaultdict
# Multi-threading for .csv file comparison and image existence validation
from multiprocessing.pool import Pool as Pool
from multiprocessing.pool import ThreadPool as ThreadPool
# Worker counts: .csv parsing / validation vs. bulk file copying
n_threads = 14
n_threads_file_copy = 20
# Root folders: raw images in, metadata/intermediate files, final images out
input_base = r'i:\idfg-images'
output_base = r'h:\idaho-camera-traps'
output_image_base = r'j:\idaho-camera-traps-output'
assert os.path.isdir(input_base)
assert os.path.isdir(output_base)
assert os.path.isdir(output_image_base)
output_image_base_public = os.path.join(output_image_base,'public')
output_image_base_private = os.path.join(output_image_base,'private')
# We are going to map the original filenames/locations to obfuscated strings, but once
# we've done that, we will re-use the mappings every time we run this script.
force_generate_mappings = False
# This is the file to which mappings get saved
id_mapping_file = os.path.join(output_base,'id_mapping.json')
# The maximum time (in seconds) between images within which two images are considered the
# same sequence.
max_gap_within_sequence = 30
# This is a two-column file, where each line is [string in the original metadata],[category name we want to map it to]
category_mapping_file = os.path.join(output_base,'category_mapping.csv')
# The output file, using the original strings
output_json_original_strings = os.path.join(output_base,'idaho-camera-traps-original-strings.json')
# The output file, using obfuscated strings for everything but filenamed
output_json_remapped_ids = os.path.join(output_base,'idaho-camera-traps-remapped-ids.json')
# The output file, using obfuscated strings and obfuscated filenames
output_json = os.path.join(output_base,'idaho-camera-traps.json')
# One time only, I ran MegaDetector on the whole dataset...
megadetector_results_file = r'H:\idaho-camera-traps\idfg-2021-07-26idaho-camera-traps_detections.json'
# ...then set aside any images that *may* have contained humans that had not already been
# annotated as such. Those went in this folder...
human_review_folder = os.path.join(output_base,'human_review')
# ...and the ones that *actually* had humans (identified via manual review) got
# copied to this folder...
human_review_selection_folder = os.path.join(output_base,'human_review_selections')
# ...which was enumerated to this text file, which is a manually-curated list of
# images that were flagged as human.
human_review_list = os.path.join(output_base,'human_flagged_images.txt')
# Unopinionated .json conversion of the .csv metadata
sequence_info_cache = os.path.join(output_base,'sequence_info.json')
# Camera "operating state" strings considered legal in the source metadata
# (includes known misspellings, e.g. 'snow on lense')
valid_opstates = ['normal','maintenance','snow on lens','foggy lens','foggy weather',
                  'malfunction','misdirected','snow on lense','poop/slobber','sun','tilted','vegetation obstruction']
# Normalization for opstate values; 'maintenance' is mapped to 'human'
# (a person was at the camera)
opstate_mappings = {'snow on lense':'snow on lens','poop/slobber':'lens obscured','maintenance':'human'}
survey_species_presence_columns = ['elkpresent','deerpresent','prongpresent']
# Maps each species-presence checkbox column to the per-age/sex count columns
# that belong to it in the Timelapse .csv files
presence_to_count_columns = {
    'otherpresent':['MooseAntlerless','MooseCalf','MooseOther','MooseBull','MooseUnkn','BlackBearAdult','BlackBearCub','LionAdult',
                    'LionKitten','WolfAdult','WolfPup','CattleCow','CattleCalf','other'],
    'elkpresent':['ElkSpike','ElkAntlerless','ElkCalf','ElkRaghorn','ElkMatBull','ElkUnkn','ElkPedNub'],
    'deerpresent':['MDbuck','MDantlerless','MDfawn','WTDbuck','WTDantlerless','WTDfawn','WTDunkn','MDunkn'],
    'prongpresent':['PronghornBuck','PronghornFawn','PHunkn']
    }
required_columns = ['File','Folder','Date','Time','otherpresent','other','otherwhat','opstate']
expected_presence_columns = ['elkpresent','deerpresent','prongpresent','humanpresent','otherpresent']
# Union of all count columns across the presence groups above
expected_count_columns = set()
for presence_column in presence_to_count_columns.keys():
    count_columns = presence_to_count_columns[presence_column]
    for count_column in count_columns:
        expected_count_columns.add(count_column)
def list_is_sorted(l):
    """Return True if *l* is in non-decreasing order (vacuously True for 0/1 elements)."""
    return all(a <= b for a, b in zip(l, l[1:]))
#%% List files (images + .csv)
def get_files():
    """
    Enumerate image and .csv metadata files under input_base.

    Uses a cached enumeration ('all_files.json' in output_base) when it
    exists; otherwise walks the tree and writes that cache. Folders that
    contain more than one .csv file are excluded from the .csv list, since
    disambiguating them would require extra work.

    Returns:
        (image_files, csv_files): two lists of paths relative to input_base
    """
    all_files_list = os.path.join(output_base,'all_files.json')
    force_file_enumeration = False
    # Prefer the cached file list; enumerating the whole tree is slow
    if (os.path.isfile(all_files_list) and (not force_file_enumeration)):
        print('File list exists, bypassing enumeration')
        with open(all_files_list,'r') as f:
            all_files = json.load(f)
    else:
        from pathlib import Path
        all_files = []
        for path in Path(input_base).rglob('*.*'):
            path = str(path)
            # Store paths relative to input_base so the cache is portable
            path = os.path.relpath(path,input_base)
            all_files.append(path)
        with open(all_files_list,'w') as f:
            json.dump(all_files,f,indent=1)
        print('Enumerated {} files'.format(len(all_files)))
    image_files = [s for s in all_files if (s.lower().endswith('.jpg') or s.lower().endswith('.jpeg'))]
    # Keep only "real" metadata .csv files; exclude backups/exports/etc.
    csv_files = [s for s in all_files if (\
                 (s.lower().endswith('.csv')) and \
                 ('Backups' not in s) and \
                 ('Metadata.csv' not in s) and \
                 ('ExportedDataFiles' not in s) and \
                 ('CSV Files' not in s)
                 )]
    print('{} image files, {} .csv files'.format(len(image_files),len(csv_files)))
    # Ignore .csv files in folders with multiple .csv files
    # ...which would require some extra work to decipher.
    csv_files_to_ignore = []
    folder_to_csv_files = defaultdict(list)
    # fn = csv_files[0]
    for fn in csv_files:
        folder_name = os.path.dirname(fn)
        folder_to_csv_files[folder_name].append(fn)
    for folder_name in folder_to_csv_files.keys():
        if len(folder_to_csv_files[folder_name]) > 1:
            print('Multiple .csv files for {}:'.format(folder_name))
            for csv_file in folder_to_csv_files[folder_name]:
                print(csv_file)
                csv_files_to_ignore.append(csv_file)
            print('')
    n_csv_original = len(csv_files)
    csv_files = [s for s in csv_files if s not in csv_files_to_ignore]
    print('Processing {} of {} csv files'.format(len(csv_files),n_csv_original))
    return image_files,csv_files
#%% Parse each .csv file into sequences (function)
# csv_file = csv_files[-1]
def csv_to_sequences(csv_file):
    """
    Parse one Timelapse .csv metadata file into a list of sequence dicts.

    Derives a location name from the .csv path, loads the .csv with pandas,
    parses per-row datetimes, groups chronologically-adjacent rows (gap <=
    max_gap_within_sequence seconds) into sequences, and collects species
    labels for each sequence from the presence/count/otherwhat columns.

    Args:
        csv_file (str): .csv path relative to input_base

    Returns:
        list of dict: one dict per sequence, with keys 'csv_source',
        'sequence_id', 'images', 'species_present', and 'location'
    """
    print('Processing {}'.format(csv_file))
    csv_file_absolute = os.path.join(input_base,csv_file)
    # os.startfile(csv_file_absolute)
    sequences = []
    # survey = csv_file.split('\\')[0]
    # Sample paths from which we need to derive locations:
    #
    # St.Joe_elk\AM99\Trip 1\100RECNX\TimelapseData.csv
    # Beaverhead_elk\AM34\Trip 1\100RECNX\TimelapseData.csv
    #
    # ClearCreek_mustelids\Winter2015-16\FS-001-P\FS-001-P.csv
    # ClearCreek_mustelids\Summer2015\FS-001\FS-001.csv
    # ClearCreek_mustelids\Summer2016\IDFG-016\IDFG-016.csv
    #
    # I:\idfg-images\ClearCreek_mustelids\Summer2016\IDFG-017b
    # I:\idfg-images\ClearCreek_mustelids\Summer2016\IDFG-017a
    if 'St.Joe_elk' in csv_file or 'Beaverhead_elk' in csv_file:
        location_name = '_'.join(csv_file.split('\\')[0:2]).replace(' ','')
    else:
        assert 'ClearCreek_mustelids' in csv_file
        tokens = csv_file.split('\\')
        assert 'FS-' in tokens[2] or 'IDFG-' in tokens[2]
        location_name = '_'.join([tokens[0],tokens[2]]).replace('-P','')
        # IDFG-017a/IDFG-017b are the same physical location
        if location_name.endswith('017a') or location_name.endswith('017b'):
            location_name = location_name[:-1]
    # Load .csv file
    df = pd.read_csv(csv_file_absolute)
    # Columns filled in below
    df['datetime'] = None
    df['seq_id'] = None
    df['synthetic_frame_number'] = None
    # Validate the opstate column
    opstates = set(df['opstate'])
    for s in opstates:
        if isinstance(s,str):
            s = s.strip()
            if len(s) > 0:
                assert s in valid_opstates,'Invalid opstate: {}'.format(s)
    column_names = list(df.columns)
    for s in required_columns:
        assert s in column_names
    count_columns = [s for s in column_names if s in expected_count_columns]
    presence_columns = [s for s in column_names if s.endswith('present')]
    # NOTE(review): the next two asserts test a non-empty (always-truthy)
    # string, so they can never fire; presumably "assert False, '...'" or a
    # print was intended.  Preserved as-is.
    for s in presence_columns:
        if s not in expected_presence_columns:
            assert 'Unexpected presence column {} in {}'.format(s,csv_file)
    for s in expected_presence_columns:
        if s not in presence_columns:
            assert 'Missing presence column {} in {}'.format(s,csv_file)
    if False:
        for s in expected_count_columns:
            if s not in count_columns:
                print('Missing count column {} in {}'.format(s,csv_file))
    ## Create datetimes
    # print('Creating datetimes')
    # i_row = 0; row = df.iloc[i_row]
    for i_row,row in df.iterrows():
        date = row['Date']
        time = row['Time']
        datestring = date + ' ' + time
        dt = dateutil.parser.parse(datestring)
        # Sanity-check against the survey's known year range
        assert dt.year >= 2015 and dt.year <= 2019
        df.loc[i_row,'datetime'] = dt
    # Make sure data are sorted chronologically
    #
    # In odd circumstances, they are not... so sort them first, but warn
    datetimes = list(df['datetime'])
    if not list_is_sorted(datetimes):
        print('Datetimes not sorted for {}'.format(csv_file))
    df = df.sort_values('datetime')
    df.reset_index(drop=True, inplace=True)
    datetimes = list(df['datetime'])
    assert list_is_sorted(datetimes)
    # Debugging when I was trying to see what was up with the unsorted dates
    if False:
        for i in range(0,len(datetimes)-1):
            dt = datetimes[i+1]
            prev_dt = datetimes[i]
            delta = dt - prev_dt
            assert delta >= datetime.timedelta(0)
    ## Parse into sequences
    # print('Creating sequences')
    current_sequence_id = None
    next_frame_number = 0
    previous_datetime = None
    # Maps sequence ID -> list of row indices in df
    sequence_id_to_rows = defaultdict(list)
    # i_row = 0; row = df.iloc[i_row]
    for i_row,row in df.iterrows():
        dt = row['datetime']
        assert dt is not None and isinstance(dt,datetime.datetime)
        # Start a new sequence if:
        #
        # * This image has no timestamp
        # * This image has a frame number of zero
        # * We have no previous image timestamp
        #
        if previous_datetime is None:
            delta = None
        else:
            delta = (dt - previous_datetime).total_seconds()
        # Start a new sequence if necessary
        if delta is None or delta > max_gap_within_sequence:
            next_frame_number = 0
            current_sequence_id = location_name + '_seq_' + str(dt) # str(uuid.uuid1())
        assert current_sequence_id is not None
        sequence_id_to_rows[current_sequence_id].append(i_row)
        df.loc[i_row,'seq_id'] = current_sequence_id
        df.loc[i_row,'synthetic_frame_number'] = next_frame_number
        next_frame_number = next_frame_number + 1
        previous_datetime = dt
    # ...for each row
    location_sequences = list(set(list(df['seq_id'])))
    location_sequences.sort()
    inconsistent_sequences = []
    ## Parse labels for each sequence
    # sequence_id = location_sequences[0]
    for sequence_id in location_sequences:
        sequence_row_indices = sequence_id_to_rows[sequence_id]
        assert len(sequence_row_indices) > 0
        # Row indices in a sequence should be adjacent
        if len(sequence_row_indices) > 1:
            d = np.diff(sequence_row_indices)
            assert(all(d==1))
        # sequence_df = df[df['seq_id']==sequence_id]
        sequence_df = df.iloc[sequence_row_indices]
        ## Determine what's present
        presence_columns_marked = []
        survey_species = []
        other_species = []
        # Be conservative; assume humans are present in all maintenance images
        # NOTE(review): 'optstate' typo below is in a runtime string; preserved
        opstates = set(sequence_df['opstate'])
        assert all([ ( (isinstance(s,float)) or (len(s.strip())== 0) or (s.strip() in valid_opstates)) for s in opstates]),\
            'Invalid optstate in: {}'.format(' | '.join(opstates))
        for presence_column in presence_columns:
            presence_values = list(sequence_df[presence_column])
            # The presence columns are *almost* always identical for all images in a sequence
            single_presence_value = (len(set(presence_values)) == 1)
            # assert single_presence_value
            if not single_presence_value:
                # print('Warning: presence value for {} is inconsistent for {}'.format(presence_column,sequence_id))
                inconsistent_sequences.append(sequence_id)
            if any(presence_values):
                presence_columns_marked.append(presence_column)
        # ...for each presence column
        # Tally up the standard (survey) species
        survey_species = [s.replace('present','') for s in presence_columns_marked if s != 'otherpresent']
        # Fold non-'normal' opstates (after normalization) into the species list
        for opstate in opstates:
            if not isinstance(opstate,str):
                continue
            opstate = opstate.strip()
            if len(opstate) == 0:
                continue
            if opstate in opstate_mappings:
                opstate = opstate_mappings[opstate]
            if (opstate != 'normal') and (opstate not in survey_species):
                survey_species.append(opstate)
        # If no presence columns are marked, all counts should be zero
        if len(presence_columns_marked) == 0:
            # count_column = count_columns[0]
            for count_column in count_columns:
                values = list(set(list(sequence_df[count_column])))
                # Occasionally a count gets entered (correctly) without the presence column being marked
                # assert len(values) == 1 and values[0] == 0, 'Non-zero counts with no presence columns marked for sequence {}'.format(sequence_id)
                if (not(len(values) == 1 and values[0] == 0)):
                    print('Warning: presence and counts are inconsistent for {}'.format(sequence_id))
                    # Handle this by virtually checking the "right" box
                    for presence_column in presence_to_count_columns.keys():
                        count_columns_this_species = presence_to_count_columns[presence_column]
                        if count_column in count_columns_this_species:
                            if presence_column not in presence_columns_marked:
                                presence_columns_marked.append(presence_column)
                    # Make sure we found a match
                    assert len(presence_columns_marked) > 0
        # Handle 'other' tags
        if 'otherpresent' in presence_columns_marked:
            # Free-text species come from both 'otherwhat' and 'comment'
            sequence_otherwhats = set()
            sequence_comments = set()
            for i,r in sequence_df.iterrows():
                otherwhat = r['otherwhat']
                if isinstance(otherwhat,str):
                    otherwhat = otherwhat.strip()
                    if len(otherwhat) > 0:
                        sequence_otherwhats.add(otherwhat)
                comment = r['comment']
                if isinstance(comment,str):
                    comment = comment.strip()
                    if len(comment) > 0:
                        sequence_comments.add(comment)
            freetext_species = []
            for s in sequence_otherwhats:
                freetext_species.append(s)
            for s in sequence_comments:
                freetext_species.append(s)
            counted_species = []
            otherpresent_columns = presence_to_count_columns['otherpresent']
            # column_name = otherpresent_columns[0]
            for column_name in otherpresent_columns:
                if column_name in sequence_df and column_name != 'other':
                    column_counts = list(sequence_df[column_name])
                    column_count_positive = any([c > 0 for c in column_counts])
                    if column_count_positive:
                        # print('Found non-survey counted species column: {}'.format(column_name))
                        counted_species.append(column_name)
            # ...for each non-empty presence column
            # Very rarely, the "otherpresent" column is checked, but no more detail is available
            if not ( (len(freetext_species) > 0) or (len(counted_species) > 0) ):
                other_species.append('unknown')
            other_species += freetext_species
            other_species += counted_species
        # ...handling non-survey species
        all_species = other_species + survey_species
        # Build the sequence data
        images = []
        # i_row = 0; row = sequence_df.iloc[i_row]
        for i_row,row in sequence_df.iterrows():
            im = {}
            # Only one folder used a single .csv file for two subfolders
            if ('RelativePath' in row) and (isinstance(row['RelativePath'],str)) and (len(row['RelativePath'].strip()) > 0):
                assert 'IDFG-028' in location_name
                im['file_name'] = os.path.join(row['RelativePath'],row['File'])
            else:
                im['file_name'] = row['File']
            im['datetime'] = row['datetime']
            images.append(im)
        sequence = {}
        sequence['csv_source'] = csv_file
        sequence['sequence_id'] = sequence_id
        sequence['images'] = images
        sequence['species_present'] = all_species
        sequence['location'] = location_name
        sequences.append(sequence)
    # ...for each sequence
    return sequences
# ...def csv_to_sequences()
#%% Parse each .csv file into sequences (loop)
if __name__ == "__main__":
#%%
import multiprocessing
multiprocessing.freeze_support()
image_files,csv_files = get_files()
#%%
if n_threads == 1:
# i_file = -1; csv_file = csv_files[i_file]
sequences_by_file = []
for i_file,csv_file in enumerate(csv_files):
print('Processing file {} of {}'.format(i_file,len(csv_files)))
sequences = csv_to_sequences(csv_file)
sequences_by_file.append(sequences)
else:
pool = Pool(n_threads)
sequences_by_file = list(pool.imap(csv_to_sequences,csv_files))
#%% Save sequence data
with open(sequence_info_cache,'w') as f:
json.dump(sequences_by_file,f,indent=2,default=json_util.default)
#%% Load sequence data
if False:
#%%
with open(sequence_info_cache,'r') as f:
sequences_by_file = json.load(f,object_hook=json_util.object_hook)
#%% Validate file mapping (based on the existing enumeration)
missing_images = []
image_files_set = set(image_files)
n_images_in_sequences = 0
sequence_ids = set()
# sequences = sequences_by_file[0]
for i_sequences,sequences in enumerate(tqdm(sequences_by_file)):
assert len(sequences) > 0
csv_source = sequences[0]['csv_source']
csv_file_absolute = os.path.join(input_base,csv_source)
csv_folder = os.path.dirname(csv_file_absolute)
assert os.path.isfile(csv_file_absolute)
# sequence = sequences[0]
for i_sequence,sequence in enumerate(sequences):
assert sequence['csv_source'] == csv_source
sequence_id = sequence['sequence_id']
if sequence_id in sequence_ids:
print('Warning: duplicate sequence for {}, creating new sequence'.format(sequence_id))
sequence['sequence_id'] = sequence['sequence_id'] + '_' + str(i_sequences) + '_' + str(i_sequence)
sequence_id = sequence['sequence_id']
assert sequence_id not in sequence_ids
sequence_ids.add(sequence_id)
species_present = sequence['species_present']
images = sequence['images']
for im in images:
n_images_in_sequences += 1
image_file_relative = im['file_name']
# Actually, one folder has relative paths
# assert '\\' not in image_file_relative and '/' not in image_file_relative
image_file_absolute = os.path.join(csv_folder,image_file_relative)
image_file_container_relative = os.path.relpath(image_file_absolute,input_base)
# os.startfile(csv_folder)
# assert os.path.isfile(image_file_absolute)
# found_file = os.path.isfile(image_file_absolute)
found_file = image_file_container_relative in image_files_set
if not found_file:
print('Warning: can\'t find image {}'.format(image_file_absolute))
missing_images.append(image_file_absolute)
# ...for each image
# ...for each sequence
# ...for each .csv file
print('{} of {} images missing ({} on disk)'.format(len(missing_images),n_images_in_sequences,
len(image_files)))
#%% Load manual category mappings
with open(category_mapping_file,'r') as f:
category_mapping_lines = f.readlines()
category_mapping_lines = [s.strip() for s in category_mapping_lines]
category_mappings = {}
for s in category_mapping_lines:
tokens = s.split(',',1)
category_name = tokens[0].strip()
category_value = tokens[1].strip().replace('"','').replace(',','+')
assert ',' not in category_name
assert ',' not in category_value
# The second column is blank when the first column already represents the category name
if len(category_value) == 0:
category_value = category_name
category_mappings[category_name] = category_value
#%% Convert to CCT .json (original strings)
human_flagged_images = []
with open(human_review_list,'r') as f:
human_flagged_images = f.readlines()
human_flagged_images = [s.strip().replace('/','\\') for s in human_flagged_images]
human_flagged_images = set(human_flagged_images)
print('Read {} human flagged images'.format(len(human_flagged_images)))
annotations = []
image_id_to_image = {}
category_name_to_category = {}
# Force the empty category to be ID 0
empty_category_id = 0
empty_category = {}
empty_category['id'] = empty_category_id
empty_category['name'] = 'empty'
category_name_to_category['empty'] = empty_category
human_category_id = 1
human_category = {}
human_category['id'] = human_category_id
human_category['name'] = 'human'
category_name_to_category['human'] = human_category
next_category_id = 2
annotation_ids = set()
if False:
target_folder = r'ClearCreek_mustelids\Summer2015\FS-035'
for sequences in sequences_by_file:
if target_folder in sequences[0]['csv_source']:
break
# For each .csv file...
#
# sequences = sequences_by_file[0]
for sequences in tqdm(sequences_by_file):
# For each sequence...
#
# sequence = sequences[0]
for sequence in sequences:
species_present = sequence['species_present']
species_present = [s.lower().strip().replace(',',';') for s in species_present]
sequence_images = sequence['images']
location = sequence['location'].lower().strip()
sequence_id = sequence['sequence_id']
csv_source = sequence['csv_source']
csv_folder_relative = os.path.dirname(csv_source)
sequence_category_ids = set()
# Find categories for this image
if len(species_present) == 0:
sequence_category_ids.add(0)
assert category_name_to_category['empty']['id'] == list(sequence_category_ids)[0]
else:
# When 'unknown' is used in combination with another label, use that
# label; the "unknown" here doesn't mean "another unknown species", it means
# there is some other unknown property about the main species.
if 'unknown' in species_present and len(species_present) > 1:
assert all([((s in category_mappings) or (s in valid_opstates) or (s in opstate_mappings.values()))\
for s in species_present if s != 'unknown'])
species_present = [s for s in species_present if s != 'unknown']
# category_name_string = species_present[0]
for category_name_string in species_present:
# This piece of text had a lot of complicated syntax in it, and it would have
# been too complicated to handle in a general way
if 'coyotoes' in category_name_string:
# print('Ignoring category {}'.format(category_name_string))
continue
if category_name_string not in category_mappings:
assert category_name_string in valid_opstates or category_name_string in opstate_mappings.values()
else:
category_name_string = category_mappings[category_name_string]
assert ',' not in category_name_string
category_names = category_name_string.split('+')
assert len(category_names) <= 2
# Don't process redundant labels
category_names = set(category_names)
# category_name = category_names[0]
for category_name in category_names:
if category_name == 'ignore':
continue
category_name = category_name.replace('"','')
# If we've seen this category before...
if category_name in category_name_to_category:
category = category_name_to_category[category_name]
category_id = category['id']
# If this is a new category...
else:
# print('Adding new category for {}'.format(category_name))
category_id = next_category_id
category = {}
category['id'] = category_id
category['name'] = category_name
category_name_to_category[category_name] = category
next_category_id += 1
sequence_category_ids.add(category_id)
# ...for each category (inner)
# ...for each category (outer)
# ...if we do/don't have species in this sequence
# We should have at least one category assigned (which may be "empty" or "unknown")
assert len(sequence_category_ids) > 0
# assert len(sequence_category_ids) > 0
# Was any image in this sequence manually flagged as human?
for i_image,im in enumerate(sequence_images):
file_name_relative = os.path.join(csv_folder_relative,im['file_name'])
if file_name_relative in human_flagged_images:
# print('Flagging sequence {} as human based on manual review'.format(sequence_id))
assert human_category_id not in sequence_category_ids
sequence_category_ids.add(human_category_id)
break
# For each image in this sequence...
#
# i_image = 0; im = images[i_image]
for i_image,im in enumerate(sequence_images):
image_id = sequence_id + '_' + im['file_name']
assert image_id not in image_id_to_image
output_im = {}
output_im['id'] = image_id
output_im['file_name'] = os.path.join(csv_folder_relative,im['file_name'])
output_im['seq_id'] = sequence_id
output_im['seq_num_frames'] = len(sequence)
output_im['frame_num'] = i_image
output_im['datetime'] = str(im['datetime'])
output_im['location'] = location
image_id_to_image[image_id] = output_im
# Create annotations for this image
for i_ann,category_id in enumerate(sequence_category_ids):
ann = {}
ann['id'] = 'ann_' + image_id + '_' + str(i_ann)
assert ann['id'] not in annotation_ids
annotation_ids.add(ann['id'])
ann['image_id'] = image_id
ann['category_id'] = category_id
ann['sequence_level_annotation'] = True
annotations.append(ann)
# ...for each image in this sequence
# ...for each sequence
# ...for each .csv file
# Collect the accumulated image/category dicts into lists and emit the first
# COCO-camera-traps .json, still keyed by the original (un-remapped) string IDs.
images = list(image_id_to_image.values())
categories = list(category_name_to_category.values())
print('Loaded {} annotations in {} categories for {} images'.format(
len(annotations),len(categories),len(images)))
# Verify that all images have annotations
image_id_to_annotations = defaultdict(list)
# ann = ict_data['annotations'][0]
# For debugging only
categories_to_counts = defaultdict(int)
for ann in tqdm(annotations):
image_id_to_annotations[ann['image_id']].append(ann)
categories_to_counts[ann['category_id']] = categories_to_counts[ann['category_id']] + 1
# Every image must carry at least one annotation (possibly 'empty')
for im in tqdm(images):
image_annotations = image_id_to_annotations[im['id']]
assert len(image_annotations) > 0
#%% Create output (original strings)
info = {}
info['contributor'] = 'Idaho Department of Fish and Game'
info['description'] = 'Idaho Camera traps'
info['version'] = '2021.07.19'
output_data = {}
output_data['images'] = images
output_data['annotations'] = annotations
output_data['categories'] = categories
output_data['info'] = info
with open(output_json_original_strings,'w') as f:
json.dump(output_data,f,indent=1)
#%% Validate .json file
# Integrity check only: image sizes/existence are skipped for speed
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json_original_strings, options)
#%% Preview labels
# Render an HTML preview of a sample of labeled images for manual review
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1000
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
viz_options.classes_to_exclude = ['empty','deer','elk']
html_output_file, _ = visualize_db.process_images(db_path=output_json_original_strings,
output_dir=os.path.join(
output_base,'preview'),
image_base_dir=input_base,
options=viz_options)
os.startfile(html_output_file)
#%% Look for humans that were found by MegaDetector that haven't already been identified as human
# This whole step only needed to get run once
if False:
pass
#%%
# MD detections at/above this confidence are treated as human candidates
human_confidence_threshold = 0.5
# Load MD results
with open(megadetector_results_file,'r') as f:
md_results = json.load(f)
# Get a list of filenames that MD tagged as human
# 'person' and 'vehicle' both count as human-related categories here
human_md_categories =\
[category_id for category_id in md_results['detection_categories'] if \
((md_results['detection_categories'][category_id] == 'person') or \
(md_results['detection_categories'][category_id] == 'vehicle'))]
assert len(human_md_categories) == 2
# im = md_results['images'][0]
# Build the set of images where MD found a confident person/vehicle detection
md_human_images = set()
for im in md_results['images']:
if 'detections' not in im:
continue
if im['max_detection_conf'] < human_confidence_threshold:
continue
for detection in im['detections']:
if detection['category'] not in human_md_categories:
continue
elif detection['conf'] < human_confidence_threshold:
continue
else:
md_human_images.add(im['file'])
break
# ...for each detection
# ...for each image
print('MD found {} potential human images (of {})'.format(len(md_human_images),len(md_results['images'])))
# Map images to annotations in ICT
with open(output_json_original_strings,'r') as f:
ict_data = json.load(f)
category_id_to_name = {c['id']:c['name'] for c in categories}
image_id_to_annotations = defaultdict(list)
# ann = ict_data['annotations'][0]
for ann in tqdm(ict_data['annotations']):
image_id_to_annotations[ann['image_id']].append(ann)
human_ict_categories = ['human']
# Images already labeled as human in the source metadata
manual_human_images = set()
# For every image
# im = ict_data['images'][0]
for im in tqdm(ict_data['images']):
# Does this image already have a human annotation?
manual_human = False
annotations = image_id_to_annotations[im['id']]
assert len(annotations) > 0
for ann in annotations:
category_name = category_id_to_name[ann['category_id']]
if category_name in human_ict_categories:
manual_human_images.add(im['file_name'].replace('\\','/'))
# ...for each annotation
# ...for each image
print('{} images identified as human in source metadata'.format(len(manual_human_images)))
# MD-flagged images not already tagged as human need manual review
missing_human_images = []
for fn in md_human_images:
if fn not in manual_human_images:
missing_human_images.append(fn)
print('{} potentially untagged human images'.format(len(missing_human_images)))
#%% Copy images for review to a new folder
os.makedirs(human_review_folder,exist_ok=True)
missing_human_images.sort()
# fn = missing_human_images[0]
# Flatten paths into one folder, prefixing a sort index and replacing '/' with '~'
for i_image,fn in enumerate(tqdm(missing_human_images)):
input_fn_absolute = os.path.join(input_base,fn).replace('\\','/')
assert os.path.isfile(input_fn_absolute)
output_path = os.path.join(human_review_folder,str(i_image).zfill(4) + '_' + fn.replace('/','~'))
shutil.copyfile(input_fn_absolute,output_path)
#%% Manual step...
# Copy any images from that list that have humans in them to...
human_review_selection_folder = r'H:\idaho-camera-traps\human_review_selections'
assert os.path.isdir(human_review_selection_folder)
#%% Create a list of the images we just manually flagged
human_tagged_filenames = os.listdir(human_review_selection_folder)
human_tagged_relative_paths = []
# fn = human_tagged_filenames[0]
for fn in human_tagged_filenames:
# E.g. '0000_Beaverhead_elk~AM174~Trip 1~100RECNX~IMG_1397.JPG'
# Strip the 4-digit index prefix and restore '/' separators
relative_path = fn[5:].replace('~','/')
human_tagged_relative_paths.append(relative_path)
with open(human_review_list,'w') as f:
for s in human_tagged_relative_paths:
f.write(s + '\n')
#%% Translate location, image, sequence IDs
# Load mappings if available
# Mappings are persisted so re-runs produce stable IDs; only regenerate when
# forced or when no mapping file exists yet.
if (not force_generate_mappings) and (os.path.isfile(id_mapping_file)):
print('Loading ID mappings from {}'.format(id_mapping_file))
with open(id_mapping_file,'r') as f:
mappings = json.load(f)
image_id_mappings = mappings['image_id_mappings']
annotation_id_mappings = mappings['annotation_id_mappings']
location_id_mappings = mappings['location_id_mappings']
sequence_id_mappings = mappings['sequence_id_mappings']
else:
# Generate mappings
mappings = {}
next_location_id = 0
location_id_string_to_n_sequences = defaultdict(int)
location_id_string_to_n_images = defaultdict(int)
image_id_mappings = {}
annotation_id_mappings = {}
location_id_mappings = {}
sequence_id_mappings = {}
for im in tqdm(images):
# If we've seen this location before...
if im['location'] in location_id_mappings:
location_id = location_id_mappings[im['location']]
else:
# Otherwise assign a string-formatted int as the ID
location_id = str(next_location_id)
location_id_mappings[im['location']] = location_id
next_location_id += 1
# If we've seen this sequence before...
if im['seq_id'] in sequence_id_mappings:
sequence_id = sequence_id_mappings[im['seq_id']]
else:
# Otherwise assign a string-formatted int as the ID
n_sequences_this_location = location_id_string_to_n_sequences[location_id]
sequence_id = 'loc_{}_seq_{}'.format(location_id.zfill(4),str(n_sequences_this_location).zfill(6))
sequence_id_mappings[im['seq_id']] = sequence_id
n_sequences_this_location += 1
location_id_string_to_n_sequences[location_id] = n_sequences_this_location
assert im['id'] not in image_id_mappings
# Assign an image ID
# Image IDs embed the location and a per-location counter, e.g. loc_0001_im_000042
n_images_this_location = location_id_string_to_n_images[location_id]
image_id_mappings[im['id']] = 'loc_{}_im_{}'.format(location_id.zfill(4),str(n_images_this_location).zfill(6))
n_images_this_location += 1
location_id_string_to_n_images[location_id] = n_images_this_location
# ...for each image
# Assign annotation mappings
for i_ann,ann in enumerate(tqdm(annotations)):
assert ann['image_id'] in image_id_mappings
assert ann['id'] not in annotation_id_mappings
annotation_id_mappings[ann['id']] = 'ann_{}'.format(str(i_ann).zfill(8))
mappings['image_id_mappings'] = image_id_mappings
mappings['annotation_id_mappings'] = annotation_id_mappings
mappings['location_id_mappings'] = location_id_mappings
mappings['sequence_id_mappings'] = sequence_id_mappings
# Save mappings
with open(id_mapping_file,'w') as f:
json.dump(mappings,f,indent=2)
print('Saved ID mappings to {}'.format(id_mapping_file))
# Back this file up, lest we should accidentally re-run this script with force_generate_mappings = True
# and overwrite the mappings we used.
datestr = str(datetime.datetime.now()).replace(':','-')
backup_file = id_mapping_file.replace('.json','_' + datestr + '.json')
shutil.copyfile(id_mapping_file,backup_file)
# ...if we are/aren't re-generating mappings
#%% Apply mappings
# Rewrite every image/annotation ID in place using the mapping tables
for im in images:
im['id'] = image_id_mappings[im['id']]
im['seq_id'] = sequence_id_mappings[im['seq_id']]
im['location'] = location_id_mappings[im['location']]
for ann in annotations:
ann['id'] = annotation_id_mappings[ann['id']]
ann['image_id'] = image_id_mappings[ann['image_id']]
print('Applied mappings')
#%% Write new dictionaries (modified strings, original files)
output_data = {}
output_data['images'] = images
output_data['annotations'] = annotations
output_data['categories'] = categories
output_data['info'] = info
with open(output_json_remapped_ids,'w') as f:
json.dump(output_data,f,indent=2)
#%% Validate .json file (modified strings, original files)
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json_remapped_ids, options)
#%% Preview labels (original files)
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1000
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
# viz_options.classes_to_exclude = ['empty','deer','elk']
# viz_options.classes_to_include = ['bobcat']
# Only preview images carrying more than one category label
viz_options.classes_to_include = [viz_options.multiple_categories_tag]
html_output_file, _ = visualize_db.process_images(db_path=output_json_remapped_ids,
output_dir=os.path.join(
output_base,'preview'),
image_base_dir=input_base,
options=viz_options)
os.startfile(html_output_file)
#%% Copy images to final output folder (prep)
# Build the set of image IDs that must go to the private folder (images
# containing humans, dogs, or vehicles).
force_copy = False
with open(output_json_remapped_ids,'r') as f:
d = json.load(f)
images = d['images']
private_categories = ['human','domestic dog','vehicle']
private_image_ids = set()
category_id_to_name = {c['id']:c['name'] for c in d['categories']}
# ann = d['annotations'][0]
for ann in d['annotations']:
category_name = category_id_to_name[ann['category_id']]
if category_name in private_categories:
private_image_ids.add(ann['image_id'])
print('Moving {} of {} images to the private folder'.format(len(private_image_ids),len(images)))
def process_image(im):
    """Copy a single image into the public or private output tree and point its
    'file_name' field at the new relative path.

    Depends on module-level state: input_base, private_image_ids,
    output_image_base_public, output_image_base_private, force_copy.
    Missing source files produce a warning and are skipped.
    """
    src_relative = im['file_name']
    src_absolute = os.path.join(input_base, src_relative)
    if not os.path.isfile(src_absolute):
        print('Warning: file {} is not available'.format(src_absolute))
        return

    location = im['location']
    image_id = im['id']
    location_folder = 'loc_' + location.zfill(4)
    # Remapped image IDs embed the location folder name; sanity-check that
    assert location_folder in image_id
    dst_relative = location_folder + '/' + image_id + '.jpg'

    # Human/dog/vehicle images go to a separate private root
    if image_id in private_image_ids:
        dst_absolute = os.path.join(output_image_base_private, dst_relative)
    else:
        dst_absolute = os.path.join(output_image_base_public, dst_relative)

    os.makedirs(os.path.dirname(dst_absolute), exist_ok=True)
    if force_copy or not os.path.isfile(dst_absolute):
        shutil.copyfile(src_absolute, dst_absolute)

    # Update the filename reference to the output-relative path
    im['file_name'] = dst_relative

# ...def process_image(im)
#%% Copy images to final output folder (execution)
# For each image
# Serial loop for debugging, thread pool otherwise
if n_threads_file_copy == 1:
# im = images[0]
for im in tqdm(images):
process_image(im)
else:
pool = ThreadPool(n_threads_file_copy)
pool.map(process_image,images)
print('Finished copying, writing .json output')
# Write output .json
with open(output_json,'w') as f:
json.dump(d,f,indent=1)
#%% Make sure the right number of images got there
# Enumerate everything under the output tree and persist the list
from pathlib import Path
all_output_files = []
all_output_files_list = os.path.join(output_base,'all_output_files.json')
for path in Path(output_image_base).rglob('*.*'):
path = str(path)
path = os.path.relpath(path,output_image_base)
all_output_files.append(path)
with open(all_output_files_list,'w') as f:
json.dump(all_output_files,f,indent=1)
print('Enumerated {} output files (of {} images)'.format(len(all_output_files),len(images)))
#%% Validate .json file (final filenames)
from data_management.databases import sanity_check_json_db
options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = input_base
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False
_, _, _ = sanity_check_json_db.sanity_check_json_db(output_json, options)
#%% Preview labels (final filenames)
# Spot-check selected classes against the public output tree
from visualization import visualize_db
viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 1500
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True
# viz_options.classes_to_exclude = ['empty','deer','elk']
viz_options.classes_to_include = ['bear','mountain lion']
# viz_options.classes_to_include = ['horse']
# viz_options.classes_to_include = [viz_options.multiple_categories_tag]
# viz_options.classes_to_include = ['human','vehicle','domestic dog']
html_output_file, _ = visualize_db.process_images(db_path=output_json,
output_dir=os.path.join(
output_base,'final-preview-01'),
image_base_dir=output_image_base_public,
options=viz_options)
os.startfile(html_output_file)
#%% Create zipfiles
#%% List public files
# Enumerate (or reload) the list of public output files
from pathlib import Path
all_public_output_files = []
all_public_output_files_list = os.path.join(output_base,'all_public_output_files.json')
if not os.path.isfile(all_public_output_files_list):
for path in Path(output_image_base_public).rglob('*.*'):
path = str(path)
path = os.path.relpath(path,output_image_base)
all_public_output_files.append(path)
with open(all_public_output_files_list,'w') as f:
json.dump(all_public_output_files,f,indent=1)
else:
with open(all_public_output_files_list,'r') as f:
all_public_output_files = json.load(f)
print('Enumerated {} public output files'.format(len(all_public_output_files)))
#%% Find the size of each file
# Compute (or reload) per-file sizes, used to split the archive into parts
filename_to_size = {}
all_public_output_sizes_list = os.path.join(output_base,'all_public_output_sizes.json')
if not os.path.isfile(all_public_output_sizes_list):
# fn = all_public_output_files[0]
for fn in tqdm(all_public_output_files):
p = os.path.join(output_image_base,fn)
assert os.path.isfile(p)
filename_to_size[fn] = os.path.getsize(p)
with open(all_public_output_sizes_list,'w') as f:
json.dump(filename_to_size,f,indent=1)
else:
with open(all_public_output_sizes_list,'r') as f:
filename_to_size = json.load(f)
assert len(filename_to_size) == len(all_public_output_files)
#%% Split into chunks of approximately-equal size
import humanfriendly
total_size = sum(filename_to_size.values())
print('{} in {} files'.format(humanfriendly.format_size(total_size),len(all_public_output_files)))
# Greedy split: close each chunk once it exceeds ~320 GB
bytes_per_part = 320e9
file_lists = []
current_file_list = []
n_bytes_current_file_list = 0
for fn in all_public_output_files:
size = filename_to_size[fn]
current_file_list.append(fn)
n_bytes_current_file_list += size
if n_bytes_current_file_list > bytes_per_part:
file_lists.append(current_file_list)
current_file_list = []
n_bytes_current_file_list = 0
# ...for each file
# Don't forget the final (partial) chunk
file_lists.append(current_file_list)
assert sum([len(l) for l in file_lists]) == len(all_public_output_files)
print('List sizes:')
for l in file_lists:
print(len(l))
#%% Create a zipfile for each chunk
from zipfile import ZipFile
import zipfile
import os
def create_zipfile(i_file_list):
    """Archive the i_file_list-th chunk of public images into an uncompressed
    zipfile on the k: drive.

    Depends on module-level state: file_lists, output_image_base.  Every path
    in the chunk is expected to start with 'public'.
    """
    chunk = file_lists[i_file_list]
    zipfile_name = os.path.join('k:\\idaho-camera-traps-images.part_{}.zip'.format(i_file_list))
    print('Processing archive {} to file {}'.format(i_file_list,zipfile_name))
    with ZipFile(zipfile_name, 'w') as zf:
        for rel_path in chunk:
            assert rel_path.startswith('public')
            abs_path = os.path.join(output_image_base, rel_path)
            # JPEGs are already compressed, so store them as-is (ZIP_STORED)
            zf.write(abs_path.replace('\\','/'), rel_path, compress_type=zipfile.ZIP_STORED)
        # ...for each filename
    # with ZipFile()

# ...def create_zipfile()
# i_file_list = 0; file_list = file_lists[i_file_list]
# Serial zipping by default; the threaded path parallelizes over chunks
n_zip_threads = 1 # len(file_lists)
if n_zip_threads == 1:
for i_file_list in range(0,len(file_lists)):
create_zipfile(i_file_list)
else:
pool = ThreadPool(n_zip_threads)
indices = list(range(0,len(file_lists)))
pool.map(create_zipfile,indices)
| [
"pandas.read_csv",
"collections.defaultdict",
"os.path.isfile",
"pathlib.Path",
"visualization.visualize_db.DbVizOptions",
"os.path.join",
"os.path.dirname",
"humanfriendly.format_size",
"datetime.timedelta",
"multiprocessing.pool.Pool",
"shutil.copyfile",
"datetime.datetime.now",
"os.startf... | [((664, 689), 'os.path.isdir', 'os.path.isdir', (['input_base'], {}), '(input_base)\n', (677, 689), False, 'import os\n'), ((697, 723), 'os.path.isdir', 'os.path.isdir', (['output_base'], {}), '(output_base)\n', (710, 723), False, 'import os\n'), ((731, 763), 'os.path.isdir', 'os.path.isdir', (['output_image_base'], {}), '(output_image_base)\n', (744, 763), False, 'import os\n'), ((792, 833), 'os.path.join', 'os.path.join', (['output_image_base', '"""public"""'], {}), "(output_image_base, 'public')\n", (804, 833), False, 'import os\n'), ((861, 903), 'os.path.join', 'os.path.join', (['output_image_base', '"""private"""'], {}), "(output_image_base, 'private')\n", (873, 903), False, 'import os\n'), ((1168, 1212), 'os.path.join', 'os.path.join', (['output_base', '"""id_mapping.json"""'], {}), "(output_base, 'id_mapping.json')\n", (1180, 1212), False, 'import os\n'), ((1494, 1543), 'os.path.join', 'os.path.join', (['output_base', '"""category_mapping.csv"""'], {}), "(output_base, 'category_mapping.csv')\n", (1506, 1543), False, 'import os\n'), ((1621, 1690), 'os.path.join', 'os.path.join', (['output_base', '"""idaho-camera-traps-original-strings.json"""'], {}), "(output_base, 'idaho-camera-traps-original-strings.json')\n", (1633, 1690), False, 'import os\n'), ((1791, 1856), 'os.path.join', 'os.path.join', (['output_base', '"""idaho-camera-traps-remapped-ids.json"""'], {}), "(output_base, 'idaho-camera-traps-remapped-ids.json')\n", (1803, 1856), False, 'import os\n'), ((1940, 1992), 'os.path.join', 'os.path.join', (['output_base', '"""idaho-camera-traps.json"""'], {}), "(output_base, 'idaho-camera-traps.json')\n", (1952, 1992), False, 'import os\n'), ((2320, 2361), 'os.path.join', 'os.path.join', (['output_base', '"""human_review"""'], {}), "(output_base, 'human_review')\n", (2332, 2361), False, 'import os\n'), ((2501, 2553), 'os.path.join', 'os.path.join', (['output_base', '"""human_review_selections"""'], {}), "(output_base, 
'human_review_selections')\n", (2513, 2553), False, 'import os\n'), ((2692, 2745), 'os.path.join', 'os.path.join', (['output_base', '"""human_flagged_images.txt"""'], {}), "(output_base, 'human_flagged_images.txt')\n", (2704, 2745), False, 'import os\n'), ((2822, 2869), 'os.path.join', 'os.path.join', (['output_base', '"""sequence_info.json"""'], {}), "(output_base, 'sequence_info.json')\n", (2834, 2869), False, 'import os\n'), ((4401, 4444), 'os.path.join', 'os.path.join', (['output_base', '"""all_files.json"""'], {}), "(output_base, 'all_files.json')\n", (4413, 4444), False, 'import os\n'), ((5956, 5973), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5967, 5973), False, 'from collections import defaultdict\n'), ((6901, 6935), 'os.path.join', 'os.path.join', (['input_base', 'csv_file'], {}), '(input_base, csv_file)\n', (6913, 6935), False, 'import os\n'), ((8065, 8095), 'pandas.read_csv', 'pd.read_csv', (['csv_file_absolute'], {}), '(csv_file_absolute)\n', (8076, 8095), True, 'import pandas as pd\n'), ((10508, 10525), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10519, 10525), False, 'from collections import defaultdict\n'), ((18974, 19006), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (19004, 19006), False, 'import multiprocessing\n'), ((24618, 24641), 'tqdm.tqdm', 'tqdm', (['sequences_by_file'], {}), '(sequences_by_file)\n', (24622, 24641), False, 'from tqdm import tqdm\n'), ((31778, 31795), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (31789, 31795), False, 'from collections import defaultdict\n'), ((31897, 31913), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (31908, 31913), False, 'from collections import defaultdict\n'), ((31929, 31946), 'tqdm.tqdm', 'tqdm', (['annotations'], {}), '(annotations)\n', (31933, 31946), False, 'from tqdm import tqdm\n'), ((32128, 32140), 'tqdm.tqdm', 'tqdm', (['images'], {}), '(images)\n', 
(32132, 32140), False, 'from tqdm import tqdm\n'), ((32886, 32927), 'data_management.databases.sanity_check_json_db.SanityCheckOptions', 'sanity_check_json_db.SanityCheckOptions', ([], {}), '()\n', (32925, 32927), False, 'from data_management.databases import sanity_check_json_db\n'), ((33092, 33177), 'data_management.databases.sanity_check_json_db.sanity_check_json_db', 'sanity_check_json_db.sanity_check_json_db', (['output_json_original_strings', 'options'], {}), '(output_json_original_strings, options\n )\n', (33133, 33177), False, 'from data_management.databases import sanity_check_json_db\n'), ((33285, 33312), 'visualization.visualize_db.DbVizOptions', 'visualize_db.DbVizOptions', ([], {}), '()\n', (33310, 33312), False, 'from visualization import visualize_db\n'), ((34079, 34109), 'os.startfile', 'os.startfile', (['html_output_file'], {}), '(html_output_file)\n', (34091, 34109), False, 'import os\n'), ((43718, 43759), 'data_management.databases.sanity_check_json_db.SanityCheckOptions', 'sanity_check_json_db.SanityCheckOptions', ([], {}), '()\n', (43757, 43759), False, 'from data_management.databases import sanity_check_json_db\n'), ((43924, 44000), 'data_management.databases.sanity_check_json_db.sanity_check_json_db', 'sanity_check_json_db.sanity_check_json_db', (['output_json_remapped_ids', 'options'], {}), '(output_json_remapped_ids, options)\n', (43965, 44000), False, 'from data_management.databases import sanity_check_json_db\n'), ((44130, 44157), 'visualization.visualize_db.DbVizOptions', 'visualize_db.DbVizOptions', ([], {}), '()\n', (44155, 44157), False, 'from visualization import visualize_db\n'), ((45053, 45083), 'os.startfile', 'os.startfile', (['html_output_file'], {}), '(html_output_file)\n', (45065, 45083), False, 'import os\n'), ((47803, 47853), 'os.path.join', 'os.path.join', (['output_base', '"""all_output_files.json"""'], {}), "(output_base, 'all_output_files.json')\n", (47815, 47853), False, 'import os\n'), ((48349, 48390), 
'data_management.databases.sanity_check_json_db.SanityCheckOptions', 'sanity_check_json_db.SanityCheckOptions', ([], {}), '()\n', (48388, 48390), False, 'from data_management.databases import sanity_check_json_db\n'), ((48555, 48618), 'data_management.databases.sanity_check_json_db.sanity_check_json_db', 'sanity_check_json_db.sanity_check_json_db', (['output_json', 'options'], {}), '(output_json, options)\n', (48596, 48618), False, 'from data_management.databases import sanity_check_json_db\n'), ((48741, 48768), 'visualization.visualize_db.DbVizOptions', 'visualize_db.DbVizOptions', ([], {}), '()\n', (48766, 48768), False, 'from visualization import visualize_db\n'), ((49812, 49842), 'os.startfile', 'os.startfile', (['html_output_file'], {}), '(html_output_file)\n', (49824, 49842), False, 'import os\n'), ((50002, 50059), 'os.path.join', 'os.path.join', (['output_base', '"""all_public_output_files.json"""'], {}), "(output_base, 'all_public_output_files.json')\n", (50014, 50059), False, 'import os\n'), ((50763, 50820), 'os.path.join', 'os.path.join', (['output_base', '"""all_public_output_sizes.json"""'], {}), "(output_base, 'all_public_output_sizes.json')\n", (50775, 50820), False, 'import os\n'), ((4492, 4522), 'os.path.isfile', 'os.path.isfile', (['all_files_list'], {}), '(all_files_list)\n', (4506, 4522), False, 'import os\n'), ((6050, 6069), 'os.path.dirname', 'os.path.dirname', (['fn'], {}), '(fn)\n', (6065, 6069), False, 'import os\n'), ((9460, 9493), 'dateutil.parser.parse', 'dateutil.parser.parse', (['datestring'], {}), '(datestring)\n', (9481, 9493), False, 'import dateutil\n'), ((19447, 19462), 'multiprocessing.pool.Pool', 'Pool', (['n_threads'], {}), '(n_threads)\n', (19451, 19462), True, 'from multiprocessing.pool import Pool as Pool\n'), ((19630, 19698), 'json.dump', 'json.dump', (['sequences_by_file', 'f'], {'indent': '(2)', 'default': 'json_util.default'}), '(sequences_by_file, f, indent=2, default=json_util.default)\n', (19639, 19698), False, 'import 
json\n'), ((20204, 20227), 'tqdm.tqdm', 'tqdm', (['sequences_by_file'], {}), '(sequences_by_file)\n', (20208, 20227), False, 'from tqdm import tqdm\n'), ((20349, 20385), 'os.path.join', 'os.path.join', (['input_base', 'csv_source'], {}), '(input_base, csv_source)\n', (20361, 20385), False, 'import os\n'), ((20406, 20440), 'os.path.dirname', 'os.path.dirname', (['csv_file_absolute'], {}), '(csv_file_absolute)\n', (20421, 20440), False, 'import os\n'), ((20456, 20489), 'os.path.isfile', 'os.path.isfile', (['csv_file_absolute'], {}), '(csv_file_absolute)\n', (20470, 20489), False, 'import os\n'), ((32731, 32766), 'json.dump', 'json.dump', (['output_data', 'f'], {'indent': '(1)'}), '(output_data, f, indent=1)\n', (32740, 32766), False, 'import json\n'), ((36041, 36058), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (36052, 36058), False, 'from collections import defaultdict\n'), ((36130, 36159), 'tqdm.tqdm', 'tqdm', (["ict_data['annotations']"], {}), "(ict_data['annotations'])\n", (36134, 36159), False, 'from tqdm import tqdm\n'), ((36406, 36430), 'tqdm.tqdm', 'tqdm', (["ict_data['images']"], {}), "(ict_data['images'])\n", (36410, 36430), False, 'from tqdm import tqdm\n'), ((37491, 37538), 'os.makedirs', 'os.makedirs', (['human_review_folder'], {'exist_ok': '(True)'}), '(human_review_folder, exist_ok=True)\n', (37502, 37538), False, 'import os\n'), ((38217, 38261), 'os.path.isdir', 'os.path.isdir', (['human_review_selection_folder'], {}), '(human_review_selection_folder)\n', (38230, 38261), False, 'import os\n'), ((38387, 38428), 'os.listdir', 'os.listdir', (['human_review_selection_folder'], {}), '(human_review_selection_folder)\n', (38397, 38428), False, 'import os\n'), ((39046, 39077), 'os.path.isfile', 'os.path.isfile', (['id_mapping_file'], {}), '(id_mapping_file)\n', (39060, 39077), False, 'import os\n'), ((39667, 39683), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (39678, 39683), False, 'from collections import 
defaultdict\n'), ((39725, 39741), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (39736, 39741), False, 'from collections import defaultdict\n'), ((39913, 39925), 'tqdm.tqdm', 'tqdm', (['images'], {}), '(images)\n', (39917, 39925), False, 'from tqdm import tqdm\n'), ((42701, 42746), 'shutil.copyfile', 'shutil.copyfile', (['id_mapping_file', 'backup_file'], {}), '(id_mapping_file, backup_file)\n', (42716, 42746), False, 'import shutil\n'), ((43528, 43563), 'json.dump', 'json.dump', (['output_data', 'f'], {'indent': '(2)'}), '(output_data, f, indent=2)\n', (43537, 43563), False, 'import json\n'), ((45231, 45243), 'json.load', 'json.load', (['f'], {}), '(f)\n', (45240, 45243), False, 'import json\n'), ((45906, 45951), 'os.path.join', 'os.path.join', (['input_base', 'input_relative_path'], {}), '(input_base, input_relative_path)\n', (45918, 45951), False, 'import os\n'), ((46827, 46864), 'os.path.dirname', 'os.path.dirname', (['output_absolute_path'], {}), '(output_absolute_path)\n', (46842, 46864), False, 'import os\n'), ((46873, 46911), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (46884, 46911), False, 'import os\n'), ((47354, 47366), 'tqdm.tqdm', 'tqdm', (['images'], {}), '(images)\n', (47358, 47366), False, 'from tqdm import tqdm\n'), ((47427, 47458), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['n_threads_file_copy'], {}), '(n_threads_file_copy)\n', (47437, 47458), True, 'from multiprocessing.pool import ThreadPool as ThreadPool\n'), ((47626, 47651), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(1)'}), '(d, f, indent=1)\n', (47635, 47651), False, 'import json\n'), ((47952, 47992), 'os.path.relpath', 'os.path.relpath', (['path', 'output_image_base'], {}), '(path, output_image_base)\n', (47967, 47992), False, 'import os\n'), ((48085, 48125), 'json.dump', 'json.dump', (['all_output_files', 'f'], {'indent': '(1)'}), '(all_output_files, f, indent=1)\n', (48094, 48125), False, 
'import json\n'), ((50075, 50119), 'os.path.isfile', 'os.path.isfile', (['all_public_output_files_list'], {}), '(all_public_output_files_list)\n', (50089, 50119), False, 'import os\n'), ((50836, 50880), 'os.path.isfile', 'os.path.isfile', (['all_public_output_sizes_list'], {}), '(all_public_output_sizes_list)\n', (50850, 50880), False, 'import os\n'), ((50942, 50971), 'tqdm.tqdm', 'tqdm', (['all_public_output_files'], {}), '(all_public_output_files)\n', (50946, 50971), False, 'from tqdm import tqdm\n'), ((53574, 53599), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['n_zip_threads'], {}), '(n_zip_threads)\n', (53584, 53599), True, 'from multiprocessing.pool import ThreadPool as ThreadPool\n'), ((4692, 4704), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4701, 4704), False, 'import json\n'), ((4892, 4925), 'os.path.relpath', 'os.path.relpath', (['path', 'input_base'], {}), '(path, input_base)\n', (4907, 4925), False, 'import os\n'), ((5016, 5049), 'json.dump', 'json.dump', (['all_files', 'f'], {'indent': '(1)'}), '(all_files, f, indent=1)\n', (5025, 5049), False, 'import json\n'), ((12159, 12188), 'numpy.diff', 'np.diff', (['sequence_row_indices'], {}), '(sequence_row_indices)\n', (12166, 12188), True, 'import numpy as np\n'), ((19863, 19910), 'json.load', 'json.load', (['f'], {'object_hook': 'json_util.object_hook'}), '(f, object_hook=json_util.object_hook)\n', (19872, 19910), False, 'import json\n'), ((25179, 25206), 'os.path.dirname', 'os.path.dirname', (['csv_source'], {}), '(csv_source)\n', (25194, 25206), False, 'import os\n'), ((33806, 33842), 'os.path.join', 'os.path.join', (['output_base', '"""preview"""'], {}), "(output_base, 'preview')\n", (33818, 33842), False, 'import os\n'), ((34500, 34512), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34509, 34512), False, 'import json\n'), ((35906, 35918), 'json.load', 'json.load', (['f'], {}), '(f)\n', (35915, 35918), False, 'import json\n'), ((37658, 37684), 'tqdm.tqdm', 'tqdm', 
(['missing_human_images'], {}), '(missing_human_images)\n', (37662, 37684), False, 'from tqdm import tqdm\n'), ((37784, 37817), 'os.path.isfile', 'os.path.isfile', (['input_fn_absolute'], {}), '(input_fn_absolute)\n', (37798, 37817), False, 'import os\n'), ((37940, 37987), 'shutil.copyfile', 'shutil.copyfile', (['input_fn_absolute', 'output_path'], {}), '(input_fn_absolute, output_path)\n', (37955, 37987), False, 'import shutil\n'), ((39235, 39247), 'json.load', 'json.load', (['f'], {}), '(f)\n', (39244, 39247), False, 'import json\n'), ((41686, 41703), 'tqdm.tqdm', 'tqdm', (['annotations'], {}), '(annotations)\n', (41690, 41703), False, 'from tqdm import tqdm\n'), ((42275, 42307), 'json.dump', 'json.dump', (['mappings', 'f'], {'indent': '(2)'}), '(mappings, f, indent=2)\n', (42284, 42307), False, 'import json\n'), ((44780, 44816), 'os.path.join', 'os.path.join', (['output_base', '"""preview"""'], {}), "(output_base, 'preview')\n", (44792, 44816), False, 'import os\n'), ((45975, 46010), 'os.path.isfile', 'os.path.isfile', (['input_absolute_path'], {}), '(input_absolute_path)\n', (45989, 46010), False, 'import os\n'), ((46594, 46655), 'os.path.join', 'os.path.join', (['output_image_base_private', 'output_relative_path'], {}), '(output_image_base_private, output_relative_path)\n', (46606, 46655), False, 'import os\n'), ((46704, 46764), 'os.path.join', 'os.path.join', (['output_image_base_public', 'output_relative_path'], {}), '(output_image_base_public, output_relative_path)\n', (46716, 46764), False, 'import os\n'), ((47001, 47059), 'shutil.copyfile', 'shutil.copyfile', (['input_absolute_path', 'output_absolute_path'], {}), '(input_absolute_path, output_absolute_path)\n', (47016, 47059), False, 'import shutil\n'), ((47874, 47897), 'pathlib.Path', 'Path', (['output_image_base'], {}), '(output_image_base)\n', (47878, 47897), False, 'from pathlib import Path\n'), ((49516, 49561), 'os.path.join', 'os.path.join', (['output_base', '"""final-preview-01"""'], {}), 
"(output_base, 'final-preview-01')\n", (49528, 49561), False, 'import os\n'), ((50234, 50274), 'os.path.relpath', 'os.path.relpath', (['path', 'output_image_base'], {}), '(path, output_image_base)\n', (50249, 50274), False, 'import os\n'), ((50393, 50440), 'json.dump', 'json.dump', (['all_public_output_files', 'f'], {'indent': '(1)'}), '(all_public_output_files, f, indent=1)\n', (50402, 50440), False, 'import json\n'), ((50545, 50557), 'json.load', 'json.load', (['f'], {}), '(f)\n', (50554, 50557), False, 'import json\n'), ((50989, 51024), 'os.path.join', 'os.path.join', (['output_image_base', 'fn'], {}), '(output_image_base, fn)\n', (51001, 51024), False, 'import os\n'), ((51043, 51060), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (51057, 51060), False, 'import os\n'), ((51096, 51114), 'os.path.getsize', 'os.path.getsize', (['p'], {}), '(p)\n', (51111, 51114), False, 'import os\n'), ((51198, 51238), 'json.dump', 'json.dump', (['filename_to_size', 'f'], {'indent': '(1)'}), '(filename_to_size, f, indent=1)\n', (51207, 51238), False, 'import json\n'), ((51336, 51348), 'json.load', 'json.load', (['f'], {}), '(f)\n', (51345, 51348), False, 'import json\n'), ((51607, 51644), 'humanfriendly.format_size', 'humanfriendly.format_size', (['total_size'], {}), '(total_size)\n', (51632, 51644), False, 'import humanfriendly\n'), ((52813, 52839), 'zipfile.ZipFile', 'ZipFile', (['zipfile_name', '"""w"""'], {}), "(zipfile_name, 'w')\n", (52820, 52839), False, 'from zipfile import ZipFile\n'), ((4813, 4829), 'pathlib.Path', 'Path', (['input_base'], {}), '(input_base)\n', (4817, 4829), False, 'from pathlib import Path\n'), ((10284, 10305), 'datetime.timedelta', 'datetime.timedelta', (['(0)'], {}), '(0)\n', (10302, 10305), False, 'import datetime\n'), ((18278, 18324), 'os.path.join', 'os.path.join', (["row['RelativePath']", "row['File']"], {}), "(row['RelativePath'], row['File'])\n", (18290, 18324), False, 'import os\n'), ((21621, 21666), 'os.path.join', 'os.path.join', 
(['csv_folder', 'image_file_relative'], {}), '(csv_folder, image_file_relative)\n', (21633, 21666), False, 'import os\n'), ((21714, 21762), 'os.path.relpath', 'os.path.relpath', (['image_file_absolute', 'input_base'], {}), '(image_file_absolute, input_base)\n', (21729, 21762), False, 'import os\n'), ((29517, 29567), 'os.path.join', 'os.path.join', (['csv_folder_relative', "im['file_name']"], {}), "(csv_folder_relative, im['file_name'])\n", (29529, 29567), False, 'import os\n'), ((30362, 30412), 'os.path.join', 'os.path.join', (['csv_folder_relative', "im['file_name']"], {}), "(csv_folder_relative, im['file_name'])\n", (30374, 30412), False, 'import os\n'), ((46950, 46986), 'os.path.isfile', 'os.path.isfile', (['output_absolute_path'], {}), '(output_absolute_path)\n', (46964, 46986), False, 'import os\n'), ((50141, 50171), 'pathlib.Path', 'Path', (['output_image_base_public'], {}), '(output_image_base_public)\n', (50145, 50171), False, 'from pathlib import Path\n'), ((53022, 53072), 'os.path.join', 'os.path.join', (['output_image_base', 'filename_relative'], {}), '(output_image_base, filename_relative)\n', (53034, 53072), False, 'import os\n'), ((37719, 37747), 'os.path.join', 'os.path.join', (['input_base', 'fn'], {}), '(input_base, fn)\n', (37731, 37747), False, 'import os\n'), ((42572, 42595), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (42593, 42595), False, 'import datetime\n')] |
import logging
from urllib import error, request
import os
import glob
import h5py
import numpy as np
# Root logger configured at import time so download progress/errors are visible.
logging.basicConfig(level=logging.INFO)
# Base URL of the public PICMUS evaluation datasets hosted by CREATIS.
S_URL_CREATIS_PREFIX = "https://www.creatis.insa-lyon.fr/EvaluationPlatform/picmus/dataset"
# Absolute path of the directory containing this module.
pt_exp = os.path.abspath(os.path.dirname(__file__))
# Package root (parent of this module); built with "/" splits, so POSIX-only — TODO confirm Windows support.
TO_PYMUS="/".join(pt_exp.split("/")[:-1]) + "/"
# Data directory shipped alongside the package (fallback data source).
PYMUS_DATA_LOCAL = TO_PYMUS + "data/"
# External mount point; preferred over the local copy when it exists (see detectDataSource).
PYMUS_DATA_MNT = "/mnt/pymus-data/"
def detectDataSource():
    """Pick the data root: the mounted volume when it exists, else the local copy."""
    mounted = os.path.exists(PYMUS_DATA_MNT)
    return PYMUS_DATA_MNT if mounted else PYMUS_DATA_LOCAL
# Resolved data root: the mounted volume when present, otherwise the local copy.
TO_DATA = detectDataSource()
# Sub-directories for test fixtures and temporary downloads.
TO_DATA_TEST = TO_DATA + "test/"
TO_DATA_TMP = TO_DATA + "tmp/"
# Supported plane-wave counts: the odd numbers 1, 3, ..., 73.
S_PW_CHOICES = [1 + 2*i for i in range(37) ]
# Supported PICMUS phantom identifiers.
S_PHT_CHOICES = ["numerical","in_vitro_type1","in_vitro_type2"]
# Data categories — presumably HDF5 payload kinds consumed elsewhere; verify against callers.
S_DATA_TYPES = ["scan","sequence","probe","dataset"]
class DataNotSupported(Exception):
    """Raised when a requested phantom / plane-wave combination is not supported."""
    pass
def download_data(remote_filename,url,local_path,force_download=None):
    """Download ``remote_filename`` from ``url`` into ``local_path``.

    Args:
        remote_filename: Name of the file on the server.
        url: Base URL; the file is fetched from ``url + "/" + remote_filename``.
        local_path: Local directory, expected to end with a path separator
            (matching the module's ``TO_DATA``-style constants).
        force_download: Any value other than ``None`` forces a re-download
            even when a local copy already exists.

    Returns:
        0 on success (including a cache hit), 1 on any failure.
    """
    if not os.path.exists(local_path):
        try:
            os.makedirs(local_path)
        except Exception as e:
            logging.error(" DIR creation failed %s " % e )
            return 1
    cached_names = [n.split("/")[-1] for n in glob.glob(local_path + "/*")]
    if force_download is None and remote_filename in cached_names:
        logging.info(" File found %s -> %s " % (local_path,remote_filename) )
        return 0
    try:
        # Open the URL *before* creating the local file: the original code
        # created (and leaked) an empty local file even when the download
        # failed, which then satisfied the cache check on the next call.
        # Context managers guarantee both handles are closed.
        with request.urlopen(url + "/" + remote_filename) as f_read, \
                open(local_path + remote_filename, "wb") as f_write:
            logging.info(" Downloading %s/%s ... " % (url,remote_filename))
            f_write.write(f_read.read())
    except error.HTTPError as e:
        logging.error(" HTTP Error - url = %s/%s - ERR = %s " % (url,remote_filename,e) )
        return 1
    except error.URLError as e:
        logging.error(" URL Error - url = %s/%s - ERR = %s " % (url,remote_filename,e) )
        return 1
    return 0
def download_dataset(filename,path_to):
    """Fetch one PICMUS dataset file into ``path_to``, logging on failure."""
    status = download_data(filename, S_URL_CREATIS_PREFIX, path_to)
    if status > 0:
        logging.error(" Error downloading data ")
def creatis_dataset_filename(phantom_selection, nbPW):
    """Build the canonical PICMUS HDF5 filename for a phantom / plane-wave pair.

    Raises:
        DataNotSupported: If either value is outside the supported choices.
    """
    supported = (phantom_selection in S_PHT_CHOICES) and (nbPW in S_PW_CHOICES)
    if not supported:
        raise DataNotSupported(" Data request %s %s not supported " % ( phantom_selection,nbPW) )
    return "dataset_rf_%s_transmission_1_nbPW_%s.hdf5" % (phantom_selection,nbPW)
def has_data(prefix,fname):
    """Return True when ``fname``'s basename already exists under ``TO_DATA/prefix``."""
    marker = "%s/" % prefix
    basenames = [g.split(marker)[-1] for g in glob.glob(TO_DATA + "%s*" % marker)]
    return fname.split("/")[-1] in basenames
def generic_hdf5_write(filename,prefix=None,overwrite=False,fields={}):
    """Write ``fields`` (name -> value) as datasets into a new HDF5 file.

    Args:
        filename: Output HDF5 path, opened in "w" mode (truncates).
        prefix: Optional group name to nest the datasets under.
        overwrite: If True, replace datasets already present in the group;
            if False, existing datasets are silently kept.
        fields: Mapping of dataset name to value. Strings are stored as
            fixed-length bytes; non-array scalars are wrapped in a
            1-element array.

    NOTE(review): ``fields={}`` is a mutable default argument; it is never
    mutated here, but callers should still pass their own dict.
    """
    # "with" guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with h5py.File(filename,"w") as f:
        g_prf = f
        if prefix is not None:
            try:
                g_prf = f[str(prefix)]
            except KeyError:
                g_prf = f.create_group(str(prefix))
        for key_name,key_val_array in fields.items():
            if isinstance(key_val_array,str):
                key_val_array = np.array(key_val_array).astype(np.string_)
            elif not isinstance(key_val_array,np.ndarray):
                key_val_array = np.array([key_val_array])
            if key_name in g_prf.keys():
                if overwrite:
                    del g_prf[key_name]
                    g_prf.create_dataset(key_name,data=key_val_array)
            else:
                g_prf.create_dataset(key_name,data=key_val_array)
def generic_hdf5_read(filename,prefix,data_kv):
    """Read datasets named by the keys of ``data_kv`` from an HDF5 file.

    Keys may be "/"-separated paths relative to ``prefix``. Values found
    are written into ``data_kv`` in place; keys whose path is missing are
    logged and left untouched.

    Returns:
        1 on success, 0 if the file or the prefix group cannot be opened.
    """
    try:
        f = h5py.File(filename,"r")
    except OSError:  # narrowed from a bare except; h5py raises OSError here
        logging.error(" File %s not found " % filename )
        return 0
    try:
        g_prf = f
        if prefix is not None:
            try:
                g_prf = f[str(prefix)]
            except KeyError:
                # BUGFIX: the original returned here without closing the file.
                logging.error(" cannot read data for %s at %s " % (filename,prefix))
                return 0
        for key_name in data_kv.keys():
            key_split = key_name.split("/")
            g = g_prf
            found = True
            for i, key in enumerate(key_split, start=1):
                if key in g.keys():
                    g = g[key]
                else:
                    logging.error(" No data %s in %s:%s " % ("/".join(key_split[:i]),filename,prefix) )
                    # BUGFIX: the original `continue`d to the next path
                    # component with a stale group (and a stale counter),
                    # then stored that wrong group; abort this key instead.
                    found = False
                    break
            if not found:
                continue
            try:
                data_kv[key_name] = g[:]
            except Exception:
                # Scalar datasets reject slicing; read the raw value instead.
                data_kv[key_name] = g[()]
        return 1
    finally:
        f.close()
| [
"h5py.File",
"logging.error",
"os.makedirs",
"logging.basicConfig",
"os.path.dirname",
"os.path.exists",
"urllib.request.urlopen",
"logging.info",
"numpy.array",
"glob.glob"
] | [((103, 142), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (122, 142), False, 'import logging\n'), ((262, 287), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((440, 470), 'os.path.exists', 'os.path.exists', (['PYMUS_DATA_MNT'], {}), '(PYMUS_DATA_MNT)\n', (454, 470), False, 'import os\n'), ((2440, 2464), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (2449, 2464), False, 'import h5py\n'), ((901, 927), 'os.path.exists', 'os.path.exists', (['local_path'], {}), '(local_path)\n', (915, 927), False, 'import os\n'), ((1162, 1231), 'logging.info', 'logging.info', (["(' File found %s -> %s ' % (local_path, remote_filename))"], {}), "(' File found %s -> %s ' % (local_path, remote_filename))\n", (1174, 1231), False, 'import logging\n'), ((1862, 1903), 'logging.error', 'logging.error', (['""" Error downloading data """'], {}), "(' Error downloading data ')\n", (1875, 1903), False, 'import logging\n'), ((3090, 3114), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3099, 3114), False, 'import h5py\n'), ((939, 962), 'os.makedirs', 'os.makedirs', (['local_path'], {}), '(local_path)\n', (950, 962), False, 'import os\n'), ((1322, 1366), 'urllib.request.urlopen', 'request.urlopen', (["(url + '/' + remote_filename)"], {}), "(url + '/' + remote_filename)\n", (1337, 1366), False, 'from urllib import error, request\n'), ((1370, 1434), 'logging.info', 'logging.info', (["(' Downloading %s/%s ... ' % (url, remote_filename))"], {}), "(' Downloading %s/%s ... 
' % (url, remote_filename))\n", (1382, 1434), False, 'import logging\n'), ((3125, 3172), 'logging.error', 'logging.error', (["(' File %s not found ' % filename)"], {}), "(' File %s not found ' % filename)\n", (3138, 3172), False, 'import logging\n'), ((991, 1036), 'logging.error', 'logging.error', (["(' DIR creation failed %s ' % e)"], {}), "(' DIR creation failed %s ' % e)\n", (1004, 1036), False, 'import logging\n'), ((1501, 1587), 'logging.error', 'logging.error', (["(' HTTP Error - url = %s/%s - ERR = %s ' % (url, remote_filename, e))"], {}), "(' HTTP Error - url = %s/%s - ERR = %s ' % (url,\n remote_filename, e))\n", (1514, 1587), False, 'import logging\n'), ((1628, 1713), 'logging.error', 'logging.error', (["(' URL Error - url = %s/%s - ERR = %s ' % (url, remote_filename, e))"], {}), "(' URL Error - url = %s/%s - ERR = %s ' % (url,\n remote_filename, e))\n", (1641, 1713), False, 'import logging\n'), ((2323, 2359), 'glob.glob', 'glob.glob', (["(TO_DATA + '%s*' % spl_str)"], {}), "(TO_DATA + '%s*' % spl_str)\n", (2332, 2359), False, 'import glob\n'), ((2804, 2829), 'numpy.array', 'np.array', (['[key_val_array]'], {}), '([key_val_array])\n', (2812, 2829), True, 'import numpy as np\n'), ((3275, 3344), 'logging.error', 'logging.error', (["(' cannot read data for %s at %s ' % (filename, prefix))"], {}), "(' cannot read data for %s at %s ' % (filename, prefix))\n", (3288, 3344), False, 'import logging\n'), ((1128, 1156), 'glob.glob', 'glob.glob', (["(local_path + '/*')"], {}), "(local_path + '/*')\n", (1137, 1156), False, 'import glob\n'), ((2693, 2716), 'numpy.array', 'np.array', (['key_val_array'], {}), '(key_val_array)\n', (2701, 2716), True, 'import numpy as np\n')] |
from joblib import Parallel, delayed, parallel_backend
from lshiftml.helpers.helpers import grouper
from copy import deepcopy
import numpy as np
import time
from rascal.representations import SphericalInvariants as SOAP
def get_features(frames,calculator,hypers):
    """Instantiate ``calculator`` with ``hypers`` and return the feature
    matrix it computes for ``frames``."""
    representation = calculator(**hypers)
    managers = representation.transform(frames)
    return managers.get_features(representation)
def get_features_in_parallel(frames,calculator,hypers,blocksize=25,n_cores=-1):
    """helper function that returns the features of a calculator (from calculator.transform())
    in parallel
    """
    # Chunk the frames so every worker receives an equally shaped block,
    # then stitch the per-block feature matrices back together.
    with parallel_backend(backend="threading"):
        jobs = (delayed(get_features)(chunk, calculator, hypers)
                for chunk in grouper(blocksize, frames))
        block_features = Parallel(n_jobs=n_cores)(jobs)
    return np.concatenate(block_features)
if __name__ == "__main__":
    # No CLI behavior: the module is intended to be imported, not executed.
    pass
| [
"joblib.parallel_backend",
"lshiftml.helpers.helpers.grouper",
"joblib.Parallel",
"joblib.delayed",
"numpy.concatenate"
] | [((944, 967), 'numpy.concatenate', 'np.concatenate', (['results'], {}), '(results)\n', (958, 967), True, 'import numpy as np\n'), ((757, 794), 'joblib.parallel_backend', 'parallel_backend', ([], {'backend': '"""threading"""'}), "(backend='threading')\n", (773, 794), False, 'from joblib import Parallel, delayed, parallel_backend\n'), ((814, 838), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_cores'}), '(n_jobs=n_cores)\n', (822, 838), False, 'from joblib import Parallel, delayed, parallel_backend\n'), ((839, 860), 'joblib.delayed', 'delayed', (['get_features'], {}), '(get_features)\n', (846, 860), False, 'from joblib import Parallel, delayed, parallel_backend\n'), ((901, 927), 'lshiftml.helpers.helpers.grouper', 'grouper', (['blocksize', 'frames'], {}), '(blocksize, frames)\n', (908, 927), False, 'from lshiftml.helpers.helpers import grouper\n')] |
import os
import sys
import pickle
import json
import random
import operator
import inspect
import numpy as np
import matplotlib.pyplot as plt
import pylatex as tex
from cycler import cycler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as plticker
from scipy.stats import pearsonr
from scipy.stats import norm as normal_dist_gen
import seaborn
from simulator.plot import Plot
import simulator.simulator_utils as s_utils
class ReportGenerator:
    """Builds a PDF report comparing groups of simulation summaries.

    Each inner list of ``files`` is aggregated into a single curve via a
    ``MultiSummaryExtractor``; the report contains a hyper-parameter table
    followed by a fixed set of comparison plots (one PNG per metric).
    """
    # One curve color per experiment group (index-aligned with `files`).
    _colors = ['blue',
               'green',
               'red',
               'cyan',
               'magenta',
               'yellow',
               'black',
               'green',
               'purple',
               'plum',
               'orange']

    def __init__(self, files, labels, file_prefix='rep_gen_'):
        """Creates a new `ReportGenerator` instance.
        Args:
            `files`: A list of lists s.t. each list contains a filenames
                that should be grouped together and showed by the same curve.
            `labels`: A list of strings of labels for legend.
            `file_prefix`: A prefix that will be attached to all files
                generated by this instance (plot pngs and pdfs).
        Raises:
            ValueError: If `files`/`labels` are empty, malformed, or of
                different lengths.
        """
        if not isinstance(files, list) or not files:
            raise ValueError('`files` must be non-empty list.')
        if not isinstance(labels, list) or not labels:
            raise ValueError('`labels` must be non-empty list.')
        if any(not isinstance(f, list) for f in files):
            raise ValueError('Each element in `files` must be a list.')
        if any(not f for f in files):
            raise ValueError('There are empty list in `files`.')
        if len(labels) != len(files):
            err_msg = ("`labels` list must be of same size as "
                       "`files` list.")
            raise ValueError(err_msg)
        dirname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'summaries',
                               'reports')
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        self._dirname = os.path.join(dirname, file_prefix)
        if not os.path.exists(self._dirname):
            os.makedirs(self._dirname)
        # Remove stale artifacts left over from a previous run of this report.
        for f in os.listdir(self._dirname):
            if f.endswith('.pdf') or f.endswith('.tex'):
                os.remove(os.path.join(self._dirname, f))
        self._images_dirname = os.path.join(self._dirname, 'images')
        if not os.path.exists(self._images_dirname):
            os.makedirs(self._images_dirname)
        self._mses = [MultiSummaryExtractor(f) for f in files]
        self._labels = labels
        self._file_prefix = file_prefix
        max_len = len(files)
        self._linewidths = [2 for i in range(max_len)]
        self._markers = ['o' for i in range(max_len)]
        self._width = r'1\textwidth'
        self._position = 'ht'
        self._pdf_filename = os.path.join(self._dirname, self._file_prefix)
        self._fig_width = 8
        self._fig_height = 4

    def generate_report(self, custom_text=None):
        """Generates pdf file with report for multiple experiments.
        Args:
            `custom_text`: A text that will be added to the beginning of the
                pdf file.
        """
        doc = tex.Document(self._pdf_filename)
        # BUGFIX: `custom_text` was documented but silently ignored before.
        if custom_text is not None:
            doc.append(custom_text)
        doc.append("""Results
        """)
        self._create_specs_table(doc)
        # (plot builder, caption) pairs, rendered in order.
        figures = [
            (self._plot_sep_ratio_vs_err_differ,
             'Average overfitting for min value of test error vs separation ratio.'),
            (self._plot_sep_ratio_vs_final_err_differ,
             'Average overfitting for final value of test error vs separation ratio.'),
            (self._plot_diffusion_vs_min_error,
             'Average min 0-1 error vs average diffusion to achieve this error.'),
            (self._plot_n_steps_vs_min_error,
             'Average min 0-1 error vs average epochs to achieve this error.'),
            (self._plot_sep_ratio_vs_min_error,
             'Average min 0-1 error vs separation ratio.'),
            (self._plot_sep_ratio_vs_accept_ratio,
             'Average accept ratio vs separation ratio.'),
            (self._plot_sep_ratio_vs_mix_ratio,
             'Average mixing ratio vs separation ratio.'),
            (self._plot_sep_ratio_vs_visit_ratio,
             'Average visiting ratio vs separation ratio.'),
        ]
        for build_plot, caption in figures:
            with doc.create(tex.Figure(position=self._position)) as plot_:
                plot_.add_image(build_plot())
                plot_.add_caption(caption)
            plt.close()
        doc.generate_pdf(clean_tex=True)

    def _create_specs_table(self, doc):
        """Generates summary table.

        One row per simulation, colored by its group's curve color; a cell
        is left blank when the simulation reports more than one value for
        that column.
        """
        col_names = ['Model',
                     'Dataset',
                     'Data Size',
                     "beta_0",
                     'Swap Step',
                     'Burn In',
                     'Batch Size',
                     'Noise Type',
                     'Proba Coeff']
        # One left-aligned, padding-free ("@{}l@{}") column per name.
        table_spec = "|@{}l@{}".join(['' for i in range(len(col_names) + 1)]) + '|'
        tabular = tex.Tabular(table_spec,
                              pos=self._position,
                              booktabs=False,
                              row_height=0.1,)
        with doc.create(tabular) as table:
            table.add_hline()
            table.add_row(col_names)
            table.add_hline()
            for i, mse in enumerate(self._mses):
                for summ_ext in mse._summ_ext:
                    vals_ = {n: [] for n in col_names}
                    se = mse._summ_ext[summ_ext]
                    desc = se.get_description()
                    vals_['Model'].append(s_utils.get_value_from_name(
                        se.get_name(), 'model_name'))
                    vals_['Dataset'].append(s_utils.get_value_from_name(
                        se.get_name(), 'dataset_name'))
                    vals_['Data Size'].append(s_utils.get_value_from_name(
                        se.get_name(), 'train_data_size'))
                    vals_["beta_0"].append(s_utils.get_value_from_name(
                        se.get_name(), 'beta_0'))
                    vals_['Swap Step'].append(desc['swap_step'])
                    vals_['Burn In'].append(desc['burn_in_period'])
                    vals_['Batch Size'].append(desc['batch_size'])
                    vals_['Noise Type'].append(desc['noise_type'])
                    vals_['Proba Coeff'].append(desc['proba_coeff'])
                    for k in vals_:
                        vals_[k] = list(set(vals_[k]))
                    row = []
                    for col_name in col_names:
                        if len(vals_[col_name]) == 1:
                            row.append(tex.basic.TextColor(self._colors[i],
                                                           str(vals_[col_name][0])))
                        else:
                            row.append(' ')
                    table.add_row(row)
            table.add_hline()

    def _plot_metric(self, getter_name, xlabel, ylabel, img_name, annotate=False):
        """Draws one curve per experiment group and saves the figure.

        This replaces eight near-identical ``_plot_*`` bodies that differed
        only in the data getter, the axis labels and the output filename.

        Args:
            getter_name: Name of the ``MultiSummaryExtractor`` method that
                yields ``(x values, y values, extra values)`` per group.
            xlabel: Caption for the x-axis.
            ylabel: Caption for the y-axis.
            img_name: Basename of the PNG written to the images directory.
            annotate: If True, annotate each point with its extra value
                (the separation ratio).

        Returns:
            Absolute path of the saved image.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        for i, mse in enumerate(self._mses):
            x_vals, y_vals, extras = getattr(mse, getter_name)()
            # Only pass `annotate` when requested so non-annotated plots call
            # Plot.plot() exactly as before (with its own default).
            extra_kwargs = {}
            if annotate:
                extra_kwargs['annotate'] = [
                    (str(extras[j]), x_vals[j], y_vals[j])
                    for j in range(len(x_vals))]
            plot.plot(x=x_vals,
                      y=y_vals,
                      fig=fig,
                      ax=ax,
                      label=self._labels[i],
                      color=self._colors[i],
                      splined_points_mult=None,
                      linewidth=self._linewidths[i],
                      marker=self._markers[i],
                      **extra_kwargs)
        plot.legend(fig,
                    ax,
                    xlabel=xlabel,
                    ylabel=ylabel,
                    fig_width=self._fig_width,
                    fig_height=self._fig_height,
                    bbox_to_anchor=(1.15, 0.5))
        img_path = os.path.join(self._images_dirname, img_name)
        fig.savefig(img_path, bbox_inches='tight')
        return img_path

    def _plot_diffusion_vs_min_error(self):
        return self._plot_metric('get_diffusion_vs_min_error',
                                 'AVERAGE DIFFUSION',
                                 'AVERAGE MIN 0-1 ERROR',
                                 'diffusion.png',
                                 annotate=True)

    def _plot_n_steps_vs_min_error(self):
        return self._plot_metric('get_n_steps_vs_min_error',
                                 'AVERAGE EPOCHS',
                                 'AVERAGE MIN 0-1 ERROR',
                                 'n_steps.png',
                                 annotate=True)

    def _plot_sep_ratio_vs_min_error(self):
        return self._plot_metric('get_sep_ratio_vs_min_error',
                                 'SEPARATION RATIO',
                                 'AVERAGE MIN 0-1 ERROR',
                                 'min_loss_sep.png')

    def _plot_sep_ratio_vs_accept_ratio(self):
        return self._plot_metric('get_sep_ratio_vs_accept_ratio',
                                 'SEPARATION RATIO',
                                 'ACCEPT RATIO',
                                 'sep_vs_accept.png')

    def _plot_sep_ratio_vs_mix_ratio(self):
        return self._plot_metric('get_sep_ratio_vs_mix_ratio',
                                 'SEPARATION RATIO',
                                 'MIXING RATIO',
                                 'sep_vs_mix.png')

    def _plot_sep_ratio_vs_visit_ratio(self):
        return self._plot_metric('get_sep_ratio_vs_visit_ratio',
                                 'SEPARATION RATIO',
                                 'VISIT RATIO',
                                 'sep_vs_visit.png')

    def _plot_sep_ratio_vs_err_differ(self):
        return self._plot_metric('get_sep_ratio_vs_err_differ',
                                 'SEPARATION RATIO',
                                 'OVERFITTING ERROR',
                                 'sep_vs_min_overfit.png')

    def _plot_sep_ratio_vs_final_err_differ(self):
        return self._plot_metric('get_sep_ratio_vs_final_err_differ',
                                 'SEPARATION RATIO',
                                 'FINAL OVERFITTING ERROR',
                                 'sep_vs_final_overfit.png')
class MultiSummaryExtractor:
    """Aggregates several ``SummaryExtractor``s.

    Every ``get_*`` method collects one data point per simulation and
    returns three parallel lists sorted by the first list.
    """

    def __init__(self, names):
        """Instantiates a new MultiSummaryExtractor instance.
        Args:
            `names`: A list of simulation names.
        Raises:
            ValueError: If for any simulation name in `names` the simulation
                do not exist.
            TypeError: If `names` is not `list` or `names` is an empty list.
        """
        dirname = os.path.abspath(os.path.dirname(__file__))
        self._dirname = os.path.join(dirname, 'summaries')
        filenames = os.listdir(self._dirname)
        if not isinstance(names, list) or not names:
            raise TypeError("`names` argument must be non-empty list.")
        if any(name not in filenames for name in names):
            raise ValueError('The following simulation(s) do not exist(s):',
                             [name for name in names if name not in filenames])
        self._summ_ext = {
            name: SummaryExtractor(name)
            for name in names
        }

    def _sorted_triples(self, getter_name, pick=None):
        """Collects one ``(x, y, z)`` triple per simulation and returns them
        as three lists sorted by ``x``.

        This replaces eight copy-pasted collect-and-sort getter bodies.

        Args:
            getter_name: Name of the ``SummaryExtractor`` method to invoke.
            pick: Optional callable mapping the method's result tuple to an
                ``(x, y, z)`` triple; identity on 3-tuples when omitted.
        """
        if pick is None:
            pick = lambda t: (t[0], t[1], t[2])
        triples = sorted(pick(getattr(se, getter_name)())
                         for se in self._summ_ext.values())
        x, y, z = zip(*triples)
        return list(x), list(y), list(z)

    def get_sep_ratio_vs_accept_ratio(self):
        """Returns data (sep_ratio list, `accept_ratio` list, stddev list)."""
        return self._sorted_triples('get_sep_ratio_vs_accept_ratio')

    def get_sep_ratio_vs_mix_ratio(self):
        """Returns data (sep_ratio list, mix_ratio list, stddev list)."""
        return self._sorted_triples('get_sep_ratio_vs_mix_ratio')

    def get_sep_ratio_vs_visit_ratio(self):
        """Returns data (sep_ratio list, visit_ratio list, stddev list)."""
        return self._sorted_triples('get_sep_ratio_vs_visit_ratio')

    def get_diffusion_vs_min_error(self):
        """Returns (diffusion list, min-error list, sep_ratio list), sorted by diffusion."""
        # Underlying tuple: (diff_val, diff_err, loss_val, loss_err, sep).
        return self._sorted_triples('get_diffusion_vs_min_error',
                                    lambda t: (t[0], t[2], t[4]))

    def get_n_steps_vs_min_error(self):
        """Returns (epoch-count list, min-error list, sep_ratio list), sorted by epochs."""
        # Underlying tuple: (step_val, step_err, loss_val, loss_err, sep).
        return self._sorted_triples('get_n_steps_vs_min_error',
                                    lambda t: (t[0], t[2], t[4]))

    def get_sep_ratio_vs_min_error(self):
        """Returns (sep_ratio list, min-error list, error-stddev list), sorted by sep_ratio."""
        return self._sorted_triples('get_sep_ratio_vs_min_error')

    def get_sep_ratio_vs_err_differ(self):
        """Returns (sep_ratio list, overfit-error list, stddev list), sorted by sep_ratio."""
        return self._sorted_triples('get_sep_ratio_vs_err_differ')

    def get_sep_ratio_vs_final_err_differ(self):
        """Returns (sep_ratio list, final overfit-error list, stddev list), sorted by sep_ratio."""
        return self._sorted_triples('get_sep_ratio_vs_final_err_differ')
class SummaryReportGenerator:
"""Generates pdf report for individual simulations."""
_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
_colors += ['blue',
'orange',
'purple',
'dimgray',
'maroon',
'gold',
'rosybrown',
'tomato']
_colors += _colors
def __init__(self, names, labels, simulation_num=0, report_name='summary_report',
sample_every=1, lower=500, higher=700, include_fnames=False, **kwargs):
"""Instantiates `SummaryReportGenerator` instance.
Args:
names: A list of filenames to process.
labels: A list of labels to assign for each of the filename.
simulation_num: A number of simulation for which to generate the
report.
report_name: A filename of the resulted PDF report.
sample_every: A number specifying the interval s.t. the
resulting plots will be sampled.
lower:
higher:
include_fnames: If `True`, adds image filename to the caption.
kwargs:
ylim_err: Tuple of two numbers specifying the limit for
y-axis for error plots.
ylim_loss: Tuple of two numbers specifying the limit for
y-axis for loss plots.
epoch_range:
**TODO**: make separate yaxis_cycle for error and loss.
"""
self._include_fnames = include_fnames
self._pathdelim = ('/' if 'win' not in sys.platform else '\\')
self._simulation_num = simulation_num
self._summ_ext = {f:SummaryExtractor(f) for f in names}
self._original_names = []
for i, name in enumerate(names):
self._original_names.append(self._summ_ext[name]._name)
self._summ_ext[name]._original_name = self._summ_ext[name]._name
self._summ_ext[name]._name = labels[i]
dirname = os.path.abspath(os.path.dirname(__file__))
dirname = os.path.join(dirname, 'summaries', 'reports')
if not os.path.exists(dirname):
os.makedirs(dirname)
self._dirname = os.path.join(dirname, report_name)
self._images_dirname = os.path.join(self._dirname, 'images')
self._pdf_filename = os.path.join(self._dirname, report_name)
self._sample_every = sample_every
if not os.path.exists(self._images_dirname):
os.makedirs(self._images_dirname)
self._width = r'1\textwidth'
self._position = 'ht'
self.yaxis_cycle = cycler(y=[0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72])
self.sgd_color_cycler = cycler(color=['black', 'black'])
self.lower = lower
self.higher = higher
self.ylim_err = kwargs.get('ylim_err', (0, 1))
self.ylim_loss = kwargs.get('ylim_loss', (0, 5))
epoch_range = (kwargs.get('epoch_range', None)
or (0, max(se.get_description()['n_epochs'] for se in self._summ_ext.values())))
self.epoch_range = epoch_range
def generate_report(self, custom_text="Results"):
"""Generates pdf file with report for for multiple individual simulations.
Args:
`custom_text`: A text that will be added to the beginning of the
pdf file.
"""
doc = tex.Document(self._pdf_filename,
font_size='tiny')
doc.append(custom_text)
doc.append(tex.LineBreak())
self._create_specs_table(doc)
#################### Min vals for error and loss ####################
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_min_error(self._sample_every,
self._simulation_num,
ylim=self.ylim_err,
xlim=self.epoch_range)
plot_.add_image(imgpath)
caption = 'Train-test min error'
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_min_loss(self._sample_every,
self._simulation_num,
ylim=self.ylim_loss)
plot_.add_image(imgpath)
caption = 'Train-test min loss'
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
#################### Min vals for error and loss + logscaled ####################
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_min_error(self._sample_every,
self._simulation_num,
ylim=(0.05, 0.6),
xlim=(50, 2000),
log_x=4,
store_image=False)
imgpath = ''.join(imgpath.split('.')[:-1]) + '_logscaled.png'
plt.savefig(imgpath, bbox_inches='tight')
plt.close()
plot_.add_image(imgpath)
caption = 'Train-test min error'
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
doc.append(tex.basic.NewPage())
#################### Plots with diffusion ####################
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_error_epochs_diffusion(
se, self._simulation_num, ylim=self.ylim_err)
plot_.add_image(imgpath)
caption = 'Train-Test Error vs Diffusion vs Epochs for ' + str(se.get_name())
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_loss_epochs_diffusion(
se, self._simulation_num, ylim=self.ylim_loss)
plot_.add_image(imgpath)
caption = 'Train-Test Loss vs Diffusion vs Epochs for ' + str(se.get_name())
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
#################### Plots with diffusion and gaps ####################
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_error_gap_all(
self._simulation_num, ylim=self.ylim_err)
plot_.add_image(imgpath)
caption = 'Train-Test Error Gap'
if self._include_fnames:
caption += tex.utils.escape_latex(' \n ') + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_loss_gap_all(
self._simulation_num, ylim=self.ylim_loss)
plot_.add_image(imgpath)
caption = 'Train-Test Loss Gap'
if self._include_fnames:
caption += tex.utils.escape_latex(' \n ') + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
#################### Plots of everything together ####################
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_error_per_sim(se,
self._sample_every,
self._simulation_num,
ylim=self.ylim_err)
plot_.add_image(imgpath)
caption = 'Train-test error for ' + se.get_name()
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_loss_per_sim(se,
self._sample_every,
self._simulation_num,
ylim=self.ylim_loss)
plot_.add_image(imgpath)
caption = 'Train-test loss for ' + se.get_name()
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
doc.append(tex.basic.NewPage())
#################### Plots of temperature mixing ####################
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_mixing(se, self._simulation_num)
plot_.add_image(imgpath)
caption = 'Mixing ' + se.get_name()
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
#################### Plots of diffusions ####################
for name, se in self._summ_ext.items():
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_diffusion(se, self._simulation_num)
plot_.add_image(imgpath)
caption = 'Diffusion ' + se.get_name()
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
for name, se in self._summ_ext.items():
n_replicas = se.get_description()['n_replicas']
if n_replicas == 1:
imgpath = self._plot_loss_histogram_for_noise_level(se,)
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(imgpath)
caption = ("Histogram of energy distributions for " + se.get_name())
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
else:
for i in range(n_replicas - 1):
noise_levels = [i, i+1]
imgpath = self._plot_loss_histogram_for_noise_level(
se, noise_level=noise_levels)
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(imgpath)
caption = ("Histogram of energy distributions for "
+ se.get_name()
+ " for Noise Levels: "
+ '-'.join([str(x) for x in noise_levels])
+ '.')
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
# m\clearpage must be added. Otherwise overflow
doc.append(tex.basic.NewPage())
#################### MOA weights values ####################
if any(('mode' in se.get_description()
and se.get_description()['mode'] is not None
and 'moa' == se.get_description()['mode'].lower())
for name, se in self._summ_ext.items()):
for name, se in self._summ_ext.items():
if se.get_description()['n_replicas'] != 1:
with doc.create(tex.Figure(position=self._position)) as plot_:
se._plot_moa_weights(self._simulation_num)
imgname = 'moa_weights_vals_' + se.get_name().replace(' ', '') + '.png'
imgpath = os.path.join(self._images_dirname,
imgname)
plt.savefig(imgpath, bbox_inches='tight')
plot_.add_image(imgpath)
caption = 'Weight values of each replica for ' + se.get_name()
if self._include_fnames:
caption += '. Filename: ' + imgname
plot_.add_caption(caption)
plt.close()
#################### Histograms of noise levels ####################
# find rank in terms of min test error for each one of the
# replicas
for name, se in self._summ_ext.items():
if (se.get_description()['n_replicas'] == 1
or se.get_description()['burn_in_period']==np.inf):
continue
errs = {}
for r in range(se.get_description()['n_replicas']):
x, y = se.get_summary('test_error',
replica_id=r,
simulation_num=self._simulation_num)
errs[r] = min(y)
sorted_errs = sorted(list(errs.items()), key=lambda x: x[1])
rids_errs_ranks = [(r[0], r[1], i) for i, r in enumerate(sorted_errs)]
rids = [r[0] for r in rids_errs_ranks]
for r in rids_errs_ranks:
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_noise_level_histogram(
se, replica_id=r[0], simulation_num=self._simulation_num)
plot_.add_image(imgpath)
caption = ('Histogram of noise levels for replica '
+ str(r[0])
+ " for "
+ se.get_name()
+ ".\nMin Test Error: "
+ str(r[1])
+ ", Rank: "
+ str(r[2]+1)
+ "/"
+ str(se.get_description()['n_replicas']))
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_min_error_with_markers(
self._sample_every, self._simulation_num, ylim=self.ylim_err,
lower=self.lower, higher=self.higher)
plot_.add_image(imgpath)
caption = 'Train-test min error with swap markers'
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
imgpath = self._plot_train_test_min_loss_with_markers(self._sample_every,
self._simulation_num,
ylim=self.ylim_loss,
lower=self.lower,
higher=self.higher)
plot_.add_image(imgpath)
caption = 'Train-test min loss with swap markers'
if self._include_fnames:
caption += '. Filename: ' + imgpath.split(self._pathdelim)[-1]
plot_.add_caption(caption)
plt.close()
'''
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(self._plot_train_test_error(self._sample_every,
self._simulation_num))
plot_.add_caption('Train-test error (everything together)')
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(self._plot_train_test_loss(self._sample_every,
self._simulation_num))
plot_.add_caption('Train-test loss (everything together)')
plt.close()
'''
doc.generate_pdf(clean_tex=True)
def _get_next_sgd_color(self):
for color in self.sgd_color_cycler*100:
yield color
def generate_min_loss_err_report(self, simulation_nums, custom_text='Results'):
doc = doc = tex.Document(self._pdf_filename,
font_size='tiny')
doc.append(custom_text)
doc.append(tex.LineBreak())
self._create_specs_table(doc)
for s in simulation_nums:
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(self._plot_train_test_min_error(self._sample_every,
simulation_num=s))
plot_.add_caption('Train-test min error ' + str(s))
plt.close()
with doc.create(tex.Figure(position=self._position)) as plot_:
plot_.add_image(self._plot_train_test_min_loss(self._sample_every,
simulation_num=s))
plot_.add_caption('Train-test min loss ' + str(s))
plt.close()
doc.generate_pdf(clean_tex=True)
    def _plot_train_test_error(self, sample_every=1, simulation_num=0):
        """Plot train/test error of every replica of every extractor into
        one figure, save it as 'train_test_error.png' and return its path.

        NOTE(review): the get_summary() calls use ``self._simulation_num``
        while ``n_epochs`` uses the ``simulation_num`` parameter — confirm
        this mix is intended. ``sample_every`` is currently unused.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0  # index into self._colors for multi-replica runs
        for name in self._summ_ext:
            se = self._summ_ext[name]
            for r in range(se.get_description()['n_replicas']):
                x, y = se.get_summary(summ_name='test_error',
                                      replica_id=r,
                                      simulation_num=self._simulation_num)
                x1, y1 = se.get_summary(summ_name='train_error',
                                        replica_id=r,
                                        simulation_num=self._simulation_num)
                n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
                if se.get_description()['n_replicas'] == 1:
                    # Single replica (plain SGD-style run): thicker lines,
                    # no replica id in the label.
                    label_test = se.get_name() + ' Test error'
                    label_train = se.get_name() + ' Train error'
                    color = self._get_next_sgd_color().__next__()['color']
                    linestyle_test = '-'
                    linestyle_train = '--'
                    linewidth_train = 2
                    linewidth_test = 2
                else:
                    label_test = se.get_name() + ' Test error (replica ' + str(r) + ')'
                    label_train = se.get_name() + ' Train error (replica ' + str(r) + ')'
                    color = self._colors[color_idx]
                    linestyle_test = '-'
                    linestyle_train = '--'
                    linewidth_train = 1.5
                    linewidth_test = 1.5
                    color_idx += 1
                plot.plot(x,
                          y,
                          fig=fig,
                          ax=ax,
                          label=label_test,
                          linewidth=linewidth_test,
                          color=color,
                          linestyle=linestyle_test,
                          splined_points_mult=None)
                # Train curve re-sampled onto an even epoch grid of len(y1).
                plot.plot(np.linspace(start=0, stop=n_epochs, num=len(y1)),
                          y1,
                          fig=fig,
                          ax=ax,
                          label=label_train,
                          linewidth=linewidth_train,
                          linestyle=linestyle_train,
                          color=color,
                          splined_points_mult=None)
        plot.legend(fig=fig,
                    ax=ax,
                    xlabel='EPOCHS',
                    ylabel='ERROR',)
        img_path = os.path.join(self._images_dirname, 'train_test_error.png')
        plt.savefig(img_path, bbox_inches='tight')
        return img_path
def _plot_train_test_min_error_with_markers(self, sample_every=1, simulation_num=0, ylim=(0, 1), lower=500, higher=700):
def _get_next_yloc():
for y in self.yaxis_cycle*10000:
yield y['y']
fig, ax = plt.subplots()
plot = Plot()
color_idx = 0
added_noise_keys = None
for name in self._summ_ext:
se = self._summ_ext[name]
sep, loss_val, loss_err = se.get_sep_ratio_vs_min_error()
min_rid = se._vals['replica_id_min_err_sim_' + str(simulation_num)]
x, y = se.get_summary(summ_name='test_error',
replica_id=min_rid,
simulation_num=simulation_num)
x1, y1 = se.get_summary(summ_name='train_error',
replica_id=min_rid,
simulation_num=simulation_num)
n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
prev_x = x.copy()
prev_x1 = x1.copy()
x = [x[i] for i in range(len(y)) if lower <= prev_x[i] <= higher]
y = [y[i] for i in range(len(y)) if lower <= prev_x[i] <= higher]
x1 = [x1[i] for i in range(len(y1)) if lower <= prev_x1[i] <= higher]
y1 = [y1[i] for i in range(len(y1)) if lower <= prev_x1[i] <= higher]
n_epochs = higher - lower
if se.get_description()['n_replicas'] == 1:
#label_test = se.get_name() + '_test_' + str(min_rid) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(min_rid) + '_min:' + "{0:.3f}".format(min(y1))
#color = 'black'
label_test = se.get_name() + ' Test error (replica ' + str(min_rid) + ')'
label_train = se.get_name() + ' Train error (replica ' + str(min_rid) + ')'
color = self._get_next_sgd_color().__next__()['color']
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 2
linewidth_test = 2
else:
#label_test = se.get_name() + '_test_' + str(min_rid) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(min_rid) + '_min:' + "{0:.3f}".format(min(y1))
label_test = se.get_name() + ' Test loss (error ' + str(min_rid) + ')'
label_train = se.get_name() + ' Train loss (error ' + str(min_rid) + ')'
color = self._colors[color_idx]
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 1.5
linewidth_test = 1.5
color_idx += 1
plot.plot(x,
y,
fig=fig,
ax=ax,
label=label_test,
linewidth=linewidth_test,
color=color,
linestyle=linestyle_test,
splined_points_mult=None)
plot.plot(x1,
y1,
fig=fig,
ax=ax,
label=label_train,
linewidth=linewidth_train,
linestyle=linestyle_train,
color=color,
splined_points_mult=None)
x, noise_vals = se.get_summary(
'noise_values', replica_id=min_rid, simulation_num=simulation_num)
noises = sorted(list(set(noise_vals)))
noises = [(n, i) for i, n in enumerate(noises)]
noise_keys = {n:i for n, i in noises}
next_yloc = _get_next_yloc()
for i, noise in enumerate(noise_vals):
if i > 0 and lower <= x[i] <= higher and noise != noise_vals[i-1]:
ax.axvline(x[i])
ax.text(x[i-1], next_yloc.__next__(), str(noise_keys[noise_vals[i-1]]) + '->' + str(noise_keys[noise]))
added_noise_keys = noise_keys
if added_noise_keys:
xlabel = 'EPOCHS\n' + json.dumps(added_noise_keys)
else:
xlabel = 'EPOCHS'
plot.legend(fig=fig,
ax=ax,
xlabel=xlabel,
ylabel='ERROR',
ylimit=ylim)
img_path = os.path.join(self._images_dirname,
'train_test_min_error_with_markers' + str(simulation_num) + '.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
    def _plot_train_test_min_error(self,
                                   sample_every=1,
                                   simulation_num=0,
                                   ylim=(0, 1),
                                   xlim=None,
                                   log_y=None,
                                   log_x=None,
                                   store_image=True):
        """Plot train/test error of each extractor's best replica.

        'Best' is the replica with minimum test error in ``simulation_num``.
        For multi-replica MOA runs the mixture curves ('moa_train_error' /
        'moa_test_error') are plotted as well. Returns the image path; the
        file is written only when ``store_image`` is True.
        ``sample_every`` is currently unused.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0
        for name in self._summ_ext:
            se = self._summ_ext[name]
            # Extra mixture-of-agents curves for multi-replica MOA runs.
            if (se.get_description()['n_replicas'] != 1
                and 'mode' in se.get_description()
                and se.get_description()['mode'] is not None
                and se.get_description()['mode'].lower() == 'moa'):
                train_label = se.get_name() + ' Train error (MOA)'
                test_label = se.get_name() + ' Test error (MOA)'
                train_summ_name = 'moa_train_error'
                test_summ_name = 'moa_test_error'
                test_linestyle = '-'
                train_linestyle = '--'
                x, y = se.get_summary(test_summ_name, simulation_num=simulation_num)
                plot.plot(x, y, fig=fig, ax=ax, label=test_label, linestyle=test_linestyle,
                          color=self._colors[color_idx])
                x, y = se.get_summary(train_summ_name, simulation_num=simulation_num)
                plot.plot(x, y, fig=fig, ax=ax, label=train_label, linestyle=train_linestyle,
                          color=self._colors[color_idx])
                color_idx += 1
            sep, loss_val, loss_err = se.get_sep_ratio_vs_min_error()
            # Replica that achieved the minimum test error in this simulation.
            min_rid = se._vals['replica_id_min_err_sim_' + str(simulation_num)]
            x, y = se.get_summary(summ_name='test_error',
                                  replica_id=min_rid,
                                  simulation_num=simulation_num)
            x1, y1 = se.get_summary(summ_name='train_error',
                                    replica_id=min_rid,
                                    simulation_num=simulation_num)
            n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
            if se.get_description()['n_replicas'] == 1:
                # Single replica: thicker lines, no replica id in the label.
                label_test = se.get_name() + ' Test error'
                label_train = se.get_name() + ' Train error'
                color = self._get_next_sgd_color().__next__()['color']
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 2
                linewidth_test = 2
            else:
                label_test = se.get_name() + ' Test error (replica ' + str(min_rid) + ')'
                label_train = se.get_name() + ' Train error (replica ' + str(min_rid) + ')'
                color = self._colors[color_idx]
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 1.5
                linewidth_test = 1.5
                color_idx += 1
            plot.plot(x,
                      y,
                      fig=fig,
                      ax=ax,
                      label=label_test,
                      linewidth=linewidth_test,
                      color=color,
                      linestyle=linestyle_test,
                      splined_points_mult=None)
            # Train curve re-sampled onto an even epoch grid of len(y1).
            plot.plot(np.linspace(start=0, stop=n_epochs, num=len(y1)),
                      y1,
                      fig=fig,
                      ax=ax,
                      label=label_train,
                      linewidth=linewidth_train,
                      linestyle=linestyle_train,
                      color=color,
                      splined_points_mult=None)
        plot.legend(fig=fig,
                    ax=ax,
                    xlabel='EPOCHS',
                    ylabel='ERROR',
                    ylimit=ylim,
                    xlimit=xlim,
                    log_x=log_x,
                    log_y=log_y)
        img_path = os.path.join(self._images_dirname,
                                'train_test_min_error' + str(simulation_num) + '.png')
        if store_image:
            plt.savefig(img_path, bbox_inches='tight')
        return img_path
    def _plot_train_test_min_loss(self,
                                  sample_every=1,
                                  simulation_num=0,
                                  ylim=(0, 5),
                                  log_y=None,
                                  epoch_lim=None):
        """Plot train/test loss of each extractor's best replica.

        'Best' is the replica with minimum test *error* in
        ``simulation_num``. For multi-replica MOA runs the mixture curves
        ('moa_train_loss' / 'moa_test_loss') are plotted as well.
        ``epoch_lim`` truncates both curves to epochs <= epoch_lim.
        Returns the saved image path. ``sample_every`` and ``log_y`` are
        currently unused.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0
        for name in self._summ_ext:
            se = self._summ_ext[name]
            # Extra mixture-of-agents curves for multi-replica MOA runs.
            if (se.get_description()['n_replicas'] != 1
                and 'mode' in se.get_description()
                and se.get_description()['mode'] is not None
                and se.get_description()['mode'].lower() == 'moa'):
                train_label = se.get_name() + ' Train loss (MOA)'
                test_label = se.get_name() + ' Test loss (MOA)'
                train_summ_name = 'moa_train_loss'
                test_summ_name = 'moa_test_loss'
                test_linestyle = '-'
                train_linestyle = '--'
                x, y = se.get_summary(test_summ_name, simulation_num=simulation_num)
                plot.plot(x, y, fig=fig, ax=ax, label=test_label, linestyle=test_linestyle,
                          color=self._colors[color_idx])
                x, y = se.get_summary(train_summ_name, simulation_num=simulation_num)
                plot.plot(x, y, fig=fig, ax=ax, label=train_label, linestyle=train_linestyle,
                          color=self._colors[color_idx])
                color_idx += 1
            sep, loss_val, loss_err = se.get_sep_ratio_vs_min_error()
            # Replica that achieved the minimum test error in this simulation.
            min_rid = se._vals['replica_id_min_err_sim_' + str(simulation_num)]
            x, y = se.get_summary(summ_name='test_loss',
                                  replica_id=min_rid,
                                  simulation_num=simulation_num)
            x1, y1 = se.get_summary(summ_name='train_loss',
                                    replica_id=min_rid,
                                    simulation_num=simulation_num)
            n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
            if se.get_description()['n_replicas'] == 1:
                # Single replica: thicker lines, no replica id in the label.
                label_test = se.get_name() + ' Test loss'
                label_train = se.get_name() + ' Train loss'
                color = self._get_next_sgd_color().__next__()['color']
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 2
                linewidth_test = 2
            else:
                label_test = se.get_name() + ' Test loss (replica ' + str(min_rid) + ')'
                label_train = se.get_name() + ' Train loss (replica ' + str(min_rid) + ')'
                color = self._colors[color_idx]
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 1.5
                linewidth_test = 1.5
                color_idx += 1
            x = np.array(x)
            y = np.array(y)
            if epoch_lim is not None:
                indices = np.where(x <= epoch_lim)[0]
                x = x[indices]
                y = y[indices]
            plot.plot(x,
                      y,
                      fig=fig,
                      ax=ax,
                      label=label_test,
                      linewidth=linewidth_test,
                      color=color,
                      linestyle=linestyle_test,
                      splined_points_mult=None)
            # Train curve re-sampled onto an even epoch grid of len(y1).
            x1 = np.linspace(start=0, stop=n_epochs, num=len(y1))
            if epoch_lim is not None:
                # NOTE(review): np.where without [0] yields a tuple here
                # (unlike the test-curve branch above); indexing works for
                # 1-D ndarrays but would fail if y1 were a plain list —
                # confirm y1's type.
                indices = np.where(x1 <= epoch_lim)
                y1 = y1[indices]
                x1 = x1[indices]
            plot.plot(x1,
                      y1,
                      fig=fig,
                      ax=ax,
                      label=label_train,
                      linewidth=linewidth_train,
                      linestyle=linestyle_train,
                      color=color,
                      splined_points_mult=None)
        plot.legend(fig=fig,
                    ax=ax,
                    xlabel='EPOCHS',
                    ylabel='LOSS',
                    ylimit=ylim)
        img_path = os.path.join(self._images_dirname,
                                'train_test_min_loss' + str(simulation_num) + '.png')
        plt.savefig(img_path, bbox_inches='tight')
        return img_path
def _plot_train_test_error_epochs_diffusion(self, se, simulation_num=0, ylim=(0, 1)):
se._plot_diffusion_vs_min_error(simulation_num=simulation_num, ylim=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_error_epochs_diffusion'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_loss_epochs_diffusion(self, se, simulation_num=0, ylim=(0, 5)):
se._plot_diffusion_vs_min_loss(simulation_num=simulation_num,
ylim=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_loss_epochs_diffusion'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_error_gap(self, se, simulation_num=0, ylim=(0, 1)):
se._plot_train_test_error_gap(simulation_num, ylim=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_error_gap_diffusion'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_error_gap_all(self, simulation_num=0, ylim=(0, 1), linewidth=1.2):
"""Plots `_plot_train_test_error_gap` for all simulations together."""
fig = plt.figure(figsize=(18, 10))
ax = fig.gca(projection='3d')
for i, name in enumerate(self._summ_ext):
se = self._summ_ext[name]
#if se.get_description()['n_replicas'] == 1:
# continue
x_gap, y_gap, x_diff, result = se._data_for_train_test_error_gap(
simulation_num=simulation_num, ylim=ylim)
ax.plot(x_gap, y_gap, np.zeros(len(result['test'])),
color=se._colors[i], linewidth=linewidth,
label='Test-Train Error Gap for ' + se.get_name())
ax.plot(x_gap, ylim[1]*np.ones(len(result['test'])), result['diff'],
color=se._colors[i], linewidth=linewidth)
ax.view_init(20, 270)
xlabel = ('EPOCHS\n'
+ 'Gap-Diffusion Corr: '
+ "{0:.2f}".format(pearsonr(y_gap, result['diff'])[0]))
plt.xlabel(xlabel, labelpad=20)
plt.ylabel('Error Gap')
plt.ylim(min(0, min(y_gap)), ylim[1])
ax.set_zlabel('Diffusion')
plt.legend()
img_path = os.path.join(
self._images_dirname,
'train_test_error_gap_diffusion_all'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_loss_gap(self, se, simulation_num=0, ylim=(0, 5)):
se._plot_train_test_loss_gap(simulation_num, ylim=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_loss_gap_diffusion'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_loss_gap_all(self, simulation_num=0, ylim=(0, 1), linewidth=1.2):
"""Plots `_plot_train_test_loss_gap` for all simulations together."""
fig = plt.figure(figsize=(18, 10))
ax = fig.gca(projection='3d')
for i, name in enumerate(self._summ_ext):
se = self._summ_ext[name]
#if se.get_description()['n_replicas'] == 1:
# continue
x_gap, y_gap, x_diff, result = se._data_for_train_test_loss_gap(
simulation_num=simulation_num, ylim=ylim)
ax.plot(x_gap, y_gap, np.zeros(len(result['test'])),
color=se._colors[i], linewidth=linewidth,
label='Test-Train Loss Gap for ' + se.get_name())
ax.plot(x_gap, ylim[1]*np.ones(len(result['test'])), result['diff'],
color=se._colors[i], linewidth=linewidth)
ax.view_init(20, 270)
xlabel = ('EPOCHS\n'
+ 'Gap-Diffusion Corr: '
+ "{0:.2f}".format(pearsonr(y_gap, result['diff'])[0]))
plt.xlabel(xlabel, labelpad=20)
plt.ylabel('Loss Gap')
plt.ylim(min(0, min(y_gap)), ylim[1])
ax.set_zlabel('Diffusion')
plt.legend()
img_path = os.path.join(
self._images_dirname,
'train_test_loss_gap_diffusion_all'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
    def _plot_train_test_min_loss_with_markers(
        self, sample_every=1, simulation_num=0, ylim=(0, 5), lower=500, higher=700):
        """Plot train/test loss of each extractor's best replica with
        noise-swap markers.

        The replica with minimum test error is selected per extractor; its
        loss curves are restricted to epochs in [lower, higher], and a
        vertical line plus 'old->new' noise-index annotation marks every
        noise-level change. Returns the saved image path.
        ``sample_every`` is currently unused.
        """
        def _get_next_yloc():
            # Cycle (scaled-down) y offsets so annotations don't overlap.
            for y in self.yaxis_cycle*10000:
                yield y['y']/3
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0
        added_noise_keys = None
        for name in self._summ_ext:
            se = self._summ_ext[name]
            sep, loss_val, loss_err = se.get_sep_ratio_vs_min_error()
            # Replica that achieved the minimum test error in this simulation.
            min_rid = se._vals['replica_id_min_err_sim_' + str(simulation_num)]
            x, y = se.get_summary(summ_name='test_loss',
                                  replica_id=min_rid,
                                  simulation_num=simulation_num)
            x1, y1 = se.get_summary(summ_name='train_loss',
                                    replica_id=min_rid,
                                    simulation_num=simulation_num)
            n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
            # Keep only samples whose epoch lies within [lower, higher].
            prev_x = x.copy()
            prev_x1 = x1.copy()
            x = [x[i] for i in range(len(y)) if lower <= prev_x[i] <= higher]
            y = [y[i] for i in range(len(y)) if lower <= prev_x[i] <= higher]
            x1 = [x1[i] for i in range(len(y1)) if lower <= prev_x1[i] <= higher]
            y1 = [y1[i] for i in range(len(y1)) if lower <= prev_x1[i] <= higher]
            n_epochs = higher - lower
            if se.get_description()['n_replicas'] == 1:
                # Single replica: thicker lines, no replica id in the label.
                label_test = se.get_name() + ' Test loss'
                label_train = se.get_name() + ' Train loss'
                color = self._get_next_sgd_color().__next__()['color']
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 2
                linewidth_test = 2
            else:
                label_test = se.get_name() + ' Test loss (replica ' + str(min_rid) + ')'
                label_train = se.get_name() + ' Train loss (replica ' + str(min_rid) + ')'
                color = self._colors[color_idx]
                linestyle_test = '-'
                linestyle_train = '--'
                linewidth_train = 1.5
                linewidth_test = 1.5
                color_idx += 1
            plot.plot(x,
                      y,
                      fig=fig,
                      ax=ax,
                      label=label_test,
                      linewidth=linewidth_test,
                      color=color,
                      linestyle=linestyle_test,
                      splined_points_mult=None)
            plot.plot(x1,
                      y1,
                      fig=fig,
                      ax=ax,
                      label=label_train,
                      linewidth=linewidth_train,
                      linestyle=linestyle_train,
                      color=color,
                      splined_points_mult=None)
            # Map each distinct noise value to a small integer index for
            # compact 'old->new' annotations.
            x, noise_vals = se.get_summary('noise_values', replica_id=min_rid, simulation_num=simulation_num)
            noises = sorted(list(set(noise_vals)))
            noises = [(n, i) for i, n in enumerate(noises)]
            noise_keys = {n:i for n, i in noises}
            next_yloc = _get_next_yloc()
            text_loc = ylim[1]
            for i, noise in enumerate(noise_vals):
                if i > 0 and lower <= x[i] <= higher and noise != noise_vals[i-1]:
                    ax.axvline(x[i])
                    ax.text(x[i-1], text_loc + next_yloc.__next__()*5, str(noise_keys[noise_vals[i-1]]) + '->' + str(noise_keys[noise]))
                    added_noise_keys = noise_keys
        if added_noise_keys:
            # Legend of noise-index -> noise-value mapping under the x axis.
            xlabel = 'EPOCHS\n' + json.dumps(added_noise_keys)
        else:
            xlabel = 'EPOCHS'
        # NOTE(review): ylabel is 'ERROR' although loss is plotted — confirm
        # this is intended.
        plot.legend(fig=fig,
                    ax=ax,
                    xlabel=xlabel,
                    ylabel='ERROR',
                    ylimit=ylim)
        img_path = os.path.join(self._images_dirname,
                                'train_test_min_loss_with_markers' + str(simulation_num) + '.png')
        plt.savefig(img_path, bbox_inches='tight')
        return img_path
    def _plot_loss(self, summ_name, sample_every=1, simulation_num=0):
        """Per-replica plot for an arbitrary summary name.

        NOTE(review): this method appears unfinished/dead — it creates a
        figure and computes x, y and a color for each replica, but never
        calls plot/legend/savefig and implicitly returns None. Confirm
        whether it should be completed or removed.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0
        for name in self._summ_ext:
            se = self._summ_ext[name]
            for r in range(se.get_description()['n_replicas']):
                x, y = se.get_summary(summ_name=summ_name,
                                      replica_id=r,
                                      simulation_num=simulation_num)
                if se.get_description()['n_replicas'] == 1:
                    color = self._get_next_sgd_color().__next__()['color']
                else:
                    color = self._colors[color_idx]
    def _plot_train_test_loss(self, sample_every=1, simulation_num=0):
        """Plot train/test loss of every replica of every extractor into
        one figure, save it as 'train_test_loss.png' and return its path.

        NOTE(review): ``sample_every`` is unused, and the y-axis label is
        'ERROR' although loss is plotted — confirm both are intended.
        """
        fig, ax = plt.subplots()
        plot = Plot()
        color_idx = 0  # index into self._colors for multi-replica runs
        for name in self._summ_ext:
            se = self._summ_ext[name]
            for r in range(se.get_description()['n_replicas']):
                x, y = se.get_summary(summ_name='test_loss',
                                      replica_id=r,
                                      simulation_num=simulation_num)
                x1, y1 = se.get_summary(summ_name='train_loss',
                                        replica_id=r,
                                        simulation_num=simulation_num)
                n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
                if se.get_description()['n_replicas'] == 1:
                    # Single replica: thicker lines, no replica id in label.
                    label_test = se.get_name() + ' Test loss'
                    label_train = se.get_name() + ' Train loss'
                    color = self._get_next_sgd_color().__next__()['color']
                    linestyle_test = '-'
                    linestyle_train = '--'
                    linewidth_train = 2
                    linewidth_test = 2
                else:
                    label_test = se.get_name() + ' Test loss (replica ' + str(r) + ')'
                    label_train = se.get_name() + ' Train loss (replica ' + str(r) + ')'
                    color = self._colors[color_idx]
                    linestyle_test = '-'
                    linestyle_train = '--'
                    linewidth_train = 1.5
                    linewidth_test = 1.5
                    color_idx += 1
                plot.plot(x,
                          y,
                          fig=fig,
                          ax=ax,
                          label=label_test,
                          linewidth=linewidth_test,
                          color=color,
                          linestyle=linestyle_test,
                          splined_points_mult=None)
                # Train curve re-sampled onto an even epoch grid of len(y1).
                plot.plot(np.linspace(start=0, stop=n_epochs, num=len(y1)),
                          y1,
                          fig=fig,
                          ax=ax,
                          label=label_train,
                          linewidth=linewidth_train,
                          linestyle=linestyle_train,
                          color=color,
                          splined_points_mult=None)
        plot.legend(fig=fig,
                    ax=ax,
                    xlabel='EPOCHS',
                    ylabel='ERROR',)
        img_path = os.path.join(self._images_dirname, 'train_test_loss.png')
        plt.savefig(img_path, bbox_inches='tight')
        return img_path
def _plot_train_test_error_per_sim(self, se, sample_every=1, simulation_num=0, ylim=(0, 1)):
fig, ax = plt.subplots()
plot = Plot()
color_idx = 0
for r in range(se.get_description()['n_replicas']):
x, y = se.get_summary(summ_name='test_error',
replica_id=r,
simulation_num=simulation_num)
x1, y1 = se.get_summary(summ_name='train_error',
replica_id=r,
simulation_num=simulation_num)
n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
if se.get_description()['n_replicas'] == 1:
#label_test = se.get_name() + '_test_' + str(r) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(r) + '_min:' + "{0:.3f}".format(min(y1))
label_test = se.get_name() + ' Test error'
label_train = se.get_name() + ' Train error'
#color = 'black'
color = self._get_next_sgd_color().__next__()['color']
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 2
linewidth_test = 2
else:
#label_test = se.get_name() + '_test_' + str(r) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(r) + '_min:' + "{0:.3f}".format(min(y1))
label_test = se.get_name() + ' Test error (replica ' + str(r) + ')'
label_train = se.get_name() + ' Train error (replica ' + str(r) + ')'
color = self._colors[color_idx]
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 1.5
linewidth_test = 1.5
color_idx += 1
plot.plot(x,
y,
fig=fig,
ax=ax,
label=label_test,
linewidth=linewidth_test,
color=color,
linestyle=linestyle_test,
splined_points_mult=None)
#y1 = [(a + b) / 2 for a, b in zip(y1[::2], y1[1::2])]
#y1 = y1[::sample_every]
plot.plot(np.linspace(start=0, stop=n_epochs, num=len(y1)),
y1,
fig=fig,
ax=ax,
label=label_train,
linewidth=linewidth_train,
linestyle=linestyle_train,
color=color,
splined_points_mult=None)
plot.legend(fig=fig,
ax=ax,
xlabel='EPOCHS',
ylabel='ERROR',
ylimit=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_error_per_sim'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_train_test_loss_per_sim(self, se, sample_every=1, simulation_num=0, ylim=(0, 5)):
fig, ax = plt.subplots()
plot = Plot()
color_idx = 0
for r in range(se.get_description()['n_replicas']):
x, y = se.get_summary(summ_name='test_loss',
replica_id=r,
simulation_num=simulation_num)
x1, y1 = se.get_summary(summ_name='train_loss',
replica_id=r,
simulation_num=simulation_num)
n_epochs = se._summaries[simulation_num]['latest_epoch'] + 1
if se.get_description()['n_replicas'] == 1:
#label_test = se.get_name() + '_test_' + str(r) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(r) + '_min:' + "{0:.3f}".format(min(y1))
#color = 'black'
label_test = se.get_name() + ' Test loss'
label_train = se.get_name() + ' Train loss'
color = self._get_next_sgd_color().__next__()['color']
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 2
linewidth_test = 2
else:
#label_test = se.get_name() + '_test_' + str(r) + '_min:' + "{0:.3f}".format(min(y))
#label_train = se.get_name() + '_train_' + str(r) + '_min:' + "{0:.3f}".format(min(y1))
label_test = se.get_name() + ' Test loss (replica ' + str(r) + ')'
label_train = se.get_name() + ' Train loss (replica ' + str(r) + ')'
color = self._colors[color_idx]
linestyle_test = '-'
linestyle_train = '--'
linewidth_train = 1.5
linewidth_test = 1.5
color_idx += 1
plot.plot(x,
y,
fig=fig,
ax=ax,
label=label_test,
linewidth=linewidth_test,
color=color,
linestyle=linestyle_test,
splined_points_mult=None)
#y1 = [(a + b) / 2 for a, b in zip(y1[::2], y1[1::2])]
#y1 = y1[::sample_every]
plot.plot(np.linspace(start=0, stop=n_epochs, num=len(y1)),
y1,
fig=fig,
ax=ax,
label=label_train,
linewidth=linewidth_train,
linestyle=linestyle_train,
color=color,
splined_points_mult=None)
plot.legend(fig=fig,
ax=ax,
xlabel='EPOCHS',
ylabel='ERROR',
ylimit=ylim)
img_path = os.path.join(
self._images_dirname,
'train_test_loss_per_sim'+se.get_name().replace(' ', '')+'.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_mixing(self, se, simulation_num=0):
fig = se._plot_mixing(simulation_num)
img_path = os.path.join(self._images_dirname, se.get_name().replace(' ', '') + 'mixing.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_diffusion(self, se, simulation_num=0):
fig = se._plot_diffusion(simulation_num)
img_path = os.path.join(self._images_dirname, se.get_name().replace(' ', '') + 'diffusion.png')
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_noise_level_histogram(self, se, replica_id=0, simulation_num=0):
fig = se._plot_noise_level_histogram(replica_id, simulation_num)
name = '_'.join(['hist', str(simulation_num), str(replica_id)])
name = se.get_name().replace(' ', '') + name + '.png'
img_path = os.path.join(self._images_dirname, name)
plt.savefig(img_path, bbox_inches='tight')
return img_path
def _plot_loss_histogram_for_noise_level(self, se, noise_level=0, simulation_num=0):
fig = se._plot_loss_histogram_for_noise_level(noise_level=noise_level,
simulation_num=simulation_num)
levelstr = (str(noise_level) if not isinstance(noise_level, list)
else '-'.join([str(x) for x in noise_level]))
name = '_'.join(['hist_loss_noise_levels',
str(simulation_num),
levelstr])
name = se.get_name().replace(' ', '') + name + '.png'
img_path = os.path.join(self._images_dirname, name)
plt.savefig(img_path, bbox_inches='tight')
return img_path
  def _create_specs_table(self, doc):
    """Generates summary table.

    First appends each experiment's name and its noise list as plain text,
    then builds one wide pylatex table with one row of aggregate statistics
    per experiment.

    Args:
      doc: A pylatex document object the text and table are appended to.
    """
    #original_names = "\n".join(self._original_names)
    for name in self._original_names:
      doc.append(name)
      doc.append(tex.LineBreak())
      doc.append(str(self._summ_ext[name].get_description()['noise_list']))
      doc.append(tex.LineBreak())
      doc.append(tex.LineBreak())
    col_names = ['Name',
                 'Accept',
                 'Mixing',
                 'Visit',
                 'Err',
                 'MoaErr',
                 'BurnIn',
                 'Swap',
                 'Sep',
                 'Coeff',
                 'DSize',
                 'LR',
                 'Batch',
                 '#params']
    # Joining N+1 empty strings with the spec repeats it N times: one
    # zero-padded left-aligned column per header, plus a closing '|'.
    table_spec = "|@{}l@{}".join(['' for i in range(len(col_names) + 1)]) + '|'
    tabular = tex.Tabular(table_spec,
                          pos=self._position,
                          booktabs=False,
                          row_height=0.1,)
    with doc.create(tabular) as table:
      table.add_hline()
      table.add_row(col_names)
      table.add_hline()
      for summ_ext in self._summ_ext:
        # Collect every value seen for each column; a column is printed
        # only when it has exactly one distinct value (blank otherwise).
        vals_ = {n:[] for n in col_names}
        se = self._summ_ext[summ_ext]
        desc = se.get_description()
        vals_['Accept'].append("{0:.3f}".format(se.get_accept_ratio()))
        vals_['Mixing'].append("{0:.3f}".format(se.get_mix_ratio()))
        vals_['Visit'].append("{0:.3f}".format(se.get_visit_ratio()))
        sep, err, std = se.get_sep_ratio_vs_min_error()
        vals_['Err'].append("{0:.5f}".format(err))
        if 'moa' == desc['mode']:
          vals_['MoaErr'].append("{0:.3f}".format(se.get_moa_min_err()))
        else:
          vals_['MoaErr'].append('NaN')
        vals_['BurnIn'].append(str(desc['burn_in_period']))
        vals_['Name'].append(se.get_name())
        vals_['Swap'].append(str(desc['swap_step']))
        # NOTE(review): the following append raw description values (not
        # str); presumably pylatex can render them — confirm.
        vals_['Sep'].append(desc['separation_ratio'])
        vals_['Coeff'].append(desc['proba_coeff'])
        vals_['LR'].append(desc['learning_rate'])
        vals_['Batch'].append(desc['batch_size'])
        try:
          train_data_size = s_utils.get_value_from_name(se._original_name, 'train_data_size')
        except IndexError:
          train_data_size = 'unknown'
        vals_['DSize'].append(train_data_size)
        vals_['#params'].append(desc['n_params'])
        for k in vals_:
          if k != 'Coeff':
            vals_[k] = list(set(vals_[k]))
          else:
            # 'proba_coeff' may be an (unhashable) list; fall back to
            # its first element when set() rejects it.
            try:
              vals_[k] = list(set(vals_[k]))
            except TypeError:
              vals_[k] = [desc['proba_coeff'][0]]
        row = []
        for col_name in col_names:
          if len(vals_[col_name]) == 1:
            row.append(str(vals_[col_name][0]))
          else:
            row.append(' ')
        table.add_row(row)
        table.add_hline()
class SummaryExtractor:
def __init__(self, name, include_moa=True):
dirname = os.path.abspath(os.path.dirname(__file__))
self._name = name
self._dirname = os.path.join(dirname, 'summaries', name)
filenames = sorted([f for f in os.listdir(self._dirname) if 'summary' in f],
key=lambda x: int(x.split('_')[1].split('.')[0]))
description_path = os.path.join(self._dirname, 'description.json')
self._summaries = []
self._n_simulations = len(filenames)
self._include_moa = include_moa
for f in filenames:
filepath = os.path.join(self._dirname, f)
try:
with open(filepath, 'rb', os.O_NONBLOCK) as fo:
self._summaries.append(pickle.load(fo))
except EOFError:
print(f)
print(filepath)
raise
with open(description_path, 'r') as fo:
self._description = json.load(fo)
self._vals = {
'accept_ratio': None,
'mix_ratio': None,
'visit_ratio': None,
'accept_ratio_err': None,
'visit_ratio_err': None,
'avg_min_error': None,
'avg_min_error_err': None,
'avg_diff_for_min_error': None,
'avg_diff_for_min_error_err': None,
'avg_steps_for_min_error': None, # steps == epochs
'avg_steps_for_min_error_err': None,
'avg_err_differ': None,
'avg_err_differ_err':None,
'avg_err_final_differ':None,
'avg_err_final_differ_err':None,
'__debug__visit_raw':[],
'__debug__err_differ': None,
}
self._n_replicas = self.get_description()['n_replicas']
self._colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
self._moa_color = 'blue'
def get_name(self):
return self._name
def get_sep_ratio_vs_final_err_differ(self):
"""Returns final difference between test-train error."""
if self._vals['avg_err_final_differ'] is None:
_ = self.get_sep_ratio_vs_err_differ()
err_differ = self._vals['avg_err_final_differ']
err_differ_std = self._vals['avg_err_final_differ_err']
sep = self.get_description()['separation_ratio']
return sep, err_differ, err_differ_std
  def get_sep_ratio_vs_err_differ(self):
    """Returns differences between test-train error.

    Differences are calculated as
      `min(test error) - train error[argmin(test_error)]`
    and averaged over all replicas and simulations. Also computes (and
    caches) the *final* gap `last test error - mean(last train errors)`.

    Returns:
      A tuple `(separation_ratio, avg gap, gap stddev)`.
    """
    sep = self.get_description()['separation_ratio']
    # Cached from a previous call.
    if self._vals['avg_err_differ'] is not None:
      err_differ = self._vals['avg_err_differ']
      err_differ_std = self._vals['avg_err_differ_err']
      return sep, err_differ, err_differ_std
    err_differs = []
    err_final_differs = []
    # Per-replica gaps, kept for the '__debug__err_differ' breakdown.
    reps = {i:[]
            for i in range(self._n_replicas)}
    reps_final = {i:[]
                  for i in range(self._n_replicas)}
    for s in range(self._n_simulations):
      for r in range(self._n_replicas):
        test_x, test_y = self.get_summary('test_error',
                                          replica_id=r,
                                          simulation_num=s)
        train_x, train_y = self.get_summary('train_error',
                                            replica_id=r,
                                            simulation_num=s)
        _, test_steps = self.get_summary('test_steps',
                                         replica_id=r,
                                         simulation_num=s)
        _, train_steps = self.get_summary('train_steps',
                                          replica_id=r,
                                          simulation_num=s)
        # Align the train curve to the step at which test error is minimal.
        idx_test = np.argmin(test_y)
        try:
          idx_train = _find_nearest_idx(train_steps, test_steps[idx_test])
        except IndexError:
          idx_train = 0
        differ = test_y[idx_test] - train_y[idx_train]
        reps[r].append(differ)
        err_differs.append(differ)
        # Final gap: last test error minus the mean of the trailing
        # TRAIN_FREQ train-error samples.
        idx_test = len(test_y) - 1
        differ = test_y[idx_test] - np.mean(train_y[-s_utils.TRAIN_FREQ:])
        reps_final[r].append(differ)
        err_final_differs.append(differ)
    result = {i:0 for i in range(self._n_replicas)}
    for r in range(self._n_replicas):
      result[r] = np.mean(reps[r])
    self._vals['__debug__err_differ'] = result
    err_differ = np.mean(err_differs)
    err_differ_std = np.std(err_differs)
    # Cache both the argmin-aligned and the final-gap statistics.
    self._vals['avg_err_differ'] = err_differ
    self._vals['avg_err_differ_err'] = err_differ_std
    self._vals['avg_err_final_differ'] = np.mean(err_final_differs)
    self._vals['avg_err_final_differ_err'] = np.std(err_final_differs)
    return sep, err_differ, err_differ_std
  def get_diffusion_vs_min_error(self):
    """Returns a min loss and diffusion value to achieve this min loss.

    For every simulation, picks the replica with the smallest test error,
    records its error, the diffusion value nearest that point and the
    epoch, then averages over simulations. Results (and the best replica
    id per simulation, under 'replica_id_min_err_sim_<s>') are cached in
    `self._vals`.

    Returns:
      A tuple `(diff_val, diff_err, loss_val, loss_err, separation_ratio)`.
    """
    if self._vals['avg_min_error'] is None:
      losses = []
      diffs = []
      steps = []
      replicas = []
      for s in range(self._n_simulations):
        candidate_losses = []
        candidate_diffs = []
        candidate_steps = []
        for r in range(self._n_replicas):
          x_loss, y_loss = self.get_summary('test_error', replica_id=r,
                                            simulation_num=s)
          x_diff, y_diff = self.get_summary('diffusion', replica_id=r,
                                            simulation_num=s)
          y_loss = np.asarray(y_loss)
          loss_idx = y_loss.argmin()
          # Diffusion is sampled on its own grid; take the sample closest
          # in epochs to the min-error point.
          diff_idx = _find_nearest_idx(x_diff, x_loss[loss_idx])
          candidate_losses.append(y_loss[loss_idx])
          candidate_diffs.append(y_diff[diff_idx])
          candidate_steps.append(x_loss[loss_idx])
        # Remember which replica won this simulation.
        self._vals['replica_id_min_err_sim_' + str(s)] = np.argmin(candidate_losses)
        idx = np.argmin(np.asarray(candidate_losses))
        losses.append(candidate_losses[idx])
        diffs.append(candidate_diffs[idx])
        steps.append(candidate_steps[idx])
        replicas.append(idx)
      loss_val = np.mean(losses)
      loss_err = np.std(losses)
      diff_val = np.mean(diffs)
      diff_err = np.std(diffs)
      step_val = np.mean(steps)
      step_err = np.std(steps)
      self._vals['avg_min_error'] = loss_val
      self._vals['avg_min_error_err'] = loss_err
      self._vals['avg_diff_for_min_error'] = diff_val
      self._vals['avg_diff_for_min_error_err'] = diff_err
      self._vals['avg_steps_for_min_error'] = step_val
      self._vals['avg_steps_for_min_error_err'] = step_err
    else:
      # Served from cache.
      loss_val = self._vals['avg_min_error']
      loss_err = self._vals['avg_min_error_err']
      diff_val = self._vals['avg_diff_for_min_error']
      diff_err = self._vals['avg_diff_for_min_error_err']
    sep = self.get_description()['separation_ratio']
    return diff_val, diff_err, loss_val, loss_err, sep
def get_n_steps_vs_min_error(self):
"""Returns a min error and number of steps to achieve this min loss."""
if self._vals['avg_min_error'] is None:
_ = self.get_diffusion_vs_min_error()
loss_val = self._vals['avg_min_error']
loss_err = self._vals['avg_min_error_err']
step_val = self._vals['avg_steps_for_min_error']
step_err = self._vals['avg_steps_for_min_error_err']
sep = self.get_description()['separation_ratio']
return step_val, step_err, loss_val, loss_err, sep
def get_sep_ratio_vs_min_error(self):
if self._vals['avg_min_error'] is None:
_ = self.get_diffusion_vs_min_error()
loss_val = self._vals['avg_min_error']
loss_err = self._vals['avg_min_error_err']
sep = self.get_description()['separation_ratio']
return sep, loss_val, loss_err
def get_sep_ratio_vs_accept_ratio(self):
"""Returns a tuple (`sep_ratio`, `accept_ratio`, `stddev`)."""
sep = self.get_description()['separation_ratio']
acc = self.get_accept_ratio()
stddev = self._vals['accept_ratio_err']
return sep, acc, stddev
def get_sep_ratio_vs_mix_ratio(self):
"""Returns a tuple (`sep_ratio`, `mix_ratio`, `stddev`)."""
sep = self.get_description()['separation_ratio']
mix = self.get_mix_ratio()
stddev = self._vals['mix_ratio_err']
return sep, mix, stddev
def get_sep_ratio_vs_visit_ratio(self):
"""Returns a tuple (`sep_ratio`, `visit_ratio`, `stddev`)."""
sep = self.get_description()['separation_ratio']
visit = self.get_visit_ratio()
stddev = self._vals['visit_ratio_err']
return sep, visit, stddev
  def show_report(self, simulation_num=0, sample_every=2, print_stats=True):
    """Shows the report of the simulation with plots.

    Optionally prints aggregate statistics (accept/visit/mixing ratios,
    min errors per replica, overfitting gap), then draws the standard set
    of loss/error/diffusion/mixing/histogram plots.

    Args:
      simulation_num: Simulation to report on.
      sample_every: Subsampling factor forwarded to the plot helpers.
      print_stats: When False, only the plots are produced.
    """
    if print_stats:
      sep, acc, stddev = self.get_sep_ratio_vs_accept_ratio()
      print('Accept Ratio:', acc, '+/-', stddev)
      sep, visit, stddev = self.get_sep_ratio_vs_visit_ratio()
      print('Visit Ratio:', visit, '+/-', stddev)
      sep, mix, stddev = self.get_sep_ratio_vs_mix_ratio()
      print('Mixing Ratio:', mix, '+/-', stddev)
      sep, loss_val, loss_err = self.get_sep_ratio_vs_min_error()
      # NOTE(review): redundant — the line above already computed (and
      # cached) this; kept to preserve behavior exactly.
      _ = self.get_sep_ratio_vs_min_error()
      min_rid = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
      print('Min Error value:', loss_val, '+/-', loss_err, 'for replica', min_rid)
      # Per-replica (min value, epoch of min, replica id) triples.
      test_errs, valid_errs, train_errs = [], [], []
      for r in range(self.get_description()['n_replicas']):
        x, y = self.get_summary('test_error', simulation_num=simulation_num, replica_id=r)
        test_errs.append((min(y), x[np.argmin(y)], r))
        x, y = self.get_summary('train_error', simulation_num=simulation_num, replica_id=r)
        train_errs.append((min(y), x[np.argmin(y)], r))
        x, y = self.get_summary('validation_error', simulation_num=simulation_num, replica_id=r)
        valid_errs.append((min(y), x[np.argmin(y)], r))
      # Sort all three lists jointly by ascending min test error.
      zipped = zip(test_errs, valid_errs, train_errs)
      combined = [list(x) for x in zip(*sorted(zipped, key=lambda pair: pair[0][0]))]
      test_errs, valid_errs, train_errs = combined
      for i in range(self.get_description()['n_replicas']):
        buff = '[rid:{0}]|[test:{1:.5f} at {2}]|[valid:{3:.5f} at {4}]|[train:{5:.5f} at {6}]'.format(
            test_errs[i][-1], test_errs[i][0], int(test_errs[i][1]),
            valid_errs[i][0], int(valid_errs[i][1]),
            train_errs[i][0], int(train_errs[i][1]))
        print(buff)
      print('Min Valid Error value:', min(self.get_summary('validation_error')[1]))
      # MOA stats only exist when the description declares a 'moa' mode.
      if ('mode' in self.get_description()
          and self.get_description()['mode'] is not None
          and 'moa' in self.get_description()['mode'].lower()):
        print('MOA Min Error value:', self.get_moa_min_err())
      sep, err_differ, err_differ_std = self.get_sep_ratio_vs_err_differ()
      print('Average Overfitting:',
            err_differ,
            '+/-',
            err_differ_std)
      print()
    _ = self._plot_loss(summ_name='test_loss',
                        simulation_num=simulation_num,
                        sample_every=sample_every,
                        include_moa=self._include_moa)
    _ = self._plot_loss(summ_name='train_loss',
                        simulation_num=simulation_num,
                        sample_every=sample_every,
                        include_moa=self._include_moa)
    _ = self._plot_error(summ_name='test_error',
                         simulation_num=simulation_num,
                         sample_every=sample_every,
                         include_moa=self._include_moa,
                         ylim=(0, 0.2))
    _ = self._plot_error(summ_name='validation_error',
                         simulation_num=simulation_num,
                         sample_every=sample_every,
                         include_moa=self._include_moa)
    # MOA weights may be absent for non-MOA runs; ignore quietly.
    try:
      _ = self._plot_moa_weights(simulation_num=simulation_num)
    except KeyError:
      pass
    _ = self._plot_test_err_vs_noise_level_vs_epochs(simulation_num=simulation_num)
    _ = self._plot_diffusion_vs_min_error(simulation_num=simulation_num,
                                          sample_every=sample_every)
    _ = self._plot_diffusion_vs_min_loss(simulation_num=simulation_num,
                                         sample_every=sample_every)
    _ = self._plot_train_test_error_gap(simulation_num=simulation_num,
                                        sample_every=sample_every)
    _ = self._plot_train_test_loss_gap(simulation_num=simulation_num,
                                       sample_every=sample_every)
    _ = self._plot_error(summ_name='train_error',
                         simulation_num=simulation_num,
                         sample_every=sample_every)
    _ = self._plot_diffusion(simulation_num=simulation_num)
    _ = self._plot_mixing(simulation_num=simulation_num)
    _ = self._plot_noise_level_histogram(simulation_num=simulation_num)
    '''
    _ = self._plot_grads(simulation_num=simulation_num,
                         sample_every=sample_every)
    _ = self._plot_norms(simulation_num=simulation_num)
    '''
def get_accept_ratio(self):
accepts = []
if self._vals['accept_ratio'] is None:
for s in range(self._n_simulations):
for r in range(self._n_replicas):
x, y = self.get_summary('accepts', replica_id=r, simulation_num=s)
accepts.append(np.mean(y))
self._vals['accept_ratio'] = np.mean(accepts)
self._vals['accept_ratio_err'] = np.std(accepts)
return self._vals['accept_ratio']
  def get_mix_ratio(self):
    """Return the fraction of replicas that visited every noise level.

    For each replica and simulation, counts (after the burn-in period) how
    often each noise level in the description's `noise_list` was occupied.
    A replica "mixes" when every level was visited at least once; the
    visit ratio is the fraction of levels it touched. Both statistics and
    their stddevs are cached in `self._vals`; per-(sim, replica) raw
    counts are stored under 'histograms' and '__debug__visit_raw'.
    """
    if self._vals['mix_ratio'] is None:
      # Canonical (rounded) noise levels; recorded values are snapped to
      # the nearest one.
      keys = [float("{0:.8f}".format(b))
              for b in self.get_description()['noise_list']]
      def _get_key(key):
        return keys[int(np.argmin([abs(k-key) for k in keys]))]
      mixing = {i:[] for i in range(self._n_replicas)}
      visiting = {i:[] for i in range(self._n_replicas)}
      travel_times = []
      for s in range(self._n_simulations):
        for r in range(self._n_replicas):
          x, y = self.get_summary('noise_values', replica_id=r, simulation_num=s)
          steps = self.get_summary('train_steps', replica_id=r, simulation_num=s)[1]
          #steps = sorted(list(set(steps)))
          # Occupation count per noise level for this replica/simulation.
          reps = {k:0 for k in keys}
          for i in range(len(steps)):
            if steps[i] > self.get_description()['burn_in_period']:
              try:
                reps[_get_key(y[i])] += 1
              except IndexError:
                print(_get_key(y[i]))
                raise
          d = dict(replica_id=r,
                   simulation_num=s)
          d.update(reps)
          self._vals['__debug__visit_raw'].append(d)
          # histograms
          if 'histograms' not in self._vals:
            self._vals['histograms'] = {}
          if s not in self._vals['histograms']:
            self._vals['histograms'][s] = {}
          self._vals['histograms'][s][r] = reps
          visiting[r].append(np.mean([1 if reps[x]!=0 else 0 for x in reps]))
          mixing[r].append(1 if all(reps[x]!=0 for x in reps) else 0)
      # Average over replicas within each simulation, then over simulations.
      mix_ratios = []
      visit_ratios = []
      for s in range(self._n_simulations):
        mix_ratio = np.mean([mixing[r][s] for r in range(self._n_replicas)])
        visit_ratio = np.mean([visiting[r][s] for r in range(self._n_replicas)])
        mix_ratios.append(mix_ratio)
        visit_ratios.append(visit_ratio)
      self._vals['mix_ratio'] = np.mean(mix_ratios)
      self._vals['visit_ratio'] = np.mean(visit_ratios)
      self._vals['mix_ratio_err'] = np.std(mix_ratios)
      self._vals['visit_ratio_err'] = np.std(visit_ratios)
    return self._vals['mix_ratio']
def get_moa_min_err(self):
if 'moa_min_err' not in self._vals:
x, y = self.get_summary('moa_test_error', simulation_num=0)
self._vals['moa_min_err'] = min(y)
return self._vals['moa_min_err']
def get_visit_ratio(self):
if self._vals['visit_ratio'] is None:
_ = self.get_mix_ratio()
return self._vals['visit_ratio']
def get_summary(self, summ_name, replica_id=0, simulation_num=0):
"""Returns summary for `summ_name`.
Args:
`summ_name`: Summary name. For a list of summaries see
`simulator.graph.summary.flush_summary()` function.
`replica_id`: A integer representing replica id.
`simulation_num`: A simulation from which to return the summary.
Returns:
A tuple `(x, y)` where `x` is a numpy array of epochs and `y` is
a list of summary.
Raises:
`ValueError`: If `simulation_num` is not in valid range.
"""
if simulation_num >= self._n_simulations:
err_msg = ("No such simulation: "
+ str(simulation_num))
raise ValueError(err_msg)
if 'steps' in summ_name:
y = self._summaries[simulation_num][summ_name]
y = sorted(list(set(y)))
elif 'moa' in summ_name and 'weights' not in summ_name:
y = self._summaries[simulation_num][summ_name]
else:
y = self._summaries[simulation_num][summ_name][replica_id]
n_epochs = self._summaries[simulation_num]['latest_epoch'] + 1
try:
x = np.linspace(start=0,
stop=n_epochs,
num=len(y))
except TypeError:
print(summ_name, y)
raise
return x, y
def get_description(self):
return self._description
def _plot_moa_weights(self, simulation_num=0):
fig, ax = plt.subplots()
plot = Plot()
for r in range(self._n_replicas):
x, y = self.get_summary('moa_weights', replica_id=r, simulation_num=simulation_num)
plot.plot(x, y, fig=fig, ax=ax, label='Replica ' + str(r))
plot.legend(fig, ax, xlabel='EPOCHS', ylabel='MOA WEIGHTS')
  def _plot_loss(self, summ_name, simulation_num=0, sample_every=1, ylim=(0, 5), include_moa=False):
    """Plot per-replica loss curves, optionally with the MOA curve.

    Args:
      summ_name: 'test_loss' (solid lines) or 'train_loss' (dashed).
      simulation_num: Simulation to read from.
      sample_every: Currently unused — the subsampling code is commented out.
      ylim: Y-axis limits forwarded to the legend helper.
      include_moa: When True and the description declares a 'moa' mode,
        the corresponding MOA loss curve is drawn as well.
    """
    fig, ax = plt.subplots()
    plot = Plot()
    if (include_moa
        and 'mode' in self.get_description()
        and self.get_description()['mode'] is not None
        and 'moa' in self.get_description()['mode'].lower()):
      if 'test' in summ_name:
        label = 'Test Loss (MOA)'
        linestyle = '-'
        summary_name = 'moa_test_loss'
      else:
        label = 'Train Loss (MOA)'
        linestyle = '--'
        summary_name = 'moa_train_loss'
      x, y = self.get_summary(summary_name, simulation_num=simulation_num)
      plot.plot(x, y, fig=fig, ax=ax, label=label, linewidth=1., linestyle=linestyle,
                color=self._moa_color)
    for r in range(self._n_replicas):
      if summ_name == 'test_loss':
        x, y = self.get_summary('test_loss', r, simulation_num)
        plot.plot(x, y, fig=fig, ax=ax, label='Test Loss (replica ' + str(r) + ')',
                  linewidth=1, color=self._colors[r])
        #x = x[::sample_every]
        #y = y[::sample_every]
      elif summ_name == 'train_loss':
        x1, y1 = self.get_summary('train_loss', r, simulation_num)
        #y1 = [(a + b) / 2 for a, b in zip(y1[::2], y1[1::2])]
        #y1 = y1[::sample_every]
        # Re-space x on a uniform grid matching the number of y samples.
        x1 = np.linspace(start=0, stop=x1[-1], num=len(y1))
        plot.plot(x1, y1, fig=fig, ax=ax, label='Train loss (replica ' + str(r) + ')',
                  linewidth=1, linestyle='--', splined_points_mult=None,
                  color=self._colors[r])
    plot.legend(fig, ax, title=summ_name.upper().replace('_', ' '),
                legend_title='Replica', xlabel='EPOCHS',
                ylabel='LOSS', ylimit=ylim)
  def _plot_error(self, summ_name, simulation_num=0, sample_every=1, ylim=(0, 1), include_moa=False):
    """Plot per-replica 0-1 error curves, optionally with the MOA curve.

    Args:
      summ_name: A summary containing 'test', 'validation' (solid lines)
        or 'train' (dashed) in its name.
      simulation_num: Simulation to read from.
      sample_every: Currently unused — the subsampling code is commented out.
      ylim: Y-axis limits forwarded to the legend helper.
      include_moa: When True and the description declares a 'moa' mode,
        the corresponding MOA error curve is drawn as well.
    """
    fig, ax = plt.subplots()
    plot = Plot()
    if (include_moa
        and 'mode' in self.get_description()
        and self.get_description()['mode'] is not None
        and 'moa' in self.get_description()['mode'].lower()):
      if 'test' in summ_name:
        label = 'Test Error (MOA)'
        linestyle = '-'
        summary_name = 'moa_test_error'
      else:
        label = 'Train Error (MOA)'
        linestyle = '--'
        summary_name = 'moa_train_error'
      x, y = self.get_summary(summary_name, simulation_num=simulation_num)
      plot.plot(x, y, fig=fig, ax=ax, label=label, linewidth=1., linestyle=linestyle,
                color=self._moa_color)
    for r in range(self._n_replicas):
      if 'test' in summ_name or 'validation' in summ_name:
        x, y = self.get_summary(summ_name, r, simulation_num)
        plot.plot(x, y, fig=fig, ax=ax, label='Test error (replica ' + str(r) + ')',
                  linewidth=1, color=self._colors[r])
        #x = x[::sample_every]
        #y = y[::sample_every]
      elif 'train' in summ_name:
        x1, y1 = self.get_summary(summ_name, r, simulation_num)
        #y1 = [(a + b) / 2 for a, b in zip(y1[::2], y1[1::2])]
        #y1 = y1[::sample_every]
        # Re-space x on a uniform grid matching the number of y samples.
        x1 = np.linspace(start=0, stop=x1[-1], num=len(y1))
        plot.plot(x1, y1, fig=fig, ax=ax, label='Train error (replica ' + str(r) + ')',
                  linewidth=1, linestyle='--', splined_points_mult=None,
                  color=self._colors[r])
    plot.legend(fig, ax, legend_title='Replica',
                title=summ_name.upper().replace('_', ' '),
                xlabel='EPOCHS', ylabel='0-1 ERROR', ylimit=ylim)
def _plot_noise_level_histogram(self, replica_id=-1, simulation_num=0):
if replica_id == -1:
_ = self.get_sep_ratio_vs_min_error()
replica_id = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
#print(sorted(list(histogram.items()), key=lambda x: x[0]))
x, y = self.get_summary('noise_values',
replica_id=replica_id,
simulation_num=simulation_num)
x, steps = self.get_summary('train_steps',
replica_id=replica_id,
simulation_num=simulation_num)
y = [y[i] for i in range(len(y))
if steps[i] > self.get_description()['burn_in_period']]
plot = Plot()
fig, ax = plt.subplots()
binwidth = 1
weights = np.ones_like(y)/float(len(y))
ax.hist(y, rwidth=0.5, weights=weights)
plot.legend(fig, ax, xlabel='Noise Level', ylabel='Visiting Ratio')
  def _plot_loss_histogram_for_noise_level(self,
                                           summ_name='validation_loss',
                                           noise_level=0,
                                           simulation_num=0,
                                           epsilon = 0.01,
                                           bins=60,
                                           burn_in_period=None,
                                           transformation=None,
                                           fitting_distribution=None):
    """Plots histogram of energy distributions.

    Args:
      summ_name: One of `validation/train/test_loss/error`.
      noise_level: Integer of list integers specifying a noise levels
        to plot.
      simulation_num: A number of simulation to plot.
      epsilon: Additional width that added on sides of the plot.
      bins: A number of bins for historgram.
      burn_in_period: A step (swap step) from which the energies are
        started to be taken into account.
      transformation: A function that takes values `x` energy,
        applies some mathematical transformation and returns resulted
        value. E.g. `def foo(x): return np.exp(x)`. Default is
        identity.
      fitting_distribution: A fitting function for
        `seaborn.distplot()` in addition to default KDE curve.
    """
    def identity(x): return x
    noise_list = sorted(self.get_description()['noise_list'])
    n_replicas = self.get_description()['n_replicas']
    if transformation is None:
      transform_foo = identity
    else:
      transform_foo = transformation
    # Per-replica time series of noise values and losses.
    noise_vals = {i:self.get_summary('noise_values', i, simulation_num)[1]
                  for i in range(n_replicas)}
    loss_vals = {i:self.get_summary(summ_name, i, simulation_num)[1]
                 for i in range(n_replicas)}
    _, steps = self.get_summary('_'.join([summ_name.split('_')[0], 'steps']),
                                replica_id=0,
                                simulation_num=simulation_num)
    if burn_in_period is None:
      # Infinite burn-in in the description means "use the first quarter".
      if self.get_description()['burn_in_period'] == np.inf:
        burn_in_period = steps[len(steps)//4]
      else:
        burn_in_period = self.get_description()['burn_in_period']
    if not isinstance(noise_level, list):
      noise_levels = [noise_level]
    else:
      noise_levels = noise_level
    fig, ax = plt.subplots()
    plot = Plot()
    min_loss_val = 1000
    max_loss_val = -1
    means = []
    stddevs = []
    for noise_level in noise_levels:
      noise_level_loss = []
      for i in range(len(steps)):
        if steps[i] < burn_in_period:
          continue
        # NOTE(review): bare except — breaks out of the loop as soon as
        # any replica's noise series is shorter than `steps`.
        try:
          curr_noise_dict = {r:noise_vals[r][i] for r in range(n_replicas)}
        except:
          break
        # Rank replicas by their current noise value; `noise_level` is an
        # index into that ranking, identifying which replica currently
        # occupies that level.
        beta = [curr_noise_dict[r] for r in range(n_replicas)]
        beta_rid = [(b, r) for r, b in enumerate(beta)]
        beta_rid.sort(key=lambda x: x[0])
        rid = beta_rid[noise_level][1]
        loss_val = transform_foo(loss_vals[rid][i])
        min_loss_val = min(loss_val, min_loss_val)
        max_loss_val = max(loss_val, max_loss_val)
        noise_level_loss.append(loss_val)
      means.append(np.mean(noise_level_loss))
      stddevs.append(np.std(noise_level_loss))
      label = 'Noise Level: ' + "{0:.3f}({1})".format(noise_list[noise_level], noise_level)
      '''
      Notes on arguments for `seaborn.distplot()`:
        * kde: kernel density estimation. Non-parametric way to
            to estimate the probability density function of
            a random variable. Definition:
      '''
      seaborn.distplot(noise_level_loss, kde=True,
          hist=True, norm_hist=True, bins=bins, hist_kws={'edgecolor':'black'},
          kde_kws={'linewidth':4},
          label=label, fit=fitting_distribution)
    mean = np.mean(means)
    stddev = np.mean(stddevs)
    # Widen the x-range to cover both observed extremes and +/- 2 stddev.
    xlimit = (min(min_loss_val, mean-2*stddev) - epsilon, max(max_loss_val, mean+2*stddev) + epsilon)
    xlabel='Energy Levels'
    if transformation is not None:
      foostr = inspect.getsource(transformation).replace('. Filename: ', ';')
      xlabel = xlabel + '\nTransformation: ' + foostr
    plot.legend(fig, ax, xlabel=xlabel, ylabel='Histogram',
                xlimit=xlimit)
    return fig
  def _plot_exchange_proba_hist(self, replica_id=-1, simulation_num=0, proba_coeff=None,
                                isnormalized=True, bins=60, epsilon=1e-3):
    """Plots probability of changing noise at swap step for `replica_id`.

    At every recorded step, computes the Metropolis-style probabilities of
    the replica moving to the next lower / next higher noise level or
    staying, and draws a histogram of each of the three distributions.

    Args:
      replica_id: Replica to analyze; -1 selects the replica with the
        minimum test error in `simulation_num`.
      simulation_num: Simulation to read the summaries from.
      proba_coeff: Coefficient in `exp(coeff*(li-lj)*(bi-bj))`; defaults
        to the value in the description.
      isnormalized: When True, move probabilities are divided by
        `n_replicas - 1`.
      bins: Number of histogram bins.
      epsilon: When not None, probabilities below it are dropped before
        plotting.
    """
    def get_lower_and_upper_rids(rid, curr_noise_dict):
      # Rank replicas by current noise value (reversed for noise types
      # where a larger value means a "lower" level) and return the ids of
      # the neighbors of `rid` in that ranking; -1 means no neighbor.
      beta = [curr_noise_dict[r] for r in range(n_replicas)]
      beta_rid = [(b, r) for r, b in enumerate(beta)]
      reverse = (True if noise_type in ['weight_noise', 'langevin'] else False)
      beta_rid.sort(key=lambda x: x[0], reverse=reverse)
      rid_level = [i for i in range(len(beta_rid)) if beta_rid[i][1] == rid][0]
      if rid_level == n_replicas - 1:
        hrid = -1
      else:
        hrid = beta_rid[rid_level+1][1]
      if rid_level == 0:
        lrid = -1
      else:
        lrid = beta_rid[rid_level-1][1]
      return lrid, hrid
    if replica_id == -1:
      _ = self.get_sep_ratio_vs_min_error()
      replica_id = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    noise_list = sorted(self.get_description()['noise_list'])
    n_replicas = self.get_description()['n_replicas']
    noise_type = self.get_description()['noise_type']
    if proba_coeff is None:
      proba_coeff = self.get_description()['proba_coeff']
    n_epochs = self.get_description()['n_epochs']
    rid = replica_id
    noise_vals = {i:self.get_summary('noise_values', i, simulation_num)[1]
                  for i in range(n_replicas)}
    loss_vals = {i:self.get_summary('validation_loss', i, simulation_num)[1]
                 for i in range(n_replicas)}
    tolower_probas = []
    tohigher_probas = []
    tostay_probas = []
    endidx = len(loss_vals[0])
    for step in range(endidx):
      try:
        curr_noise_dict = {r:noise_vals[r][step] for r in range(n_replicas)}
        curr_loss_dict = {r:loss_vals[r][step] for r in range(n_replicas)}
      except IndexError:
        break
        # NOTE(review): everything below the `break` is unreachable
        # debugging output.
        print('step:', step)
        print('noise:')
        for r, v in noise_vals.items():
          print('rid:', r, 'len:', len(v))
        print('loss:')
        for r, v in loss_vals.items():
          print('rid:', r, 'len:', len(v))
        raise
      lrid, hrid = get_lower_and_upper_rids(rid, curr_noise_dict)
      if lrid == -1:
        proba_lower = 0.0
        tolower_probas.append(proba_lower)
      else:
        li = curr_loss_dict[lrid]
        lj = curr_loss_dict[rid]
        bi = curr_noise_dict[lrid]
        bj = curr_noise_dict[rid]
        # Metropolis acceptance probability, clipped to 1.
        proba_lower = np.exp(proba_coeff*(li - lj)*(bi - bj))
        proba_lower = min(proba_lower, 1.0)
        if isnormalized:
          proba_lower *= (1/(n_replicas - 1))
        tolower_probas.append(proba_lower)
      if hrid == -1:
        proba_higher = 0.0
        tohigher_probas.append(proba_higher)
      else:
        li = curr_loss_dict[rid]
        lj = curr_loss_dict[hrid]
        bi = curr_noise_dict[rid]
        bj = curr_noise_dict[hrid]
        proba_higher = np.exp(proba_coeff*(li - lj)*(bi - bj))
        proba_higher = min(1.0, proba_higher)
        if isnormalized:
          proba_higher *= (1/(n_replicas-1))
        tohigher_probas.append(proba_higher)
      proba_stay = max(0.0, 1.0 - proba_lower - proba_higher)
      tostay_probas.append(proba_stay)
    fig, ax = plt.subplots()
    plot = Plot()
    x = np.linspace(0, n_epochs, len(tostay_probas))
    if epsilon is not None:
      # Drop near-zero probabilities so they do not dominate the histogram.
      tostay_probas = [x for x in tostay_probas if x >= epsilon]
      tohigher_probas = [x for x in tohigher_probas if x >= epsilon]
      tolower_probas = [x for x in tolower_probas if x >= epsilon]
    seaborn.distplot(tostay_probas, kde=True, hist=True, norm_hist=True,
                     bins=bins, hist_kws={'edgecolor':'black'},
                     kde_kws={'linewidth':4}, label='Next State is Current State')
    seaborn.distplot(tohigher_probas, kde=True, hist=True, norm_hist=True,
                     bins=bins, hist_kws={'edgecolor':'black'},
                     kde_kws={'linewidth':4}, label='Next State is Higher State')
    seaborn.distplot(tolower_probas, kde=True, hist=True, norm_hist=True,
                     bins=bins, hist_kws={'edgecolor':'black'},
                     kde_kws={'linewidth':4}, label='Next State is Lower State')
    plot.legend(fig, ax, xlabel='Probability', ylabel='Histogram', xlimit=(0, 1))
    '''
    if isnormalized:
      plot.plot(x, tostay_probas, fig=fig, ax=ax, label='NEXT_NOISE to CURR_NOISE')
      plot.plot(x, tohigher_probas, fig=fig, ax=ax, label='NEXT_NOISE to HIGHER_NOISE')
      plot.plot(x, tolower_probas, fig=fig, ax=ax, label='NEXT_NOISE to LOWER_NOISE')
      plot.legend(fig, ax, xlabel='EPOCHS', ylabel='PROBABILITY')
    '''
    return fig
  def _plot_test_err_vs_noise_level_vs_epochs(self, replica_id=-1, simulation_num=0):
    """Draw a 3D plot of noise level, test error and diffusion vs epochs.

    Args:
      replica_id: Replica to plot; -1 selects the replica that achieved
        the minimum test error in `simulation_num`.
      simulation_num: Simulation to read the summaries from.
    """
    if replica_id == -1:
      # Populates the 'replica_id_min_err_sim_<s>' cache entries.
      _ = self.get_sep_ratio_vs_min_error()
      replica_id = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    x_noise, noise_vals = self.get_summary('noise_values',
                                           replica_id=replica_id,
                                           simulation_num=simulation_num)
    x_err, err_vals = self.get_summary('test_error',
                                       replica_id=replica_id,
                                       simulation_num=simulation_num)
    x_diff, diff = self.get_summary('diffusion',
                                    replica_id=replica_id,
                                    simulation_num=simulation_num)
    diff = np.asarray(diff)
    x_train_steps, train_steps = self.get_summary('train_steps',
                                                  replica_id=replica_id,
                                                  simulation_num=simulation_num)
    x_test_steps, test_steps = self.get_summary('test_steps',
                                                replica_id=replica_id,
                                                simulation_num=simulation_num)
    fig = plt.figure()
    fig.set_size_inches(18, 10)
    # NOTE(review): gca(projection=...) is deprecated in newer matplotlib;
    # kept for byte-identical behavior with the pinned version.
    ax = fig.gca(projection='3d')
    # Offset between the three curves along the noise-level axis.
    epsilon = 0.005
    ax.plot(x_noise, noise_vals, np.zeros_like(noise_vals), linewidth=1, label='Noise Level')
    ax.plot(x_err, (max(noise_vals) + epsilon)*np.ones_like(err_vals), err_vals, linewidth=1, label='Error')
    # Diffusion is normalized by its maximum so it fits the same z-scale.
    ax.plot(x_diff, (max(noise_vals) + 5*epsilon)*np.ones_like(x_diff), diff/diff.max(), label='Diffusion')
    ax.set_zlabel('Error and Normalized Diffusion')
    plt.xlabel('Epochs')
    plt.ylabel('Noise Level')
    ax.view_init(20, 270)
    plt.ylim(min(noise_vals) - epsilon, max(noise_vals) + 6*epsilon)
    plt.legend()
def _plot_norms(self, simulation_num=0):
fig, ax = plt.subplots()
plot = Plot()
for r in range(self._n_replicas):
x, y = self.get_summary('weight_norms', r, simulation_num)
plot.plot(x, y, fig=fig, ax=ax, label='replica ' + str(r),
linewidth=2)
plot.legend(fig, ax, legend_title='Replica',
xlabel='EPOCHS', ylabel='WEIGHT L2 NORM')
return fig
def _plot_diffusion(self, simulation_num=0):
fig, ax = plt.subplots()
plot = Plot()
for r in range(self._n_replicas):
x, y = self.get_summary('diffusion', r, simulation_num)
plot.plot(x, y, fig=fig, ax=ax, label='replica ' + str(r),
linewidth=2)
plot.legend(fig, ax, legend_title='Replica',
xlabel='EPOCHS', ylabel='DIFFUSION')
return fig
  def _plot_diffusion_vs_min_error(self, replica_id=0,
                                   simulation_num=0, sample_every=1,
                                   ylim=(0, 1)):
    """3D plot of test/train error and diffusion for the best replica.

    The three series are sampled on different grids; they are aligned on
    the grid of the shortest one via `_find_nearest_idx`. Pearson
    correlations between the curves are printed in the x-axis label.

    Args:
      replica_id: Unused — the replica with minimal test error is chosen.
      simulation_num: Simulation to read the summaries from.
      sample_every: Currently unused (subsampling code is commented out).
      ylim: Error-axis limits; `ylim[1]` also positions the diffusion curve.
    """
    # Populates the 'replica_id_min_err_sim_<s>' cache entries.
    _ = self.get_sep_ratio_vs_min_error()
    min_rid = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    x_test, y_test = self.get_summary('test_error',
                                      replica_id=min_rid,
                                      simulation_num=simulation_num)
    x_train, y_train = self.get_summary('train_error',
                                        replica_id=min_rid,
                                        simulation_num=simulation_num)
    x_diff, y_diff = self.get_summary('diffusion',
                                      replica_id=min_rid,
                                      simulation_num=simulation_num)
    #_test, y_test = x_test[::sample_every], y_test[::sample_every]
    #x_train, y_train = x_train[::sample_every], y_train[::sample_every]
    #x_diff, y_diff = x_diff[::sample_every], y_diff[::sample_every]
    #return x_test, y_test, x_train, y_train, x_diff, y_diff
    result = dict(train=[], test=[], diff=[])
    # Sort the three series by length; iterate on the shortest grid and
    # pick the nearest sample from each of the other two.
    sorted_ = sorted([('train', y_train, x_train, len(y_train)),
                      ('test', y_test, x_test, len(y_test)),
                      ('diff', y_diff, x_diff, len(y_diff))], key=lambda x: x[3])
    for i in range(sorted_[0][3]):
      result[sorted_[0][0]].append(sorted_[0][1][i])
      result[sorted_[1][0]].append(sorted_[1][1][_find_nearest_idx(sorted_[1][2], sorted_[0][2][i])])
      result[sorted_[2][0]].append(sorted_[2][1][_find_nearest_idx(sorted_[2][2], sorted_[0][2][i])])
    x_test = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['test']))
    x_train = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['train']))
    x_diff = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['diff']))
    fig = plt.figure()
    fig.set_size_inches(18, 10)
    ax = fig.gca(projection='3d')
    #ax.plot(x_test, np.asarray(result['test']), np.asarray(result['diff']),
    #        label='Test Error', color=self._colors[0], linewidth=1.2)
    ax.plot(x_test, np.asarray(result['test']), np.zeros(len(result['test'])),
            color=self._colors[0], linewidth=1, label='Test Error')
    # Diffusion is drawn on the "back wall" at error == ylim[1].
    ax.plot(x_test, ylim[1]*np.ones(len(result['test'])), result['diff'],
            color=self._colors[0], linewidth=1)
    #ax.plot(x_train, np.asarray(result['train']), np.asarray(result['diff']),
    #        color=self._colors[1], linestyle='--', label='Train Error', linewidth=1.2)
    ax.plot(x_train, np.asanyarray(result['train']), np.zeros(len(result['train'])),
            color=self._colors[1], linestyle='--', linewidth=1, label='Train Error')
    ax.view_init(20, 270)
    xlabel = ('EPOCHS\n'
              + 'Train-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(y_train, y_diff)[0])
              + ', Test-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(result['test'], result['diff'])[0]))
    plt.xlabel(xlabel, labelpad=20)
    plt.ylabel('Error')
    ax.set_zlabel('Diffusion')
    plt.legend()
    plt.ylim(ylim[0], ylim[1])
    #loc = plticker.MultipleLocator(base=len(result[sorted_[0][0]])//20)
    #ax.xaxis.set_major_locator(loc)
    # Round tick positions down to multiples of ten.
    xticks = [int(x/10)*10 for x in np.linspace(0, self.get_description()['n_epochs'], 20)]
    ax.set_xticks(xticks)
def _plot_diffusion_vs_min_loss(self, replica_id=0, simulation_num=0, sample_every=1, ylim=(0, 5)):
    """Plot train/test loss of the best replica together with its diffusion (3D).

    Args:
        replica_id: Unused; kept for interface compatibility.
        simulation_num: Index of the simulation to plot.
        sample_every: Unused; kept for interface compatibility.
        ylim: (low, high) loss-axis limits; ``ylim[1]`` is also the plane on
            which the diffusion curve is drawn.
    """
    # Side effect: populates self._vals with the best replica per simulation.
    _ = self.get_sep_ratio_vs_min_error()
    min_rid = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    x_test, y_test = self.get_summary('test_loss',
                                      replica_id=min_rid,
                                      simulation_num=simulation_num)
    x_train, y_train = self.get_summary('train_loss',
                                        replica_id=min_rid,
                                        simulation_num=simulation_num)
    x_diff, y_diff = self.get_summary('diffusion',
                                      replica_id=min_rid,
                                      simulation_num=simulation_num)
    # The three summaries may have different lengths: resample the longer
    # ones onto the x-grid of the shortest via nearest-neighbour lookup.
    result = dict(train=[], test=[], diff=[])
    sorted_ = sorted([('train', y_train, x_train, len(y_train)),
                      ('test', y_test, x_test, len(y_test)),
                      ('diff', y_diff, x_diff, len(y_diff))], key=lambda x: x[3])
    for i in range(sorted_[0][3]):
        result[sorted_[0][0]].append(sorted_[0][1][i])
        result[sorted_[1][0]].append(sorted_[1][1][_find_nearest_idx(sorted_[1][2], sorted_[0][2][i])])
        result[sorted_[2][0]].append(sorted_[2][1][_find_nearest_idx(sorted_[2][2], sorted_[0][2][i])])
    x_test = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['test']))
    x_train = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['train']))
    x_diff = np.linspace(start=0, stop=self.get_description()['n_epochs'], num=len(result['diff']))
    fig = plt.figure()
    fig.set_size_inches(18, 10)
    # FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the documented replacement.
    ax = fig.add_subplot(projection='3d')
    ax.plot(x_test, np.asarray(result['test']), np.zeros(len(result['test'])),
            color=self._colors[0], linewidth=1, label='Test Loss')
    # Diffusion curve drawn on the back plane at y == ylim[1].
    ax.plot(x_test, ylim[1]*np.ones(len(result['test'])), result['diff'],
            color=self._colors[0], linewidth=1,)
    ax.plot(x_train, np.asanyarray(result['train']), np.zeros(len(result['train'])),
            color=self._colors[1], linestyle='--', linewidth=1, label='Train Loss')
    ax.view_init(20, 270)
    # NOTE(review): pearsonr requires equal-length inputs; y_train/y_diff are
    # the raw (unaligned) series — confirm their lengths match in practice.
    xlabel = ('EPOCHS\n'
              + 'Train-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(y_train, y_diff)[0])
              + ', Test-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(result['test'], result['diff'])[0]))
    plt.xlabel(xlabel, labelpad=20)
    plt.ylabel('Loss')
    ax.set_zlabel('Diffusion')
    plt.legend()
    plt.ylim(ylim[0], ylim[1])
    # Round tick positions down to multiples of 10.
    xticks = [int(x/10)*10 for x in np.linspace(0, self.get_description()['n_epochs'], 20)]
    ax.set_xticks(xticks)
def _data_for_train_test_error_gap(self, simulation_num=0, sample_every=1, ylim=(0, 1)):
    """Collect aligned train/test error and diffusion curves of the best replica.

    Returns:
        Tuple ``(x_gap, y_gap, x_diff, result)`` where ``y_gap`` is the
        test-train error gap on the epoch grid ``x_gap`` and ``result`` maps
        'train'/'test'/'diff' to the aligned value lists.
    """
    # Side effect: populates self._vals with the best replica per simulation.
    _ = self.get_sep_ratio_vs_min_error()
    best_rid = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    x_test, y_test = self.get_summary('test_error',
                                      replica_id=best_rid,
                                      simulation_num=simulation_num)
    x_train, y_train = self.get_summary('train_error',
                                        replica_id=best_rid,
                                        simulation_num=simulation_num)
    x_diff, y_diff = self.get_summary('diffusion',
                                      replica_id=best_rid,
                                      simulation_num=simulation_num)
    # Stable sort by length: the shortest series defines the common grid and
    # ties preserve the train/test/diff order.
    curves = [('train', y_train, x_train, len(y_train)),
              ('test', y_test, x_test, len(y_test)),
              ('diff', y_diff, x_diff, len(y_diff))]
    curves.sort(key=lambda c: c[3])
    result = {'train': [], 'test': [], 'diff': []}
    short_name, short_y, short_x, short_len = curves[0]
    for i in range(short_len):
        result[short_name].append(short_y[i])
        for name, ys, xs, _len in curves[1:]:
            result[name].append(ys[_find_nearest_idx(xs, short_x[i])])
    n_epochs = self.get_description()['n_epochs']
    x_gap = np.linspace(start=0, stop=n_epochs, num=len(result['test']))
    x_diff = np.linspace(start=0, stop=n_epochs, num=len(result['diff']))
    y_gap = np.asarray(result['test']) - np.asarray(result['train'])
    return x_gap, y_gap, x_diff, result
def _plot_train_test_error_gap(self, simulation_num=0, sample_every=1, ylim=(0, 1)):
    """Plot the test-train error gap of the best replica next to its diffusion.

    Args:
        simulation_num: Index of the simulation to plot.
        sample_every: Forwarded to the data helper (currently unused there).
        ylim: (low, high) gap-axis limits; ``ylim[1]`` is also the plane on
            which the diffusion curve is drawn.
    """
    x_gap, y_gap, x_diff, result = self._data_for_train_test_error_gap(
        simulation_num=simulation_num, sample_every=sample_every, ylim=ylim)
    fig = plt.figure()
    fig.set_size_inches(18, 10)
    # FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the documented replacement.
    ax = fig.add_subplot(projection='3d')
    ax.plot(x_gap, y_gap, np.zeros(len(result['test'])),
            color=self._colors[0], linewidth=1.2, label='Test-Train Error Gap')
    # Diffusion curve drawn on the back plane at y == ylim[1].
    ax.plot(x_gap, ylim[1]*np.ones(len(result['test'])), result['diff'],
            color=self._colors[0], linewidth=1.2,)
    ax.view_init(20, 270)
    xlabel = ('EPOCHS\n'
              + 'Gap-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(y_gap, result['diff'])[0]))
    plt.xlabel(xlabel, labelpad=20)
    plt.ylabel('Error Gap')
    # Lower limit extends below 0 if the gap ever goes negative.
    plt.ylim(min(0, min(y_gap)), ylim[1])
    ax.set_zlabel('Diffusion')
    plt.legend()
def _data_for_train_test_loss_gap(self, simulation_num=0, sample_every=1, ylim=(0, 5)):
    """Collect aligned train/test loss and diffusion curves of the best replica.

    Returns:
        Tuple ``(x_gap, y_gap, x_diff, result)`` where ``y_gap`` is the
        test-train loss gap on the epoch grid ``x_gap`` and ``result`` maps
        'train'/'test'/'diff' to the aligned value lists.
    """
    # Side effect: populates self._vals with the best replica per simulation.
    _ = self.get_sep_ratio_vs_min_error()
    best_rid = self._vals['replica_id_min_err_sim_' + str(simulation_num)]
    x_test, y_test = self.get_summary('test_loss',
                                      replica_id=best_rid,
                                      simulation_num=simulation_num)
    x_train, y_train = self.get_summary('train_loss',
                                        replica_id=best_rid,
                                        simulation_num=simulation_num)
    x_diff, y_diff = self.get_summary('diffusion',
                                      replica_id=best_rid,
                                      simulation_num=simulation_num)
    # Stable sort by length: the shortest series defines the common grid and
    # ties preserve the train/test/diff order.
    curves = [('train', y_train, x_train, len(y_train)),
              ('test', y_test, x_test, len(y_test)),
              ('diff', y_diff, x_diff, len(y_diff))]
    curves.sort(key=lambda c: c[3])
    result = {'train': [], 'test': [], 'diff': []}
    short_name, short_y, short_x, short_len = curves[0]
    for i in range(short_len):
        result[short_name].append(short_y[i])
        for name, ys, xs, _len in curves[1:]:
            result[name].append(ys[_find_nearest_idx(xs, short_x[i])])
    n_epochs = self.get_description()['n_epochs']
    x_gap = np.linspace(start=0, stop=n_epochs, num=len(result['test']))
    x_diff = np.linspace(start=0, stop=n_epochs, num=len(result['diff']))
    y_gap = np.asarray(result['test']) - np.asarray(result['train'])
    return x_gap, y_gap, x_diff, result
def _plot_train_test_loss_gap(self, simulation_num=0, sample_every=1, ylim=(0, 5)):
    """Plot the test-train loss gap of the best replica next to its diffusion.

    Args:
        simulation_num: Index of the simulation to plot.
        sample_every: Forwarded to the data helper (currently unused there).
        ylim: (low, high) gap-axis limits; ``ylim[1]`` is also the plane on
            which the diffusion curve is drawn.
    """
    x_gap, y_gap, x_diff, result = self._data_for_train_test_loss_gap(
        simulation_num=simulation_num, sample_every=sample_every, ylim=ylim)
    fig = plt.figure()
    fig.set_size_inches(18, 10)
    # FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the documented replacement.
    ax = fig.add_subplot(projection='3d')
    ax.plot(x_gap, y_gap, np.zeros(len(result['test'])),
            color=self._colors[0], linewidth=1.2, label='Test-Train Loss Gap')
    # Diffusion curve drawn on the back plane at y == ylim[1].
    ax.plot(x_gap, ylim[1]*np.ones(len(result['test'])), result['diff'],
            color=self._colors[0], linewidth=1.2,)
    ax.view_init(20, 270)
    xlabel = ('EPOCHS\n'
              + 'Gap-Diffusion Corr: '
              + "{0:.2f}".format(pearsonr(y_gap, result['diff'])[0]))
    plt.xlabel(xlabel, labelpad=20)
    plt.ylabel('Loss Gap')
    # Lower limit extends below ylim[0] if the gap ever drops under it.
    plt.ylim(min(ylim[0], min(y_gap)), ylim[1])
    ax.set_zlabel('Diffusion')
    plt.legend()
def _plot_grads(self, simulation_num=0, sample_every=1):
    """Plot the gradient L2 norm of every replica on a log-scaled axis."""
    fig, ax = plt.subplots()
    plotter = Plot()
    for rid in range(self._n_replicas):
        epochs, norms = self.get_summary('grad_norms', rid, simulation_num)
        plotter.plot(epochs, norms, fig=fig, ax=ax,
                     label='replica ' + str(rid), linewidth=1.5)
    plotter.legend(fig, ax, legend_title='Replica',
                   xlabel='EPOCHS', ylabel='GRADIENT L2 NORM', log_y=5)
    return fig
def _plot_mixing(self, simulation_num=0):
    """Plot which noise level each replica occupied over the course of training."""
    def _get_key(key):
        # Snap `key` onto the closest declared noise level (rounding-safe).
        keys = [float("{0:.7f}".format(b))
                for b in self.get_description()['noise_list']]
        distances = [abs(k - key) for k in keys]
        return keys[int(np.argmin(distances))]
    fig, ax = plt.subplots()
    plotter = Plot()
    noise_list = self.get_description()['noise_list']
    # Map each (rounded) noise level to its ordinal position on the y-axis.
    key_map = {_get_key(key): i for i, key in enumerate(noise_list)}
    for rid in range(self._n_replicas):
        x, y = self.get_summary('noise_values', replica_id=rid,
                                simulation_num=simulation_num)
        levels = [key_map[_get_key(v)] for v in y]
        plotter.plot(x, levels, fig=fig, ax=ax,
                     label='replica ' + str(rid), linewidth=2)
    yticks_names = [float("{0:.5f}".format(b)) for b in noise_list]
    plt.gca().set_yticklabels(['0'] + yticks_names)
    plotter.legend(fig, ax, legend_title='Replica',
                   xlabel='EPOCHS', ylabel='NOISE LEVEL')
    return fig
##################### Experimental #####################
def _plot_mixing_resnet(self, simulation_num=0):
    """(Experimental) Plot replica mixing for a resnet run, highlighting the best replica.

    The axis limits and tick positions are hard-coded for one specific
    ~60K-step training run; adjust them before reusing elsewhere.

    Returns:
        Tuple (fig, ax) of the matplotlib figure and axes.
    """
    def get_min_err_and_rid(se):
        # Smallest test error across all replicas and the replica achieving it.
        results = []
        for r in range(se.get_description()['n_replicas']):
            x, y = se.get_summary('test_error', replica_id=r)
            results.append(min(y))
        min_err = min(results)
        min_rid = np.argmin(results)
        return min_err, min_rid
    def _get_key(key):
        # Snap `key` onto the closest value among noise levels + learning rate.
        lr = self.get_description()['learning_rate']
        noise_list = self.get_description()['noise_list'] + [lr]
        noise_list = sorted(noise_list)
        keys = [float("{0:.5f}".format(b)) for b in noise_list]
        return keys[int(np.argmin([abs(k-key) for k in keys]))]
    fig, ax = plt.subplots(figsize=(12, 8))
    lr = self.get_description()['learning_rate']
    noise_list = self.get_description()['noise_list'] + [lr]
    noise_list = sorted(noise_list)
    key_map = {_get_key(key): i for i, key in enumerate(noise_list)}
    # NOTE(review): 45000 looks like a hard-coded training-set size
    # (e.g. CIFAR-10 minus a validation split) — confirm before reuse.
    dsize = 45000
    batch_size = self.get_description()['batch_size']
    epoch_mult = np.ceil(dsize/batch_size)  # optimization steps per epoch
    n_epochs = self.get_description()['n_epochs']
    n_replicas = self.get_description()['n_replicas']
    _, min_rid = get_min_err_and_rid(self)
    for r in range(self._n_replicas):
        x, y = self.get_summary('noise_values', replica_id=r, simulation_num=simulation_num)
        x = x*epoch_mult  # convert epochs to optimization steps
        y_new = [key_map[_get_key(i)] for i in y]
        if r == min_rid:
            # Best replica: thick grey line drawn underneath the others.
            ax.plot(x, y_new, label='replica ' + str(r), linewidth=15, color='grey', alpha=0.8,
                    linestyle='-')
        else:
            ax.plot(x, y_new, label='replica ' + str(r), linewidth=4.5)
    ylabels = [float("{0:.5f}".format(b)) for b in noise_list] + [float("{0:.5f}".format(lr))]
    yticks = list(range(n_replicas + 1))
    xticks = [25000, 30000, 40000, 50000, 60000]
    xlabels = ['0', '30K', '40K', '50K', '60K']
    xticks_locs = [25000, n_epochs*epoch_mult]
    yticks_locs = [[i, i] for i in range(n_replicas)]
    # Dashed horizontal guide line for every replica level.
    # (Removed a dead trailing `continue` and the unused FONTSIZE / else-branch
    # `linewidth` locals from the original; no behavior change.)
    for r in range(n_replicas):
        ax.plot(xticks_locs, yticks_locs[r], color='black', linestyle='--', dashes=(4, 6), linewidth=2)
    plt.xlim((25000, n_epochs*epoch_mult))
    plt.xticks(xticks, xlabels, fontsize=23)
    plt.yticks(yticks, ylabels, fontsize=23)
    return fig, ax
def _plot_bestsofar(self, summ_name='validation_loss', simulation_num=0):
    """Plot, per replica, the running minimum ("best so far") of a summary."""
    n = self._n_replicas
    losses = {}
    for rid in range(n):
        losses[rid] = self.get_summary(summ_name, replica_id=rid,
                                       simulation_num=simulation_num)[1]
    # Running minimum, seeded with each replica's first value.
    bestsofar = {rid: [losses[rid][0]] for rid in range(n)}
    for step in range(1, len(losses[0])):
        for rid in range(n):
            bestsofar[rid].append(min(bestsofar[rid][-1], losses[rid][step]))
    fig, ax = plt.subplots()
    plotter = Plot()
    x = np.linspace(1, self.get_description()['n_epochs'], len(bestsofar[0]))
    for rid in range(n):
        plotter.plot(x, bestsofar[rid], fig=fig, ax=ax, label='replica-' + str(rid))
    plotter.legend(fig, ax, xlabel='EPOCHS', ylabel=summ_name.replace('_', '-'))
    return fig
def _proba_if_coeff_was(self, coeff=None, simulation_num=0):
    """Print mean/std of swap probabilities under a hypothetical proba coefficient.

    For each adjacent-temperature pair, recomputes min(1, exp(coeff * arg))
    over the stored exponent arguments and prints the statistics.
    """
    debug = self._summaries[simulation_num]['debug']
    pairs = self._get_swap_pairs(simulation_num)
    if not coeff:
        coeff = self.get_description()['proba_coeff']
    noise_list = self.get_description()['noise_list']
    betas = [1/n for n in noise_list]  # kept for parity with original; unused below
    if not isinstance(coeff, list):
        n_pairs = self.get_description()['n_replicas'] - 1
        coeff = [coeff] * n_pairs
    pairs_coeffs = dict(zip(pairs, coeff))
    for i, p in enumerate(pairs):
        probas = [min(1, np.exp(pairs_coeffs[p]*arg)) for arg in debug['exp_args'][p]]
        print(i, p, '--->', np.mean(probas), np.std(probas))
def _get_swap_pairs(self, simulation_num=0):
"""Returns a tuples of adjacent temperatures (temp_i, temp_i+1)."""
debug = self._summaries[simulation_num]['debug']
return list(sorted(debug['exp_args'].keys(), reverse=True))
def _find_nearest_idx(array, value):
"""Returns the index of the closest to the `value` value in `array`."""
array = np.asarray(array)
idx = (np.abs(array-value)).argmin()
return idx
def _make_len_as_shortest(lhs_x, lhs_y, rhs_x, rhs_y):
"""Make both arrays of same shape as the shortest between them."""
lhs_x, lhs_y = np.asarray(lhs_x), np.asarray(lhs_y)
rhs_x, rhs_y = np.asarray(rhs_x), np.asarray(rhs_y)
assert lhs_x.shape[0] == lhs_y.shape[0]
assert rhs_x.shape[0] == rhs_y.shape[0]
sorted_ = sorted([(lhs_x, lhs_y, lhs_x.shape[0]), (rhs_x, rhs_y, rhs_x.shape[0])],
key=lambda x: x[2])
res_y = []
res_x = []
for i in range(sorted_[0][2]):
idx = _find_nearest_idx(rhs_x, sorted_[0][0][i])
res_y.append(sorted_[1][1][idx])
res_x.append(idx)
return np.asarray(res_x), np.asarray(res_y)
| [
"cycler.cycler",
"numpy.abs",
"numpy.argmin",
"json.dumps",
"matplotlib.pyplot.figure",
"numpy.mean",
"pickle.load",
"numpy.exp",
"matplotlib.pyplot.gca",
"pylatex.utils.escape_latex",
"simulator.plot.Plot",
"os.path.join",
"pylatex.Figure",
"os.path.abspath",
"numpy.zeros_like",
"nump... | [((126276, 126293), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (126286, 126293), True, 'import numpy as np\n'), ((1978, 2012), 'os.path.join', 'os.path.join', (['dirname', 'file_prefix'], {}), '(dirname, file_prefix)\n', (1990, 2012), False, 'import os\n'), ((2103, 2128), 'os.listdir', 'os.listdir', (['self._dirname'], {}), '(self._dirname)\n', (2113, 2128), False, 'import os\n'), ((2259, 2296), 'os.path.join', 'os.path.join', (['self._dirname', '"""images"""'], {}), "(self._dirname, 'images')\n", (2271, 2296), False, 'import os\n'), ((2722, 2768), 'os.path.join', 'os.path.join', (['self._dirname', 'self._file_prefix'], {}), '(self._dirname, self._file_prefix)\n', (2734, 2768), False, 'import os\n'), ((3049, 3081), 'pylatex.Document', 'tex.Document', (['self._pdf_filename'], {}), '(self._pdf_filename)\n', (3061, 3081), True, 'import pylatex as tex\n'), ((3386, 3397), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3395, 3397), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3645), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3643, 3645), True, 'import matplotlib.pyplot as plt\n'), ((3870, 3881), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3879, 3881), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4112), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4110, 4112), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4325), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4323, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4529, 4540), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4538, 4540), True, 'import matplotlib.pyplot as plt\n'), ((4741, 4752), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4750, 4752), True, 'import matplotlib.pyplot as plt\n'), ((4957, 4968), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4966, 4968), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5516), 'pylatex.Tabular', 'tex.Tabular', 
(['table_spec'], {'pos': 'self._position', 'booktabs': '(False)', 'row_height': '(0.1)'}), '(table_spec, pos=self._position, booktabs=False, row_height=0.1)\n', (5452, 5516), True, 'import pylatex as tex\n'), ((7078, 7092), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7090, 7092), True, 'import matplotlib.pyplot as plt\n'), ((7118, 7124), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (7122, 7124), False, 'from simulator.plot import Plot\n'), ((8113, 8164), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""diffusion.png"""'], {}), "(self._images_dirname, 'diffusion.png')\n", (8125, 8164), False, 'import os\n'), ((8287, 8301), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8299, 8301), True, 'import matplotlib.pyplot as plt\n'), ((8327, 8333), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (8331, 8333), False, 'from simulator.plot import Plot\n'), ((9316, 9365), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""n_steps.png"""'], {}), "(self._images_dirname, 'n_steps.png')\n", (9328, 9365), False, 'import os\n'), ((9490, 9504), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9502, 9504), True, 'import matplotlib.pyplot as plt\n'), ((9530, 9536), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (9534, 9536), False, 'from simulator.plot import Plot\n'), ((10333, 10387), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""min_loss_sep.png"""'], {}), "(self._images_dirname, 'min_loss_sep.png')\n", (10345, 10387), False, 'import os\n'), ((10515, 10529), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10527, 10529), True, 'import matplotlib.pyplot as plt\n'), ((10555, 10561), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (10559, 10561), False, 'from simulator.plot import Plot\n'), ((11349, 11404), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""sep_vs_accept.png"""'], {}), "(self._images_dirname, 'sep_vs_accept.png')\n", (11361, 11404), False, 
'import os\n'), ((11529, 11543), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11541, 11543), True, 'import matplotlib.pyplot as plt\n'), ((11569, 11575), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (11573, 11575), False, 'from simulator.plot import Plot\n'), ((12360, 12412), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""sep_vs_mix.png"""'], {}), "(self._images_dirname, 'sep_vs_mix.png')\n", (12372, 12412), False, 'import os\n'), ((12539, 12553), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12551, 12553), True, 'import matplotlib.pyplot as plt\n'), ((12579, 12585), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (12583, 12585), False, 'from simulator.plot import Plot\n'), ((13375, 13429), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""sep_vs_visit.png"""'], {}), "(self._images_dirname, 'sep_vs_visit.png')\n", (13387, 13429), False, 'import os\n'), ((13555, 13569), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13567, 13569), True, 'import matplotlib.pyplot as plt\n'), ((13595, 13601), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (13599, 13601), False, 'from simulator.plot import Plot\n'), ((14396, 14456), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""sep_vs_min_overfit.png"""'], {}), "(self._images_dirname, 'sep_vs_min_overfit.png')\n", (14408, 14456), False, 'import os\n'), ((14588, 14602), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14600, 14602), True, 'import matplotlib.pyplot as plt\n'), ((14628, 14634), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (14632, 14634), False, 'from simulator.plot import Plot\n'), ((15441, 15503), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""sep_vs_final_overfit.png"""'], {}), "(self._images_dirname, 'sep_vs_final_overfit.png')\n", (15453, 15503), False, 'import os\n'), ((16008, 16042), 'os.path.join', 'os.path.join', (['dirname', '"""summaries"""'], {}), "(dirname, 
'summaries')\n", (16020, 16042), False, 'import os\n'), ((16059, 16084), 'os.listdir', 'os.listdir', (['self._dirname'], {}), '(self._dirname)\n', (16069, 16084), False, 'import os\n'), ((22027, 22072), 'os.path.join', 'os.path.join', (['dirname', '"""summaries"""', '"""reports"""'], {}), "(dirname, 'summaries', 'reports')\n", (22039, 22072), False, 'import os\n'), ((22157, 22191), 'os.path.join', 'os.path.join', (['dirname', 'report_name'], {}), '(dirname, report_name)\n', (22169, 22191), False, 'import os\n'), ((22219, 22256), 'os.path.join', 'os.path.join', (['self._dirname', '"""images"""'], {}), "(self._dirname, 'images')\n", (22231, 22256), False, 'import os\n'), ((22282, 22322), 'os.path.join', 'os.path.join', (['self._dirname', 'report_name'], {}), '(self._dirname, report_name)\n', (22294, 22322), False, 'import os\n'), ((22533, 22584), 'cycler.cycler', 'cycler', ([], {'y': '[0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72]'}), '(y=[0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72])\n', (22539, 22584), False, 'from cycler import cycler\n'), ((22613, 22645), 'cycler.cycler', 'cycler', ([], {'color': "['black', 'black']"}), "(color=['black', 'black'])\n", (22619, 22645), False, 'from cycler import cycler\n'), ((23233, 23283), 'pylatex.Document', 'tex.Document', (['self._pdf_filename'], {'font_size': '"""tiny"""'}), "(self._pdf_filename, font_size='tiny')\n", (23245, 23283), True, 'import pylatex as tex\n'), ((24029, 24040), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24038, 24040), True, 'import matplotlib.pyplot as plt\n'), ((24521, 24532), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24530, 24532), True, 'import matplotlib.pyplot as plt\n'), ((27024, 27035), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27033, 27035), True, 'import matplotlib.pyplot as plt\n'), ((27433, 27444), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27442, 27444), True, 'import matplotlib.pyplot as plt\n'), ((34291, 34302), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (34300, 34302), True, 'import matplotlib.pyplot as plt\n'), ((34998, 35009), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (35007, 35009), True, 'import matplotlib.pyplot as plt\n'), ((35840, 35890), 'pylatex.Document', 'tex.Document', (['self._pdf_filename'], {'font_size': '"""tiny"""'}), "(self._pdf_filename, font_size='tiny')\n", (35852, 35890), True, 'import pylatex as tex\n'), ((36755, 36769), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (36767, 36769), True, 'import matplotlib.pyplot as plt\n'), ((36781, 36787), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (36785, 36787), False, 'from simulator.plot import Plot\n'), ((39332, 39390), 'os.path.join', 'os.path.join', (['self._images_dirname', '"""train_test_error.png"""'], {}), "(self._images_dirname, 'train_test_error.png')\n", (39344, 39390), False, 'import os\n'), ((39395, 39437), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (39406, 39437), True, 'import matplotlib.pyplot as plt\n'), ((39683, 39697), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (39695, 39697), True, 'import matplotlib.pyplot as plt\n'), ((39709, 39715), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (39713, 39715), False, 'from simulator.plot import Plot\n'), ((43469, 43511), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (43480, 43511), True, 'import matplotlib.pyplot as plt\n'), ((43918, 43932), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (43930, 43932), True, 'import matplotlib.pyplot as plt\n'), ((43944, 43950), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (43948, 43950), False, 'from simulator.plot import Plot\n'), ((48330, 48344), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (48342, 48344), True, 'import 
matplotlib.pyplot as plt\n'), ((48356, 48362), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (48360, 48362), False, 'from simulator.plot import Plot\n'), ((52317, 52359), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (52328, 52359), True, 'import matplotlib.pyplot as plt\n'), ((52693, 52735), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (52704, 52735), True, 'import matplotlib.pyplot as plt\n'), ((53103, 53145), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (53114, 53145), True, 'import matplotlib.pyplot as plt\n'), ((53446, 53488), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (53457, 53488), True, 'import matplotlib.pyplot as plt\n'), ((53685, 53713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (53695, 53713), True, 'import matplotlib.pyplot as plt\n'), ((54491, 54522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (54501, 54522), True, 'import matplotlib.pyplot as plt\n'), ((54527, 54550), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error Gap"""'], {}), "('Error Gap')\n", (54537, 54550), True, 'import matplotlib.pyplot as plt\n'), ((54628, 54640), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (54638, 54640), True, 'import matplotlib.pyplot as plt\n'), ((54788, 54830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (54799, 54830), True, 'import matplotlib.pyplot as plt\n'), ((55128, 55170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (55139, 
55170), True, 'import matplotlib.pyplot as plt\n'), ((55365, 55393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (55375, 55393), True, 'import matplotlib.pyplot as plt\n'), ((56169, 56200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (56179, 56200), True, 'import matplotlib.pyplot as plt\n'), ((56205, 56227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss Gap"""'], {}), "('Loss Gap')\n", (56215, 56227), True, 'import matplotlib.pyplot as plt\n'), ((56305, 56317), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (56315, 56317), True, 'import matplotlib.pyplot as plt\n'), ((56464, 56506), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (56475, 56506), True, 'import matplotlib.pyplot as plt\n'), ((56823, 56837), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (56835, 56837), True, 'import matplotlib.pyplot as plt\n'), ((56849, 56855), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (56853, 56855), False, 'from simulator.plot import Plot\n'), ((60573, 60615), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (60584, 60615), True, 'import matplotlib.pyplot as plt\n'), ((60720, 60734), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (60732, 60734), True, 'import matplotlib.pyplot as plt\n'), ((60746, 60752), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (60750, 60752), False, 'from simulator.plot import Plot\n'), ((61334, 61348), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (61346, 61348), True, 'import matplotlib.pyplot as plt\n'), ((61360, 61366), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (61364, 61366), False, 'from simulator.plot import Plot\n'), ((63901, 63958), 'os.path.join', 'os.path.join', (['self._images_dirname', 
'"""train_test_loss.png"""'], {}), "(self._images_dirname, 'train_test_loss.png')\n", (63913, 63958), False, 'import os\n'), ((63963, 64005), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (63974, 64005), True, 'import matplotlib.pyplot as plt\n'), ((64136, 64150), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (64148, 64150), True, 'import matplotlib.pyplot as plt\n'), ((64162, 64168), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (64166, 64168), False, 'from simulator.plot import Plot\n'), ((66687, 66729), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (66698, 66729), True, 'import matplotlib.pyplot as plt\n'), ((66859, 66873), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (66871, 66873), True, 'import matplotlib.pyplot as plt\n'), ((66885, 66891), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (66889, 66891), False, 'from simulator.plot import Plot\n'), ((69403, 69445), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (69414, 69445), True, 'import matplotlib.pyplot as plt\n'), ((69659, 69701), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (69670, 69701), True, 'import matplotlib.pyplot as plt\n'), ((69924, 69966), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (69935, 69966), True, 'import matplotlib.pyplot as plt\n'), ((70280, 70320), 'os.path.join', 'os.path.join', (['self._images_dirname', 'name'], {}), '(self._images_dirname, name)\n', (70292, 70320), False, 'import os\n'), ((70326, 70368), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", 
(70337, 70368), True, 'import matplotlib.pyplot as plt\n'), ((70967, 71007), 'os.path.join', 'os.path.join', (['self._images_dirname', 'name'], {}), '(self._images_dirname, name)\n', (70979, 71007), False, 'import os\n'), ((71013, 71055), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (71024, 71055), True, 'import matplotlib.pyplot as plt\n'), ((71900, 71975), 'pylatex.Tabular', 'tex.Tabular', (['table_spec'], {'pos': 'self._position', 'booktabs': '(False)', 'row_height': '(0.1)'}), '(table_spec, pos=self._position, booktabs=False, row_height=0.1)\n', (71911, 71975), True, 'import pylatex as tex\n'), ((74102, 74142), 'os.path.join', 'os.path.join', (['dirname', '"""summaries"""', 'name'], {}), "(dirname, 'summaries', name)\n", (74114, 74142), False, 'import os\n'), ((74325, 74372), 'os.path.join', 'os.path.join', (['self._dirname', '"""description.json"""'], {}), "(self._dirname, 'description.json')\n", (74337, 74372), False, 'import os\n'), ((78235, 78255), 'numpy.mean', 'np.mean', (['err_differs'], {}), '(err_differs)\n', (78242, 78255), True, 'import numpy as np\n'), ((78277, 78296), 'numpy.std', 'np.std', (['err_differs'], {}), '(err_differs)\n', (78283, 78296), True, 'import numpy as np\n'), ((78439, 78465), 'numpy.mean', 'np.mean', (['err_final_differs'], {}), '(err_final_differs)\n', (78446, 78465), True, 'import numpy as np\n'), ((78511, 78536), 'numpy.std', 'np.std', (['err_final_differs'], {}), '(err_final_differs)\n', (78517, 78536), True, 'import numpy as np\n'), ((91175, 91189), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (91187, 91189), True, 'import matplotlib.pyplot as plt\n'), ((91201, 91207), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (91205, 91207), False, 'from simulator.plot import Plot\n'), ((91582, 91596), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (91594, 91596), True, 'import matplotlib.pyplot as plt\n'), ((91608, 
91614), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (91612, 91614), False, 'from simulator.plot import Plot\n'), ((93324, 93338), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (93336, 93338), True, 'import matplotlib.pyplot as plt\n'), ((93350, 93356), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (93354, 93356), False, 'from simulator.plot import Plot\n'), ((95704, 95710), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (95708, 95710), False, 'from simulator.plot import Plot\n'), ((95725, 95739), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (95737, 95739), True, 'import matplotlib.pyplot as plt\n'), ((98329, 98343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (98341, 98343), True, 'import matplotlib.pyplot as plt\n'), ((98355, 98361), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (98359, 98361), False, 'from simulator.plot import Plot\n'), ((99764, 99778), 'numpy.mean', 'np.mean', (['means'], {}), '(means)\n', (99771, 99778), True, 'import numpy as np\n'), ((99792, 99808), 'numpy.mean', 'np.mean', (['stddevs'], {}), '(stddevs)\n', (99799, 99808), True, 'import numpy as np\n'), ((103476, 103490), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (103488, 103490), True, 'import matplotlib.pyplot as plt\n'), ((103502, 103508), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (103506, 103508), False, 'from simulator.plot import Plot\n'), ((103797, 103982), 'seaborn.distplot', 'seaborn.distplot', (['tostay_probas'], {'kde': '(True)', 'hist': '(True)', 'norm_hist': '(True)', 'bins': 'bins', 'hist_kws': "{'edgecolor': 'black'}", 'kde_kws': "{'linewidth': 4}", 'label': '"""Next State is Current State"""'}), "(tostay_probas, kde=True, hist=True, norm_hist=True, bins=\n bins, hist_kws={'edgecolor': 'black'}, kde_kws={'linewidth': 4}, label=\n 'Next State is Current State')\n", (103813, 103982), False, 'import seaborn\n'), ((104017, 104203), 'seaborn.distplot', 'seaborn.distplot', 
(['tohigher_probas'], {'kde': '(True)', 'hist': '(True)', 'norm_hist': '(True)', 'bins': 'bins', 'hist_kws': "{'edgecolor': 'black'}", 'kde_kws': "{'linewidth': 4}", 'label': '"""Next State is Higher State"""'}), "(tohigher_probas, kde=True, hist=True, norm_hist=True, bins\n =bins, hist_kws={'edgecolor': 'black'}, kde_kws={'linewidth': 4}, label\n ='Next State is Higher State')\n", (104033, 104203), False, 'import seaborn\n'), ((104238, 104422), 'seaborn.distplot', 'seaborn.distplot', (['tolower_probas'], {'kde': '(True)', 'hist': '(True)', 'norm_hist': '(True)', 'bins': 'bins', 'hist_kws': "{'edgecolor': 'black'}", 'kde_kws': "{'linewidth': 4}", 'label': '"""Next State is Lower State"""'}), "(tolower_probas, kde=True, hist=True, norm_hist=True, bins=\n bins, hist_kws={'edgecolor': 'black'}, kde_kws={'linewidth': 4}, label=\n 'Next State is Lower State')\n", (104254, 104422), False, 'import seaborn\n'), ((105722, 105738), 'numpy.asarray', 'np.asarray', (['diff'], {}), '(diff)\n', (105732, 105738), True, 'import numpy as np\n'), ((106185, 106197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (106195, 106197), True, 'import matplotlib.pyplot as plt\n'), ((106653, 106673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (106663, 106673), True, 'import matplotlib.pyplot as plt\n'), ((106678, 106703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (106688, 106703), True, 'import matplotlib.pyplot as plt\n'), ((106808, 106820), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (106818, 106820), True, 'import matplotlib.pyplot as plt\n'), ((106880, 106894), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (106892, 106894), True, 'import matplotlib.pyplot as plt\n'), ((106906, 106912), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (106910, 106912), False, 'from simulator.plot import Plot\n'), ((107294, 107308), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {}), '()\n', (107306, 107308), True, 'import matplotlib.pyplot as plt\n'), ((107320, 107326), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (107324, 107326), False, 'from simulator.plot import Plot\n'), ((109608, 109620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (109618, 109620), True, 'import matplotlib.pyplot as plt\n'), ((110731, 110762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (110741, 110762), True, 'import matplotlib.pyplot as plt\n'), ((110767, 110786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (110777, 110786), True, 'import matplotlib.pyplot as plt\n'), ((110822, 110834), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (110832, 110834), True, 'import matplotlib.pyplot as plt\n'), ((110839, 110865), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim[0]', 'ylim[1]'], {}), '(ylim[0], ylim[1])\n', (110847, 110865), True, 'import matplotlib.pyplot as plt\n'), ((112992, 113004), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (113002, 113004), True, 'import matplotlib.pyplot as plt\n'), ((114116, 114147), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (114126, 114147), True, 'import matplotlib.pyplot as plt\n'), ((114152, 114170), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (114162, 114170), True, 'import matplotlib.pyplot as plt\n'), ((114206, 114218), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (114216, 114218), True, 'import matplotlib.pyplot as plt\n'), ((114223, 114249), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim[0]', 'ylim[1]'], {}), '(ylim[0], ylim[1])\n', (114231, 114249), True, 'import matplotlib.pyplot as plt\n'), ((116608, 116620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (116618, 116620), True, 'import matplotlib.pyplot as plt\n'), ((117261, 117292), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (117271, 117292), True, 'import matplotlib.pyplot as plt\n'), ((117297, 117320), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error Gap"""'], {}), "('Error Gap')\n", (117307, 117320), True, 'import matplotlib.pyplot as plt\n'), ((117398, 117410), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (117408, 117410), True, 'import matplotlib.pyplot as plt\n'), ((119641, 119653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (119651, 119653), True, 'import matplotlib.pyplot as plt\n'), ((120287, 120318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'labelpad': '(20)'}), '(xlabel, labelpad=20)\n', (120297, 120318), True, 'import matplotlib.pyplot as plt\n'), ((120323, 120345), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss Gap"""'], {}), "('Loss Gap')\n", (120333, 120345), True, 'import matplotlib.pyplot as plt\n'), ((120429, 120441), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (120439, 120441), True, 'import matplotlib.pyplot as plt\n'), ((120516, 120530), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (120528, 120530), True, 'import matplotlib.pyplot as plt\n'), ((120542, 120548), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (120546, 120548), False, 'from simulator.plot import Plot\n'), ((121179, 121193), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (121191, 121193), True, 'import matplotlib.pyplot as plt\n'), ((121205, 121211), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (121209, 121211), False, 'from simulator.plot import Plot\n'), ((122592, 122621), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (122604, 122621), True, 'import matplotlib.pyplot as plt\n'), ((122925, 122952), 'numpy.ceil', 'np.ceil', (['(dsize / batch_size)'], {}), '(dsize / batch_size)\n', (122932, 122952), True, 'import numpy as np\n'), 
((124064, 124104), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(25000, n_epochs * epoch_mult)'], {}), '((25000, n_epochs * epoch_mult))\n', (124072, 124104), True, 'import matplotlib.pyplot as plt\n'), ((124108, 124148), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks', 'xlabels'], {'fontsize': '(23)'}), '(xticks, xlabels, fontsize=23)\n', (124118, 124148), True, 'import matplotlib.pyplot as plt\n'), ((124153, 124193), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks', 'ylabels'], {'fontsize': '(23)'}), '(yticks, ylabels, fontsize=23)\n', (124163, 124193), True, 'import matplotlib.pyplot as plt\n'), ((124835, 124849), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (124847, 124849), True, 'import matplotlib.pyplot as plt\n'), ((124861, 124867), 'simulator.plot.Plot', 'Plot', ([], {}), '()\n', (124865, 124867), False, 'from simulator.plot import Plot\n'), ((126488, 126505), 'numpy.asarray', 'np.asarray', (['lhs_x'], {}), '(lhs_x)\n', (126498, 126505), True, 'import numpy as np\n'), ((126507, 126524), 'numpy.asarray', 'np.asarray', (['lhs_y'], {}), '(lhs_y)\n', (126517, 126524), True, 'import numpy as np\n'), ((126542, 126559), 'numpy.asarray', 'np.asarray', (['rhs_x'], {}), '(rhs_x)\n', (126552, 126559), True, 'import numpy as np\n'), ((126561, 126578), 'numpy.asarray', 'np.asarray', (['rhs_y'], {}), '(rhs_y)\n', (126571, 126578), True, 'import numpy as np\n'), ((126970, 126987), 'numpy.asarray', 'np.asarray', (['res_x'], {}), '(res_x)\n', (126980, 126987), True, 'import numpy as np\n'), ((126989, 127006), 'numpy.asarray', 'np.asarray', (['res_y'], {}), '(res_y)\n', (126999, 127006), True, 'import numpy as np\n'), ((1905, 1928), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (1919, 1928), False, 'import os\n'), ((1936, 1956), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (1947, 1956), False, 'import os\n'), ((2025, 2054), 'os.path.exists', 'os.path.exists', (['self._dirname'], {}), '(self._dirname)\n', 
(2039, 2054), False, 'import os\n'), ((2062, 2088), 'os.makedirs', 'os.makedirs', (['self._dirname'], {}), '(self._dirname)\n', (2073, 2088), False, 'import os\n'), ((2309, 2345), 'os.path.exists', 'os.path.exists', (['self._images_dirname'], {}), '(self._images_dirname)\n', (2323, 2345), False, 'import os\n'), ((2353, 2386), 'os.makedirs', 'os.makedirs', (['self._images_dirname'], {}), '(self._images_dirname)\n', (2364, 2386), False, 'import os\n'), ((15961, 15986), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15976, 15986), False, 'import os\n'), ((21986, 22011), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (22001, 22011), False, 'import os\n'), ((22084, 22107), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (22098, 22107), False, 'import os\n'), ((22115, 22135), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (22126, 22135), False, 'import os\n'), ((22372, 22408), 'os.path.exists', 'os.path.exists', (['self._images_dirname'], {}), '(self._images_dirname)\n', (22386, 22408), False, 'import os\n'), ((22416, 22449), 'os.makedirs', 'os.makedirs', (['self._images_dirname'], {}), '(self._images_dirname)\n', (22427, 22449), False, 'import os\n'), ((23350, 23365), 'pylatex.LineBreak', 'tex.LineBreak', ([], {}), '()\n', (23363, 23365), True, 'import pylatex as tex\n'), ((25154, 25195), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imgpath'], {'bbox_inches': '"""tight"""'}), "(imgpath, bbox_inches='tight')\n", (25165, 25195), True, 'import matplotlib.pyplot as plt\n'), ((25202, 25213), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25211, 25213), True, 'import matplotlib.pyplot as plt\n'), ((25435, 25454), 'pylatex.basic.NewPage', 'tex.basic.NewPage', ([], {}), '()\n', (25452, 25454), True, 'import pylatex as tex\n'), ((26025, 26036), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26034, 26036), True, 'import matplotlib.pyplot as plt\n'), ((26538, 
26549), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26547, 26549), True, 'import matplotlib.pyplot as plt\n'), ((28154, 28165), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28163, 28165), True, 'import matplotlib.pyplot as plt\n'), ((28796, 28807), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28805, 28807), True, 'import matplotlib.pyplot as plt\n'), ((28824, 28843), 'pylatex.basic.NewPage', 'tex.basic.NewPage', ([], {}), '()\n', (28841, 28843), True, 'import pylatex as tex\n'), ((29319, 29330), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29328, 29330), True, 'import matplotlib.pyplot as plt\n'), ((29803, 29814), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29812, 29814), True, 'import matplotlib.pyplot as plt\n'), ((31158, 31177), 'pylatex.basic.NewPage', 'tex.basic.NewPage', ([], {}), '()\n', (31175, 31177), True, 'import pylatex as tex\n'), ((35957, 35972), 'pylatex.LineBreak', 'tex.LineBreak', ([], {}), '()\n', (35970, 35972), True, 'import pylatex as tex\n'), ((36325, 36336), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (36334, 36336), True, 'import matplotlib.pyplot as plt\n'), ((36621, 36632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (36630, 36632), True, 'import matplotlib.pyplot as plt\n'), ((47978, 48020), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {'bbox_inches': '"""tight"""'}), "(img_path, bbox_inches='tight')\n", (47989, 48020), True, 'import matplotlib.pyplot as plt\n'), ((51018, 51029), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (51026, 51029), True, 'import numpy as np\n'), ((51040, 51051), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (51048, 51051), True, 'import numpy as np\n'), ((71425, 71440), 'pylatex.LineBreak', 'tex.LineBreak', ([], {}), '()\n', (71438, 71440), True, 'import pylatex as tex\n'), ((74033, 74058), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (74048, 74058), False, 'import 
os\n'), ((74517, 74547), 'os.path.join', 'os.path.join', (['self._dirname', 'f'], {}), '(self._dirname, f)\n', (74529, 74547), False, 'import os\n'), ((74813, 74826), 'json.load', 'json.load', (['fo'], {}), '(fo)\n', (74822, 74826), False, 'import json\n'), ((78153, 78169), 'numpy.mean', 'np.mean', (['reps[r]'], {}), '(reps[r])\n', (78160, 78169), True, 'import numpy as np\n'), ((79869, 79884), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (79876, 79884), True, 'import numpy as np\n'), ((79902, 79916), 'numpy.std', 'np.std', (['losses'], {}), '(losses)\n', (79908, 79916), True, 'import numpy as np\n'), ((79934, 79948), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (79941, 79948), True, 'import numpy as np\n'), ((79966, 79979), 'numpy.std', 'np.std', (['diffs'], {}), '(diffs)\n', (79972, 79979), True, 'import numpy as np\n'), ((79997, 80011), 'numpy.mean', 'np.mean', (['steps'], {}), '(steps)\n', (80004, 80011), True, 'import numpy as np\n'), ((80029, 80042), 'numpy.std', 'np.std', (['steps'], {}), '(steps)\n', (80035, 80042), True, 'import numpy as np\n'), ((87182, 87198), 'numpy.mean', 'np.mean', (['accepts'], {}), '(accepts)\n', (87189, 87198), True, 'import numpy as np\n'), ((87238, 87253), 'numpy.std', 'np.std', (['accepts'], {}), '(accepts)\n', (87244, 87253), True, 'import numpy as np\n'), ((89196, 89215), 'numpy.mean', 'np.mean', (['mix_ratios'], {}), '(mix_ratios)\n', (89203, 89215), True, 'import numpy as np\n'), ((89250, 89271), 'numpy.mean', 'np.mean', (['visit_ratios'], {}), '(visit_ratios)\n', (89257, 89271), True, 'import numpy as np\n'), ((89308, 89326), 'numpy.std', 'np.std', (['mix_ratios'], {}), '(mix_ratios)\n', (89314, 89326), True, 'import numpy as np\n'), ((89365, 89385), 'numpy.std', 'np.std', (['visit_ratios'], {}), '(visit_ratios)\n', (89371, 89385), True, 'import numpy as np\n'), ((95771, 95786), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (95783, 95786), True, 'import numpy as np\n'), ((99549, 99737), 
'seaborn.distplot', 'seaborn.distplot', (['noise_level_loss'], {'kde': '(True)', 'hist': '(True)', 'norm_hist': '(True)', 'bins': 'bins', 'hist_kws': "{'edgecolor': 'black'}", 'kde_kws': "{'linewidth': 4}", 'label': 'label', 'fit': 'fitting_distribution'}), "(noise_level_loss, kde=True, hist=True, norm_hist=True,\n bins=bins, hist_kws={'edgecolor': 'black'}, kde_kws={'linewidth': 4},\n label=label, fit=fitting_distribution)\n", (99565, 99737), False, 'import seaborn\n'), ((106319, 106344), 'numpy.zeros_like', 'np.zeros_like', (['noise_vals'], {}), '(noise_vals)\n', (106332, 106344), True, 'import numpy as np\n'), ((109861, 109887), 'numpy.asarray', 'np.asarray', (["result['test']"], {}), "(result['test'])\n", (109871, 109887), True, 'import numpy as np\n'), ((110299, 110329), 'numpy.asanyarray', 'np.asanyarray', (["result['train']"], {}), "(result['train'])\n", (110312, 110329), True, 'import numpy as np\n'), ((113244, 113270), 'numpy.asarray', 'np.asarray', (["result['test']"], {}), "(result['test'])\n", (113254, 113270), True, 'import numpy as np\n'), ((113681, 113711), 'numpy.asanyarray', 'np.asanyarray', (["result['train']"], {}), "(result['train'])\n", (113694, 113711), True, 'import numpy as np\n'), ((116257, 116283), 'numpy.asarray', 'np.asarray', (["result['test']"], {}), "(result['test'])\n", (116267, 116283), True, 'import numpy as np\n'), ((116286, 116313), 'numpy.asarray', 'np.asarray', (["result['train']"], {}), "(result['train'])\n", (116296, 116313), True, 'import numpy as np\n'), ((119295, 119321), 'numpy.asarray', 'np.asarray', (["result['test']"], {}), "(result['test'])\n", (119305, 119321), True, 'import numpy as np\n'), ((119324, 119351), 'numpy.asarray', 'np.asarray', (["result['train']"], {}), "(result['train'])\n", (119334, 119351), True, 'import numpy as np\n'), ((122229, 122247), 'numpy.argmin', 'np.argmin', (['results'], {}), '(results)\n', (122238, 122247), True, 'import numpy as np\n'), ((126303, 126324), 'numpy.abs', 'np.abs', (['(array 
- value)'], {}), '(array - value)\n', (126309, 126324), True, 'import numpy as np\n'), ((1789, 1814), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1804, 1814), False, 'import os\n'), ((3179, 3214), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (3189, 3214), True, 'import pylatex as tex\n'), ((3419, 3454), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (3429, 3454), True, 'import pylatex as tex\n'), ((3667, 3702), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (3677, 3702), True, 'import pylatex as tex\n'), ((3903, 3938), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (3913, 3938), True, 'import pylatex as tex\n'), ((4134, 4169), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (4144, 4169), True, 'import pylatex as tex\n'), ((4347, 4382), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (4357, 4382), True, 'import pylatex as tex\n'), ((4562, 4597), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (4572, 4597), True, 'import pylatex as tex\n'), ((4774, 4809), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (4784, 4809), True, 'import pylatex as tex\n'), ((23496, 23531), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (23506, 23531), True, 'import pylatex as tex\n'), ((24062, 24097), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (24072, 24097), True, 'import pylatex as tex\n'), ((24640, 24675), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (24650, 24675), 
True, 'import pylatex as tex\n'), ((26647, 26682), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (26657, 26682), True, 'import pylatex as tex\n'), ((27057, 27092), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (27067, 27092), True, 'import pylatex as tex\n'), ((33834, 33869), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (33844, 33869), True, 'import pylatex as tex\n'), ((34324, 34359), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (34334, 34359), True, 'import pylatex as tex\n'), ((43116, 43144), 'json.dumps', 'json.dumps', (['added_noise_keys'], {}), '(added_noise_keys)\n', (43126, 43144), False, 'import json\n'), ((51655, 51680), 'numpy.where', 'np.where', (['(x1 <= epoch_lim)'], {}), '(x1 <= epoch_lim)\n', (51663, 51680), True, 'import numpy as np\n'), ((60221, 60249), 'json.dumps', 'json.dumps', (['added_noise_keys'], {}), '(added_noise_keys)\n', (60231, 60249), False, 'import json\n'), ((71283, 71298), 'pylatex.LineBreak', 'tex.LineBreak', ([], {}), '()\n', (71296, 71298), True, 'import pylatex as tex\n'), ((71393, 71408), 'pylatex.LineBreak', 'tex.LineBreak', ([], {}), '()\n', (71406, 71408), True, 'import pylatex as tex\n'), ((77577, 77594), 'numpy.argmin', 'np.argmin', (['test_y'], {}), '(test_y)\n', (77586, 77594), True, 'import numpy as np\n'), ((79608, 79635), 'numpy.argmin', 'np.argmin', (['candidate_losses'], {}), '(candidate_losses)\n', (79617, 79635), True, 'import numpy as np\n'), ((99142, 99167), 'numpy.mean', 'np.mean', (['noise_level_loss'], {}), '(noise_level_loss)\n', (99149, 99167), True, 'import numpy as np\n'), ((99190, 99214), 'numpy.std', 'np.std', (['noise_level_loss'], {}), '(noise_level_loss)\n', (99196, 99214), True, 'import numpy as np\n'), ((102695, 102738), 'numpy.exp', 'np.exp', (['(proba_coeff * (li - lj) * (bi - 
bj))'], {}), '(proba_coeff * (li - lj) * (bi - bj))\n', (102701, 102738), True, 'import numpy as np\n'), ((103158, 103201), 'numpy.exp', 'np.exp', (['(proba_coeff * (li - lj) * (bi - bj))'], {}), '(proba_coeff * (li - lj) * (bi - bj))\n', (103164, 103201), True, 'import numpy as np\n'), ((106427, 106449), 'numpy.ones_like', 'np.ones_like', (['err_vals'], {}), '(err_vals)\n', (106439, 106449), True, 'import numpy as np\n'), ((106539, 106559), 'numpy.ones_like', 'np.ones_like', (['x_diff'], {}), '(x_diff)\n', (106551, 106559), True, 'import numpy as np\n'), ((121684, 121693), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (121691, 121693), True, 'import matplotlib.pyplot as plt\n'), ((125884, 125899), 'numpy.mean', 'np.mean', (['probas'], {}), '(probas)\n', (125891, 125899), True, 'import numpy as np\n'), ((125901, 125915), 'numpy.std', 'np.std', (['probas'], {}), '(probas)\n', (125907, 125915), True, 'import numpy as np\n'), ((2199, 2229), 'os.path.join', 'os.path.join', (['self._dirname', 'f'], {}), '(self._dirname, f)\n', (2211, 2229), False, 'import os\n'), ((25590, 25625), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (25600, 25625), True, 'import pylatex as tex\n'), ((26104, 26139), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (26114, 26139), True, 'import pylatex as tex\n'), ((26919, 26949), 'pylatex.utils.escape_latex', 'tex.utils.escape_latex', (['""" \n """'], {}), "(' \\n ')\n", (26941, 26949), True, 'import pylatex as tex\n'), ((27328, 27358), 'pylatex.utils.escape_latex', 'tex.utils.escape_latex', (['""" \n """'], {}), "(' \\n ')\n", (27350, 27358), True, 'import pylatex as tex\n'), ((27587, 27622), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (27597, 27622), True, 'import pylatex as tex\n'), ((28233, 28268), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), 
'(position=self._position)\n', (28243, 28268), True, 'import pylatex as tex\n'), ((28986, 29021), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (28996, 29021), True, 'import pylatex as tex\n'), ((29464, 29499), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (29474, 29499), True, 'import pylatex as tex\n'), ((30351, 30362), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30360, 30362), True, 'import matplotlib.pyplot as plt\n'), ((33801, 33812), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (33810, 33812), True, 'import matplotlib.pyplot as plt\n'), ((36061, 36096), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (36071, 36096), True, 'import pylatex as tex\n'), ((36360, 36395), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (36370, 36395), True, 'import pylatex as tex\n'), ((51102, 51126), 'numpy.where', 'np.where', (['(x <= epoch_lim)'], {}), '(x <= epoch_lim)\n', (51110, 51126), True, 'import numpy as np\n'), ((54450, 54481), 'scipy.stats.pearsonr', 'pearsonr', (['y_gap', "result['diff']"], {}), "(y_gap, result['diff'])\n", (54458, 54481), False, 'from scipy.stats import pearsonr\n'), ((56128, 56159), 'scipy.stats.pearsonr', 'pearsonr', (['y_gap', "result['diff']"], {}), "(y_gap, result['diff'])\n", (56136, 56159), False, 'from scipy.stats import pearsonr\n'), ((73212, 73277), 'simulator.simulator_utils.get_value_from_name', 's_utils.get_value_from_name', (['se._original_name', '"""train_data_size"""'], {}), "(se._original_name, 'train_data_size')\n", (73239, 73277), True, 'import simulator.simulator_utils as s_utils\n'), ((74178, 74203), 'os.listdir', 'os.listdir', (['self._dirname'], {}), '(self._dirname)\n', (74188, 74203), False, 'import os\n'), ((77927, 77965), 'numpy.mean', 'np.mean', (['train_y[-s_utils.TRAIN_FREQ:]'], 
{}), '(train_y[-s_utils.TRAIN_FREQ:])\n', (77934, 77965), True, 'import numpy as np\n'), ((79275, 79293), 'numpy.asarray', 'np.asarray', (['y_loss'], {}), '(y_loss)\n', (79285, 79293), True, 'import numpy as np\n'), ((79661, 79689), 'numpy.asarray', 'np.asarray', (['candidate_losses'], {}), '(candidate_losses)\n', (79671, 79689), True, 'import numpy as np\n'), ((99990, 100023), 'inspect.getsource', 'inspect.getsource', (['transformation'], {}), '(transformation)\n', (100007, 100023), False, 'import inspect\n'), ((110680, 110720), 'scipy.stats.pearsonr', 'pearsonr', (["result['test']", "result['diff']"], {}), "(result['test'], result['diff'])\n", (110688, 110720), False, 'from scipy.stats import pearsonr\n'), ((114065, 114105), 'scipy.stats.pearsonr', 'pearsonr', (["result['test']", "result['diff']"], {}), "(result['test'], result['diff'])\n", (114073, 114105), False, 'from scipy.stats import pearsonr\n'), ((117219, 117250), 'scipy.stats.pearsonr', 'pearsonr', (['y_gap', "result['diff']"], {}), "(y_gap, result['diff'])\n", (117227, 117250), False, 'from scipy.stats import pearsonr\n'), ((120245, 120276), 'scipy.stats.pearsonr', 'pearsonr', (['y_gap', "result['diff']"], {}), "(y_gap, result['diff'])\n", (120253, 120276), False, 'from scipy.stats import pearsonr\n'), ((125795, 125828), 'numpy.exp', 'np.exp', (['(pairs_coeffs[p] * exp_arg)'], {}), '(pairs_coeffs[p] * exp_arg)\n', (125801, 125828), True, 'import numpy as np\n'), ((30033, 30068), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (30043, 30068), True, 'import pylatex as tex\n'), ((31074, 31085), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (31083, 31085), True, 'import matplotlib.pyplot as plt\n'), ((31793, 31836), 'os.path.join', 'os.path.join', (['self._images_dirname', 'imgname'], {}), '(self._images_dirname, imgname)\n', (31805, 31836), False, 'import os\n'), ((31884, 31925), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imgpath'], 
{'bbox_inches': '"""tight"""'}), "(imgpath, bbox_inches='tight')\n", (31895, 31925), True, 'import matplotlib.pyplot as plt\n'), ((32176, 32187), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32185, 32187), True, 'import matplotlib.pyplot as plt\n'), ((33026, 33061), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (33036, 33061), True, 'import pylatex as tex\n'), ((74648, 74663), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (74659, 74663), False, 'import pickle\n'), ((87135, 87145), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (87142, 87145), True, 'import numpy as np\n'), ((88718, 88769), 'numpy.mean', 'np.mean', (['[(1 if reps[x] != 0 else 0) for x in reps]'], {}), '([(1 if reps[x] != 0 else 0) for x in reps])\n', (88725, 88769), True, 'import numpy as np\n'), ((30583, 30618), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (30593, 30618), True, 'import pylatex as tex\n'), ((31585, 31620), 'pylatex.Figure', 'tex.Figure', ([], {'position': 'self._position'}), '(position=self._position)\n', (31595, 31620), True, 'import pylatex as tex\n'), ((83316, 83328), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (83325, 83328), True, 'import numpy as np\n'), ((83464, 83476), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (83473, 83476), True, 'import numpy as np\n'), ((83617, 83629), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (83626, 83629), True, 'import numpy as np\n'), ((110575, 110600), 'scipy.stats.pearsonr', 'pearsonr', (['y_train', 'y_diff'], {}), '(y_train, y_diff)\n', (110583, 110600), False, 'from scipy.stats import pearsonr\n'), ((113960, 113985), 'scipy.stats.pearsonr', 'pearsonr', (['y_train', 'y_diff'], {}), '(y_train, y_diff)\n', (113968, 113985), False, 'from scipy.stats import pearsonr\n')] |
from datetime import datetime, timedelta, timezone
from enum import Enum
import logging
import pickle
from typing import Collection, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
import sentry_sdk
from sqlalchemy import and_, desc, insert, outerjoin, select, union_all
from sqlalchemy.dialects.postgresql import insert as postgres_insert
from sqlalchemy.orm.attributes import InstrumentedAttribute
from athenian.api import metadata
from athenian.api.async_utils import gather, read_sql_query
from athenian.api.cache import cached, middle_term_exptime, short_term_exptime
from athenian.api.controllers.logical_repos import drop_logical_repo
from athenian.api.controllers.miners.github.branches import BranchMiner, load_branch_commit_dates
from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, \
extract_subdag, join_dags, partition_dag, searchsorted_inrange
from athenian.api.controllers.miners.types import DAG as DAGStruct
from athenian.api.controllers.prefixer import Prefixer
from athenian.api.db import add_pdb_hits, add_pdb_misses, Database, DatabaseLike
from athenian.api.defer import defer
from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, \
PushCommit, Release, User
from athenian.api.models.precomputed.models import GitHubCommitHistory
from athenian.api.tracing import sentry_span
class FilterCommitsProperty(Enum):
    """Primary commit filter modes.

    The string values serve as stable identifiers, e.g. in the cache key of
    :func:`extract_commits`.
    """

    NO_PR_MERGES = "no_pr_merges"
    BYPASSING_PRS = "bypassing_prs"
# A commit DAG encoded as three numpy arrays:
# hashes, vertex offsets in edges, edge indexes
DAG = Tuple[np.ndarray, np.ndarray, np.ndarray]
@sentry_span
@cached(
    exptime=short_term_exptime,
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    key=lambda prop, date_from, date_to, repos, with_author, with_committer, only_default_branch, **kwargs: # noqa
    (
        prop.value,
        date_from.timestamp(), date_to.timestamp(),
        ",".join(sorted(repos)),
        ",".join(sorted(with_author)) if with_author else "",
        ",".join(sorted(with_committer)) if with_committer else "",
        "" if kwargs.get("columns") is None else ",".join(c.name for c in kwargs["columns"]),
        only_default_branch,
    ),
)
async def extract_commits(prop: FilterCommitsProperty,
                          date_from: datetime,
                          date_to: datetime,
                          repos: Collection[str],
                          with_author: Optional[Collection[str]],
                          with_committer: Optional[Collection[str]],
                          only_default_branch: bool,
                          branch_miner: Optional[BranchMiner],
                          prefixer: Prefixer,
                          account: int,
                          meta_ids: Tuple[int, ...],
                          mdb: DatabaseLike,
                          pdb: DatabaseLike,
                          cache: Optional[aiomcache.Client],
                          columns: Optional[List[InstrumentedAttribute]] = None,
                          ) -> pd.DataFrame:
    """Fetch commits that satisfy the given filters.

    :param prop: Primary filter mode, see :class:`FilterCommitsProperty`.
    :param date_from: Inclusive lower bound on the commit date.
    :param date_to: Upper bound on the commit date.
    :param repos: Repository names to consider.
    :param with_author: Commit author logins to select; None/empty disables the filter.
    :param with_committer: Committer logins to select; None/empty disables the filter.
    :param only_default_branch: Keep only commits reachable from the default branches;
                                otherwise, discard force-push-dropped commits.
    :param columns: Optional subset of PushCommit columns to load; the node ID,
                    repository name, and hash are always added because the
                    post-processing below requires them.
    :return: DataFrame with the selected commits.
    """
    assert isinstance(date_from, datetime)
    assert isinstance(date_to, datetime)
    log = logging.getLogger("%s.extract_commits" % metadata.__package__)
    sql_filters = [
        PushCommit.acc_id.in_(meta_ids),
        PushCommit.committed_date.between(date_from, date_to),
        PushCommit.repository_full_name.in_(repos),
        PushCommit.committer_email != "<EMAIL>",
    ]
    # Resolve both author and committer logins to node IDs in one query.
    user_logins = set()
    if with_author:
        user_logins.update(with_author)
    if with_committer:
        user_logins.update(with_committer)
    if user_logins:
        rows = await mdb.fetch_all(
            select([User.login, User.node_id])
            .where(and_(User.login.in_(user_logins), User.acc_id.in_(meta_ids))))
        user_ids = {r[0]: r[1] for r in rows}
        del user_logins
    else:
        user_ids = {}
    if with_author:
        # Logins that did not resolve to a node ID are silently dropped.
        author_ids = [user_ids[u] for u in with_author if u in user_ids]
        sql_filters.append(PushCommit.author_user_id.in_(author_ids))
    if with_committer:
        committer_ids = [user_ids[u] for u in with_committer if u in user_ids]
        sql_filters.append(PushCommit.committer_user_id.in_(committer_ids))
    if columns is None:
        cols_query, cols_df = [PushCommit], PushCommit
    else:
        # These three columns are required by the branch/DAG filtering below.
        for col in (PushCommit.node_id, PushCommit.repository_full_name, PushCommit.sha):
            if col not in columns:
                columns.append(col)
        cols_query = cols_df = columns
    if prop == FilterCommitsProperty.NO_PR_MERGES:
        commits_task = read_sql_query(
            select(cols_query).where(and_(*sql_filters)), mdb, cols_df)
    elif prop == FilterCommitsProperty.BYPASSING_PRS:
        # Outer join against PR commits and keep the rows without a match.
        commits_task = read_sql_query(
            select(cols_query)
            .select_from(outerjoin(
                PushCommit, NodePullRequestCommit,
                and_(PushCommit.node_id == NodePullRequestCommit.commit_id,
                     PushCommit.acc_id == NodePullRequestCommit.acc_id)))
            .where(and_(NodePullRequestCommit.commit_id.is_(None), *sql_filters)),
            mdb, cols_df)
    else:
        raise AssertionError('Unsupported primary commit filter "%s"' % prop)
    tasks = [
        commits_task,
        fetch_repository_commits_from_scratch(
            repos, branch_miner, True, prefixer, account, meta_ids, mdb, pdb, cache),
    ]
    commits, (dags, branches, default_branches) = await gather(*tasks, op="extract_commits/fetch")
    candidates_count = len(commits)
    if only_default_branch:
        commits = _take_commits_in_default_branches(commits, dags, branches, default_branches)
        log.info("Removed side branch commits: %d / %d", len(commits), candidates_count)
    else:
        commits = _remove_force_push_dropped(commits, dags)
        log.info("Removed force push dropped commits: %d / %d", len(commits), candidates_count)
    for number_prop in (PushCommit.additions, PushCommit.deletions, PushCommit.changed_files):
        try:
            number_col = commits[number_prop.name]
        except KeyError:
            continue
        nans = commits[PushCommit.node_id.name].take(np.where(number_col.isna())[0])
        if not nans.empty:
            # map(str, ...): node IDs may be ints; str.join() requires strings and
            # would otherwise raise TypeError in this diagnostics path.
            log.error("[DEV-546] Commits have NULL in %s: %s",
                      number_prop.name, ", ".join(map(str, nans)))
    return commits
def _take_commits_in_default_branches(commits: pd.DataFrame,
                                      dags: Dict[str, DAG],
                                      branches: pd.DataFrame,
                                      default_branches: Dict[str, str],
                                      ) -> pd.DataFrame:
    """Keep only the commits that are reachable from their repository's default branch head.

    :param commits: Commits to filter; must contain the PushCommit repository and sha columns.
    :param dags: Map from repository name to its commit DAG.
    :param branches: All known branches with their repository, name, and head commit hash.
    :param default_branches: Map from repository name to its default branch name.
    :return: Subset of `commits` accessible from the default branch head of their repository.
    """
    if commits.empty:
        return commits
    # Build "repo|branch" composite keys so (repo, branch) pairs can be matched against
    # the default branches with a single vectorized in1d() call.
    branch_repos = branches[Branch.repository_full_name.name].values.astype("U")
    branch_names = branches[Branch.branch_name.name].values.astype("U")
    branch_repos_names = np.char.add(np.char.add(branch_repos, "|"), branch_names)
    default_repos_names = np.array([f"{k}|{v}" for k, v in default_branches.items()], dtype="U")
    default_mask = np.in1d(branch_repos_names, default_repos_names, assume_unique=True)
    branch_repos = branch_repos[default_mask]
    branch_hashes = branches[Branch.commit_sha.name].values[default_mask].astype("S")
    # Sort by repository name so the order matches np.unique()-ed commit repositories below.
    repos_order = np.argsort(branch_repos)
    branch_repos = branch_repos[repos_order]
    branch_hashes = branch_hashes[repos_order]
    commit_repos, commit_repo_indexes = np.unique(
        commits[PushCommit.repository_full_name.name].values.astype("U"), return_inverse=True)
    # Intersect the two sorted repository name arrays from both sides.
    commit_repos_in_branches_mask = np.in1d(commit_repos, branch_repos, assume_unique=True)
    branch_repos_in_commits_mask = np.in1d(branch_repos, commit_repos, assume_unique=True)
    branch_repos = branch_repos[branch_repos_in_commits_mask]
    branch_hashes = branch_hashes[branch_repos_in_commits_mask]
    commit_hashes = commits[PushCommit.sha.name].values.astype("S")
    accessible_indexes = []
    for repo, head_sha, commit_repo_index in zip(
            branch_repos, branch_hashes, np.nonzero(commit_repos_in_branches_mask)[0]):
        repo_indexes = np.nonzero(commit_repo_indexes == commit_repo_index)[0]
        repo_hashes = commit_hashes[repo_indexes]
        # Hashes reachable from the default branch head in this repository's DAG.
        default_branch_hashes = extract_subdag(*dags[repo], np.array([head_sha]))[0]
        accessible_indexes.append(
            repo_indexes[np.in1d(repo_hashes, default_branch_hashes, assume_unique=True)])
    if accessible_indexes:
        accessible_indexes = np.sort(np.concatenate(accessible_indexes))
    return commits.take(accessible_indexes)
def _remove_force_push_dropped(commits: pd.DataFrame, dags: Dict[str, DAG]) -> pd.DataFrame:
if commits.empty:
return commits
repos_order, indexes = np.unique(
commits[PushCommit.repository_full_name.name].values.astype("U"), return_inverse=True)
hashes = commits[PushCommit.sha.name].values.astype("S")
accessible_indexes = []
for i, repo in enumerate(repos_order):
repo_indexes = np.nonzero(indexes == i)[0]
repo_hashes = hashes[repo_indexes]
accessible_indexes.append(
repo_indexes[np.in1d(repo_hashes, dags[repo][0], assume_unique=True)])
accessible_indexes = np.sort(np.concatenate(accessible_indexes))
return commits.take(accessible_indexes)
@sentry_span
@cached(
    exptime=middle_term_exptime,
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    # The cache key is the sorted repo set + the sorted branch hashes + the prune flag;
    # an empty `branches` disables caching entirely.
    key=lambda repos, branches, columns, prune, **_: (
        ",".join(sorted(repos)),
        ",".join(np.sort(
            branches[columns[0] if isinstance(columns[0], str) else columns[0].name].values)),
        prune,
    ) if not branches.empty else None,
    refresh_on_access=True,
)
async def fetch_repository_commits(repos: Dict[str, DAG],
                                   branches: pd.DataFrame,
                                   columns: Tuple[Union[str, InstrumentedAttribute],
                                                  Union[str, InstrumentedAttribute],
                                                  Union[str, InstrumentedAttribute],
                                                  Union[str, InstrumentedAttribute]],
                                   prune: bool,
                                   account: int,
                                   meta_ids: Tuple[int, ...],
                                   mdb: Database,
                                   pdb: Database,
                                   cache: Optional[aiomcache.Client],
                                   ) -> Dict[str, DAG]:
    """
    Load full commit DAGs for the given repositories.

    :param repos: Map from repository names to their precomputed DAGs.
    :param branches: Commits must contain all the existing commits in this DataFrame.
    :param columns: Names of the columns in `branches` that correspond to: \
                    1. Commit hash. \
                    2. Commit node ID. \
                    3. Commit timestamp. \
                    4. Commit repository name.
    :param prune: Remove any commits that are not accessible from `branches`.
    :return: Map from repository names to their DAGs.
    """
    if branches.empty:
        if not prune:
            return repos
        return {key: _empty_dag() for key in repos}
    missed_counter = 0
    repo_heads = {}
    # `columns` items may be either raw column names or SQLAlchemy attributes.
    sha_col, id_col, dt_col, repo_col = (c if isinstance(c, str) else c.name for c in columns)
    hash_to_id = dict(zip(branches[sha_col].values, branches[id_col].values))
    hash_to_dt = dict(zip(branches[sha_col].values, branches[dt_col].values))
    result = {}
    tasks = []
    df_repos = branches[repo_col].values
    df_shas = branches[sha_col].values.astype("S40")
    # Group the branch head hashes by repository without a Python-level groupby:
    # argsort the inverse index and slice by the cumulative counts.
    unique_repos, index_map, counts = np.unique(df_repos, return_inverse=True, return_counts=True)
    repo_order = np.argsort(index_map)
    offsets = np.zeros(len(counts) + 1, dtype=int)
    np.cumsum(counts, out=offsets[1:])
    for i, repo in enumerate(unique_repos):
        required_heads = df_shas[repo_order[offsets[i]:offsets[i + 1]]]
        repo_heads[repo] = required_heads
        try:
            hashes, vertexes, edges = repos[drop_logical_repo(repo)]
        except KeyError:
            # totally OK, `branches` may include repositories from other ForSet-s
            continue
        if len(hashes) > 0:
            found_indexes = searchsorted_inrange(hashes, required_heads)
            missed_mask = hashes[found_indexes] != required_heads
            missed_counter += missed_mask.sum()
            missed_heads = required_heads[missed_mask]  # these hashes do not exist in the p-DAG
        else:
            missed_heads = required_heads
        if len(missed_heads) > 0:
            # heuristic: order the heads from most to least recent
            missed_heads = missed_heads.astype("U40")
            order = sorted([(hash_to_dt[h], i) for i, h in enumerate(missed_heads)], reverse=True)
            missed_heads = [missed_heads[i] for _, i in order]
            missed_ids = [hash_to_id[h] for h in missed_heads]
            tasks.append(_fetch_commit_history_dag(
                hashes, vertexes, edges, missed_heads, missed_ids, repo, meta_ids, mdb))
        else:
            # nothing to fetch for this repo; the precomputed DAG already covers all the heads
            if prune:
                hashes, vertexes, edges = extract_subdag(hashes, vertexes, edges, required_heads)
            result[repo] = hashes, vertexes, edges
    # traverse commits starting from the missing branch heads
    add_pdb_hits(pdb, "fetch_repository_commits", len(branches) - missed_counter)
    add_pdb_misses(pdb, "fetch_repository_commits", missed_counter)
    if tasks:
        new_dags = await gather(*tasks, op="fetch_repository_commits/mdb")
        sql_values = []
        for repo, hashes, vertexes, edges in new_dags:
            # the DAG hashes must stay strictly sorted — join_dags guarantees this
            assert (hashes[1:] > hashes[:-1]).all(), repo
            sql_values.append(GitHubCommitHistory(
                acc_id=account,
                repository_full_name=repo,
                dag=DAGStruct.from_fields(hashes=hashes, vertexes=vertexes, edges=edges).data,
            ).create_defaults().explode(with_primary_keys=True))
            if prune:
                # persist the full DAG above, but return only the reachable part
                hashes, vertexes, edges = extract_subdag(hashes, vertexes, edges, repo_heads[repo])
            result[repo] = hashes, vertexes, edges
        if pdb.url.dialect == "postgresql":
            sql = postgres_insert(GitHubCommitHistory)
            sql = sql.on_conflict_do_update(
                constraint=GitHubCommitHistory.__table__.primary_key,
                set_={GitHubCommitHistory.dag.name: sql.excluded.dag,
                      GitHubCommitHistory.updated_at.name: sql.excluded.updated_at})
        elif pdb.url.dialect == "sqlite":
            sql = insert(GitHubCommitHistory).prefix_with("OR REPLACE")
        else:
            raise AssertionError("Unsupported database dialect: %s" % pdb.url.dialect)
        async def execute():
            if pdb.url.dialect == "sqlite":
                async with pdb.connection() as pdb_conn:
                    async with pdb_conn.transaction():
                        await pdb_conn.execute_many(sql, sql_values)
            else:
                # don't require a transaction in Postgres, executemany() is atomic in new asyncpg
                await pdb.execute_many(sql, sql_values)
        # persist asynchronously; callers do not wait for the pdb write
        await defer(execute(), "fetch_repository_commits/pdb")
    for repo, pdag in repos.items():
        if repo not in result:
            result[repo] = _empty_dag() if prune else pdag
    return result
# Column quadruples (commit hash, commit node ID, commit timestamp, repository name)
# accepted as `columns` by fetch_repository_commits(), one per supported source table.
BRANCH_FETCH_COMMITS_COLUMNS = (
    Branch.commit_sha, Branch.commit_id, Branch.commit_date, Branch.repository_full_name,
)
RELEASE_FETCH_COMMITS_COLUMNS = (
    Release.sha, Release.commit_id, Release.published_at, Release.repository_full_name,
)
COMMIT_FETCH_COMMITS_COLUMNS = (
    PushCommit.sha, PushCommit.node_id, PushCommit.committed_date, PushCommit.repository_full_name,
)
@sentry_span
async def fetch_repository_commits_no_branch_dates(
        repos: Dict[str, DAG],
        branches: pd.DataFrame,
        columns: Tuple[str, str, str, str],
        prune: bool,
        account: int,
        meta_ids: Tuple[int, ...],
        mdb: Database,
        pdb: Database,
        cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
    """
    Load full commit DAGs for the given repositories.

    The difference with fetch_repository_commits is that `branches` may possibly miss the commit \
    dates. If that is the case, we fetch the commit dates.

    :return: Map from repository names to their DAGs.
    """
    # fill in any missing commit dates in-place before delegating
    await load_branch_commit_dates(branches, meta_ids, mdb)
    return await fetch_repository_commits(
        repos, branches, columns, prune, account, meta_ids, mdb, pdb, cache)
@sentry_span
async def fetch_repository_commits_from_scratch(repos: Iterable[str],
                                                branch_miner: BranchMiner,
                                                prune: bool,
                                                prefixer: Prefixer,
                                                account: int,
                                                meta_ids: Tuple[int, ...],
                                                mdb: Database,
                                                pdb: Database,
                                                cache: Optional[aiomcache.Client],
                                                ) -> Tuple[Dict[str, DAG],
                                                           pd.DataFrame,
                                                           Dict[str, str]]:
    """
    Load full commit DAGs for the given repositories.

    The difference with fetch_repository_commits is that we don't have `branches`. We load them
    in-place.

    :return: 1. Map from repository names to their DAGs. \
             2. The loaded branches DataFrame. \
             3. Map from repository names to their default branch names.
    """
    # branches (+ defaults) and the precomputed DAGs are independent — load them concurrently
    (branches, defaults), pdags = await gather(
        branch_miner.extract_branches(repos, prefixer, meta_ids, mdb, cache),
        fetch_precomputed_commit_history_dags(repos, account, pdb, cache),
    )
    dags = await fetch_repository_commits_no_branch_dates(
        pdags, branches, BRANCH_FETCH_COMMITS_COLUMNS, prune, account, meta_ids, mdb, pdb, cache)
    return dags, branches, defaults
@sentry_span
@cached(
    exptime=60 * 60,  # 1 hour
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    key=lambda repos, **_: (",".join(sorted(repos)),),
)
async def fetch_precomputed_commit_history_dags(
        repos: Iterable[str],
        account: int,
        pdb: Database,
        cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
    """Load commit DAGs from the pdb.

    Repositories without a precomputed DAG are mapped to an empty DAG.
    """
    ghrc = GitHubCommitHistory
    # only load rows written with the current serialization format version
    format_version = ghrc.__table__.columns[ghrc.format_version.key].default.arg
    with sentry_sdk.start_span(op="fetch_precomputed_commit_history_dags/pdb"):
        rows = await pdb.fetch_all(
            select([ghrc.repository_full_name, ghrc.dag])
            .where(and_(
                ghrc.format_version == format_version,
                ghrc.repository_full_name.in_(repos),
                ghrc.acc_id == account,
            )))
    # unpack each stored blob into the (hashes, vertexes, edges) triple
    dags = {
        row[ghrc.repository_full_name.name]: (
            (dag := DAGStruct(row[ghrc.dag.name])).hashes, dag.vertexes, dag.edges,
        )
        for row in rows
    }
    for repo in repos:
        if repo not in dags:
            dags[repo] = _empty_dag()
    return dags
def _empty_dag() -> DAG:
return np.array([], dtype="S40"), np.array([0], dtype=np.uint32), np.array([], dtype=np.uint32)
@sentry_span
async def _fetch_commit_history_dag(hashes: np.ndarray,
                                    vertexes: np.ndarray,
                                    edges: np.ndarray,
                                    head_hashes: Sequence[str],
                                    head_ids: Sequence[int],
                                    repo: str,
                                    meta_ids: Tuple[int, ...],
                                    mdb: Database,
                                    ) -> Tuple[str, np.ndarray, np.ndarray, np.ndarray]:
    """Fetch the missing commit history from mdb and merge it into the given DAG.

    :param hashes: Sorted commit hashes of the precomputed DAG.
    :param vertexes: Vertex index of the precomputed DAG (paired with `edges`).
    :param edges: Edges of the precomputed DAG.
    :param head_hashes: Hashes of the commits to traverse from; may contain duplicates.
    :param head_ids: Commit node IDs matching `head_hashes`.
    :return: Repository name and the updated DAG as (hashes, vertexes, edges).
    """
    max_stop_heads = 25
    max_inner_partitions = 25
    # there can be duplicates, remove them
    head_hashes = np.asarray(head_hashes)
    head_ids = np.asarray(head_ids)
    _, unique_indexes = np.unique(head_hashes, return_index=True)
    head_hashes = head_hashes[unique_indexes]
    head_ids = head_ids[unique_indexes]
    # find max `max_stop_heads` top-level most recent commit hashes
    # (top-level = vertexes that no edge points at)
    stop_heads = hashes[np.delete(np.arange(len(hashes)), np.unique(edges))]
    if len(stop_heads) > 0:
        if len(stop_heads) > max_stop_heads:
            # keep the most recently committed heads, no older than 90 days
            min_commit_time = datetime.now(timezone.utc) - timedelta(days=90)
            rows = await mdb.fetch_all(select([NodeCommit.oid])
                                       .where(and_(NodeCommit.oid.in_(stop_heads.astype("U40")),
                                                   NodeCommit.committed_date > min_commit_time,
                                                   NodeCommit.acc_id.in_(meta_ids)))
                                       .order_by(desc(NodeCommit.committed_date))
                                       .limit(max_stop_heads))
            stop_heads = np.fromiter((r[0] for r in rows), dtype="S40", count=len(rows))
        first_parents = extract_first_parents(hashes, vertexes, edges, stop_heads, max_depth=1000)
        # We can still branch from an arbitrary point. Choose `max_partitions` graph partitions.
        if len(first_parents) >= max_inner_partitions:
            # take evenly spaced seeds along the first-parent chain
            step = len(first_parents) // max_inner_partitions
            partition_seeds = first_parents[:max_inner_partitions * step:step]
        else:
            partition_seeds = first_parents
        partition_seeds = np.concatenate([stop_heads, partition_seeds])
        assert partition_seeds.dtype.char == "S"
        # the expansion factor is ~6x, so 2 * 25 -> 300
        with sentry_sdk.start_span(op="partition_dag",
                                   description="%d %d" % (len(hashes), len(partition_seeds))):
            stop_hashes = partition_dag(hashes, vertexes, edges, partition_seeds).astype("U40")
    else:
        stop_hashes = []
    batch_size = 20
    # traverse the heads in batches so that each recursive SQL query stays bounded
    while len(head_hashes) > 0:
        new_edges = await _fetch_commit_history_edges(
            head_ids[:batch_size], stop_hashes, meta_ids, mdb)
        if not new_edges:
            # no edges found for these heads; record them as parentless using the
            # all-zeros hash sentinel (see _fetch_commit_history_edges)
            new_edges = [(h, "0" * 40, 0) for h in np.sort(np.unique(head_hashes[:batch_size]))]
        hashes, vertexes, edges = join_dags(hashes, vertexes, edges, new_edges)
        head_hashes = head_hashes[batch_size:]
        head_ids = head_ids[batch_size:]
        if len(head_hashes) > 0 and len(hashes) > 0:
            # drop the remaining heads which the last batch has already reached
            collateral = np.flatnonzero(
                hashes[searchsorted_inrange(hashes, head_hashes)] == head_hashes)
            if len(collateral) > 0:
                head_hashes = np.delete(head_hashes, collateral)
                head_ids = np.delete(head_ids, collateral)
    return repo, hashes, vertexes, edges
async def _fetch_commit_history_edges(commit_ids: Iterable[int],
                                      stop_hashes: Iterable[str],
                                      meta_ids: Tuple[int, ...],
                                      mdb: Database,
                                      ) -> List[Tuple]:
    """
    Query metadata DB for the new commit DAG edges.

    We recursively traverse github.node_commit_edge_parents starting from `commit_ids`.
    Initial SQL credits: @dennwc.
    We return nodes in the native DB order, that's the opposite of Git's parent-child.
    CASE 0000000000000000000000000000000000000000 is required to distinguish between two options:
    1. node ID is null => we've reached the DAG's end.
    2. node ID is not null but the hash is null => we hit a temporary DB inconsistency.
    We don't include the edges from the outside to the first parents (`commit_ids`). This means
    that if some of `commit_ids` do not have children, there will be 0 edges with them.

    :param commit_ids: Commit node IDs to start the traversal from.
    :param stop_hashes: Commit hashes where the traversal must stop.
    :return: Tuples of (parent_oid, child_oid, parent_index) in the native DB order.
    """
    assert isinstance(mdb, Database), "fetch_all() must be patched to avoid re-wrapping"
    # identifier quoting differs between SQLite and PostgreSQL:
    # `index` is a reserved word in SQLite, schema-qualified names need quotes there
    rq = "`" if mdb.url.dialect == "sqlite" else ""
    tq = '"' if mdb.url.dialect == "sqlite" else ""
    if len(meta_ids) == 1:
        meta_id_sql = ("= %d" % meta_ids[0])
    else:
        meta_id_sql = "IN (%s)" % ", ".join(str(i) for i in meta_ids)
    # NOTE(review): the interpolated values are internal node IDs and commit hashes,
    # not user-supplied strings, so string-built SQL is acceptable here.
    query = f"""
    WITH RECURSIVE commit_history AS (
        SELECT
            p.child_id,
            p.{rq}index{rq} AS parent_index,
            pc.oid AS parent_oid,
            cc.oid AS child_oid,
            p.acc_id
        FROM
            {tq}github.node_commit_edge_parents{tq} p
            LEFT JOIN {tq}github.node_commit{tq} pc ON p.parent_id = pc.graph_id AND p.acc_id = pc.acc_id
            LEFT JOIN {tq}github.node_commit{tq} cc ON p.child_id = cc.graph_id AND p.acc_id = cc.acc_id
        WHERE
            p.parent_id IN ({", ".join(map(str, commit_ids))}) AND p.acc_id {meta_id_sql}
        UNION
        SELECT
            p.child_id,
            p.{rq}index{rq} AS parent_index,
            h.child_oid AS parent_oid,
            cc.oid AS child_oid,
            p.acc_id
        FROM
            {tq}github.node_commit_edge_parents{tq} p
            INNER JOIN commit_history h ON p.parent_id = h.child_id AND p.acc_id = h.acc_id
            LEFT JOIN {tq}github.node_commit{tq} cc ON p.child_id = cc.graph_id AND p.acc_id = cc.acc_id
        WHERE h.child_oid NOT IN ('{"', '".join(stop_hashes)}')
    ) SELECT
        parent_oid,
        CASE
            WHEN child_id IS NULL THEN '0000000000000000000000000000000000000000'
            ELSE child_oid
        END AS child_oid,
        parent_index
    FROM
        commit_history;
    """  # noqa
    rows = await mdb.fetch_all(query)
    if mdb.url.dialect == "sqlite":
        # SQLite rows are not tuples; normalize for the callers
        rows = [tuple(r) for r in rows]
    return rows
@sentry_span
async def fetch_dags_with_commits(commits: Mapping[str, Sequence[int]],
                                  prune: bool,
                                  account: int,
                                  meta_ids: Tuple[int, ...],
                                  mdb: Database,
                                  pdb: Database,
                                  cache: Optional[aiomcache.Client],
                                  ) -> Tuple[Dict[str, DAG], pd.DataFrame]:
    """
    Load full commit DAGs for the given commit node IDs mapped from repository names.

    :param commits: repository name -> sequence of commit node IDs.
    :return: 1. DAGs that contain the specified commits, by repository name. \
             2. DataFrame with loaded `commits` - *not with all the commits in the DAGs*.
    """
    # `commits` is rebound here: gather() receives the original mapping, while the first
    # result replaces it with a DataFrame of the fetched commit rows.
    commits, pdags = await gather(
        _fetch_commits_for_dags(commits, meta_ids, mdb, cache),
        fetch_precomputed_commit_history_dags(commits, account, pdb, cache),
        op="fetch_dags_with_commits/prepare",
    )
    dags = await fetch_repository_commits(
        pdags, commits, COMMIT_FETCH_COMMITS_COLUMNS, prune, account, meta_ids, mdb, pdb, cache)
    return dags, commits
@sentry_span
@cached(
    exptime=middle_term_exptime,
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    key=lambda commits, **_: (
        ";".join("%s: %s" % (k, ",".join(map(str, sorted(v))))
                 for k, v in sorted(commits.items())),
    ),
    refresh_on_access=True,
)
async def _fetch_commits_for_dags(commits: Mapping[str, Sequence[int]],
                                  meta_ids: Tuple[int, ...],
                                  mdb: Database,
                                  cache: Optional[aiomcache.Client],
                                  ) -> pd.DataFrame:
    """Load the commit rows referenced by `commits` (repository name -> commit node IDs).

    :return: DataFrame with COMMIT_FETCH_COMMITS_COLUMNS for the requested commits.
    """
    # one SELECT per repository, merged with UNION ALL into a single round trip
    queries = [
        select(COMMIT_FETCH_COMMITS_COLUMNS)
        .where(and_(PushCommit.repository_full_name == repo,
                    PushCommit.acc_id.in_(meta_ids),
                    PushCommit.node_id.in_any_values(nodes)))
        for repo, nodes in commits.items()
    ]
    return await read_sql_query(union_all(*queries), mdb, COMMIT_FETCH_COMMITS_COLUMNS)
| [
"athenian.api.models.metadata.github.PushCommit.node_id.in_any_values",
"athenian.api.controllers.miners.github.dag_accelerated.partition_dag",
"athenian.api.models.metadata.github.NodeCommit.acc_id.in_",
"numpy.argsort",
"athenian.api.models.metadata.github.PushCommit.committed_date.between",
"athenian.a... | [((3314, 3376), 'logging.getLogger', 'logging.getLogger', (["('%s.extract_commits' % metadata.__package__)"], {}), "('%s.extract_commits' % metadata.__package__)\n", (3331, 3376), False, 'import logging\n'), ((7429, 7497), 'numpy.in1d', 'np.in1d', (['branch_repos_names', 'default_repos_names'], {'assume_unique': '(True)'}), '(branch_repos_names, default_repos_names, assume_unique=True)\n', (7436, 7497), True, 'import numpy as np\n'), ((7648, 7672), 'numpy.argsort', 'np.argsort', (['branch_repos'], {}), '(branch_repos)\n', (7658, 7672), True, 'import numpy as np\n'), ((7948, 8003), 'numpy.in1d', 'np.in1d', (['commit_repos', 'branch_repos'], {'assume_unique': '(True)'}), '(commit_repos, branch_repos, assume_unique=True)\n', (7955, 8003), True, 'import numpy as np\n'), ((8039, 8094), 'numpy.in1d', 'np.in1d', (['branch_repos', 'commit_repos'], {'assume_unique': '(True)'}), '(branch_repos, commit_repos, assume_unique=True)\n', (8046, 8094), True, 'import numpy as np\n'), ((12106, 12166), 'numpy.unique', 'np.unique', (['df_repos'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(df_repos, return_inverse=True, return_counts=True)\n', (12115, 12166), True, 'import numpy as np\n'), ((12184, 12205), 'numpy.argsort', 'np.argsort', (['index_map'], {}), '(index_map)\n', (12194, 12205), True, 'import numpy as np\n'), ((12261, 12295), 'numpy.cumsum', 'np.cumsum', (['counts'], {'out': 'offsets[1:]'}), '(counts, out=offsets[1:])\n', (12270, 12295), True, 'import numpy as np\n'), ((13886, 13949), 'athenian.api.db.add_pdb_misses', 'add_pdb_misses', (['pdb', '"""fetch_repository_commits"""', 'missed_counter'], {}), "(pdb, 'fetch_repository_commits', missed_counter)\n", (13900, 13949), False, 'from athenian.api.db import add_pdb_hits, add_pdb_misses, Database, DatabaseLike\n'), ((20418, 20441), 'numpy.asarray', 'np.asarray', (['head_hashes'], {}), '(head_hashes)\n', (20428, 20441), True, 'import numpy as np\n'), ((20457, 20477), 'numpy.asarray', 
'np.asarray', (['head_ids'], {}), '(head_ids)\n', (20467, 20477), True, 'import numpy as np\n'), ((20502, 20543), 'numpy.unique', 'np.unique', (['head_hashes'], {'return_index': '(True)'}), '(head_hashes, return_index=True)\n', (20511, 20543), True, 'import numpy as np\n'), ((3405, 3436), 'athenian.api.models.metadata.github.PushCommit.acc_id.in_', 'PushCommit.acc_id.in_', (['meta_ids'], {}), '(meta_ids)\n', (3426, 3436), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((3446, 3499), 'athenian.api.models.metadata.github.PushCommit.committed_date.between', 'PushCommit.committed_date.between', (['date_from', 'date_to'], {}), '(date_from, date_to)\n', (3479, 3499), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((3509, 3551), 'athenian.api.models.metadata.github.PushCommit.repository_full_name.in_', 'PushCommit.repository_full_name.in_', (['repos'], {}), '(repos)\n', (3544, 3551), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((5827, 5869), 'athenian.api.async_utils.gather', 'gather', (['*tasks'], {'op': '"""extract_commits/fetch"""'}), "(*tasks, op='extract_commits/fetch')\n", (5833, 5869), False, 'from athenian.api.async_utils import gather, read_sql_query\n'), ((7267, 7297), 'numpy.char.add', 'np.char.add', (['branch_repos', '"""|"""'], {}), "(branch_repos, '|')\n", (7278, 7297), True, 'import numpy as np\n'), ((9590, 9624), 'numpy.concatenate', 'np.concatenate', (['accessible_indexes'], {}), '(accessible_indexes)\n', (9604, 9624), True, 'import numpy as np\n'), ((16842, 16891), 'athenian.api.controllers.miners.github.branches.load_branch_commit_dates', 'load_branch_commit_dates', (['branches', 'meta_ids', 'mdb'], {}), '(branches, meta_ids, mdb)\n', (16866, 16891), False, 'from 
athenian.api.controllers.miners.github.branches import BranchMiner, load_branch_commit_dates\n'), ((18972, 19041), 'sentry_sdk.start_span', 'sentry_sdk.start_span', ([], {'op': '"""fetch_precomputed_commit_history_dags/pdb"""'}), "(op='fetch_precomputed_commit_history_dags/pdb')\n", (18993, 19041), False, 'import sentry_sdk\n'), ((19655, 19680), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""S40"""'}), "([], dtype='S40')\n", (19663, 19680), True, 'import numpy as np\n'), ((19682, 19712), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.uint32'}), '([0], dtype=np.uint32)\n', (19690, 19712), True, 'import numpy as np\n'), ((19714, 19743), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint32'}), '([], dtype=np.uint32)\n', (19722, 19743), True, 'import numpy as np\n'), ((21526, 21600), 'athenian.api.controllers.miners.github.dag_accelerated.extract_first_parents', 'extract_first_parents', (['hashes', 'vertexes', 'edges', 'stop_heads'], {'max_depth': '(1000)'}), '(hashes, vertexes, edges, stop_heads, max_depth=1000)\n', (21547, 21600), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((21978, 22023), 'numpy.concatenate', 'np.concatenate', (['[stop_heads, partition_seeds]'], {}), '([stop_heads, partition_seeds])\n', (21992, 22023), True, 'import numpy as np\n'), ((22737, 22782), 'athenian.api.controllers.miners.github.dag_accelerated.join_dags', 'join_dags', (['hashes', 'vertexes', 'edges', 'new_edges'], {}), '(hashes, vertexes, edges, new_edges)\n', (22746, 22782), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((4264, 4305), 'athenian.api.models.metadata.github.PushCommit.author_user_id.in_', 'PushCommit.author_user_id.in_', (['author_ids'], {}), '(author_ids)\n', (4293, 4305), False, 'from athenian.api.models.metadata.github 
import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((4538, 4585), 'athenian.api.models.metadata.github.PushCommit.committer_user_id.in_', 'PushCommit.committer_user_id.in_', (['committer_ids'], {}), '(committer_ids)\n', (4570, 4585), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((8409, 8450), 'numpy.nonzero', 'np.nonzero', (['commit_repos_in_branches_mask'], {}), '(commit_repos_in_branches_mask)\n', (8419, 8450), True, 'import numpy as np\n'), ((8479, 8531), 'numpy.nonzero', 'np.nonzero', (['(commit_repo_indexes == commit_repo_index)'], {}), '(commit_repo_indexes == commit_repo_index)\n', (8489, 8531), True, 'import numpy as np\n'), ((8860, 8894), 'numpy.concatenate', 'np.concatenate', (['accessible_indexes'], {}), '(accessible_indexes)\n', (8874, 8894), True, 'import numpy as np\n'), ((9368, 9392), 'numpy.nonzero', 'np.nonzero', (['(indexes == i)'], {}), '(indexes == i)\n', (9378, 9392), True, 'import numpy as np\n'), ((12720, 12764), 'athenian.api.controllers.miners.github.dag_accelerated.searchsorted_inrange', 'searchsorted_inrange', (['hashes', 'required_heads'], {}), '(hashes, required_heads)\n', (12740, 12764), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((13989, 14038), 'athenian.api.async_utils.gather', 'gather', (['*tasks'], {'op': '"""fetch_repository_commits/mdb"""'}), "(*tasks, op='fetch_repository_commits/mdb')\n", (13995, 14038), False, 'from athenian.api.async_utils import gather, read_sql_query\n'), ((14697, 14733), 'sqlalchemy.dialects.postgresql.insert', 'postgres_insert', (['GitHubCommitHistory'], {}), '(GitHubCommitHistory)\n', (14712, 14733), True, 'from sqlalchemy.dialects.postgresql import insert as postgres_insert\n'), ((20756, 20772), 'numpy.unique', 'np.unique', (['edges'], {}), '(edges)\n', (20765, 20772), 
True, 'import numpy as np\n'), ((28317, 28336), 'sqlalchemy.union_all', 'union_all', (['*queries'], {}), '(*queries)\n', (28326, 28336), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((5003, 5021), 'sqlalchemy.and_', 'and_', (['*sql_filters'], {}), '(*sql_filters)\n', (5007, 5021), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((8645, 8665), 'numpy.array', 'np.array', (['[head_sha]'], {}), '([head_sha])\n', (8653, 8665), True, 'import numpy as np\n'), ((8730, 8793), 'numpy.in1d', 'np.in1d', (['repo_hashes', 'default_branch_hashes'], {'assume_unique': '(True)'}), '(repo_hashes, default_branch_hashes, assume_unique=True)\n', (8737, 8793), True, 'import numpy as np\n'), ((9499, 9554), 'numpy.in1d', 'np.in1d', (['repo_hashes', 'dags[repo][0]'], {'assume_unique': '(True)'}), '(repo_hashes, dags[repo][0], assume_unique=True)\n', (9506, 9554), True, 'import numpy as np\n'), ((12511, 12534), 'athenian.api.controllers.logical_repos.drop_logical_repo', 'drop_logical_repo', (['repo'], {}), '(repo)\n', (12528, 12534), False, 'from athenian.api.controllers.logical_repos import drop_logical_repo\n'), ((13631, 13686), 'athenian.api.controllers.miners.github.dag_accelerated.extract_subdag', 'extract_subdag', (['hashes', 'vertexes', 'edges', 'required_heads'], {}), '(hashes, vertexes, edges, required_heads)\n', (13645, 13686), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((14526, 14583), 'athenian.api.controllers.miners.github.dag_accelerated.extract_subdag', 'extract_subdag', (['hashes', 'vertexes', 'edges', 'repo_heads[repo]'], {}), '(hashes, vertexes, edges, repo_heads[repo])\n', (14540, 14583), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((20878, 20904), 
'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (20890, 20904), False, 'from datetime import datetime, timedelta, timezone\n'), ((20907, 20925), 'datetime.timedelta', 'timedelta', ([], {'days': '(90)'}), '(days=90)\n', (20916, 20925), False, 'from datetime import datetime, timedelta, timezone\n'), ((23113, 23147), 'numpy.delete', 'np.delete', (['head_hashes', 'collateral'], {}), '(head_hashes, collateral)\n', (23122, 23147), True, 'import numpy as np\n'), ((23175, 23206), 'numpy.delete', 'np.delete', (['head_ids', 'collateral'], {}), '(head_ids, collateral)\n', (23184, 23206), True, 'import numpy as np\n'), ((28023, 28059), 'sqlalchemy.select', 'select', (['COMMIT_FETCH_COMMITS_COLUMNS'], {}), '(COMMIT_FETCH_COMMITS_COLUMNS)\n', (28029, 28059), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((28141, 28172), 'athenian.api.models.metadata.github.PushCommit.acc_id.in_', 'PushCommit.acc_id.in_', (['meta_ids'], {}), '(meta_ids)\n', (28162, 28172), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((28194, 28233), 'athenian.api.models.metadata.github.PushCommit.node_id.in_any_values', 'PushCommit.node_id.in_any_values', (['nodes'], {}), '(nodes)\n', (28226, 28233), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((4978, 4996), 'sqlalchemy.select', 'select', (['cols_query'], {}), '(cols_query)\n', (4984, 4996), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((19407, 19436), 'athenian.api.controllers.miners.types.DAG', 'DAGStruct', (['row[ghrc.dag.name]'], {}), '(row[ghrc.dag.name])\n', (19416, 19436), True, 'from athenian.api.controllers.miners.types import DAG as DAGStruct\n'), ((22305, 22360), 'athenian.api.controllers.miners.github.dag_accelerated.partition_dag', 'partition_dag', (['hashes', 
'vertexes', 'edges', 'partition_seeds'], {}), '(hashes, vertexes, edges, partition_seeds)\n', (22318, 22360), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((3826, 3860), 'sqlalchemy.select', 'select', (['[User.login, User.node_id]'], {}), '([User.login, User.node_id])\n', (3832, 3860), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((3885, 3912), 'athenian.api.models.metadata.github.User.login.in_', 'User.login.in_', (['user_logins'], {}), '(user_logins)\n', (3899, 3912), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((3914, 3939), 'athenian.api.models.metadata.github.User.acc_id.in_', 'User.acc_id.in_', (['meta_ids'], {}), '(meta_ids)\n', (3929, 3939), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((5423, 5464), 'athenian.api.models.metadata.github.NodePullRequestCommit.commit_id.is_', 'NodePullRequestCommit.commit_id.is_', (['None'], {}), '(None)\n', (5458, 5464), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n'), ((15064, 15091), 'sqlalchemy.insert', 'insert', (['GitHubCommitHistory'], {}), '(GitHubCommitHistory)\n', (15070, 15091), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((19091, 19136), 'sqlalchemy.select', 'select', (['[ghrc.repository_full_name, ghrc.dag]'], {}), '([ghrc.repository_full_name, ghrc.dag])\n', (19097, 19136), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((22665, 22700), 'numpy.unique', 'np.unique', (['head_hashes[:batch_size]'], {}), '(head_hashes[:batch_size])\n', (22674, 22700), True, 'import numpy as np\n'), ((22988, 23029), 
'athenian.api.controllers.miners.github.dag_accelerated.searchsorted_inrange', 'searchsorted_inrange', (['hashes', 'head_hashes'], {}), '(hashes, head_hashes)\n', (23008, 23029), False, 'from athenian.api.controllers.miners.github.dag_accelerated import extract_first_parents, extract_subdag, join_dags, partition_dag, searchsorted_inrange\n'), ((5143, 5161), 'sqlalchemy.select', 'select', (['cols_query'], {}), '(cols_query)\n', (5149, 5161), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((5265, 5380), 'sqlalchemy.and_', 'and_', (['(PushCommit.node_id == NodePullRequestCommit.commit_id)', '(PushCommit.acc_id == NodePullRequestCommit.acc_id)'], {}), '(PushCommit.node_id == NodePullRequestCommit.commit_id, PushCommit.\n acc_id == NodePullRequestCommit.acc_id)\n', (5269, 5380), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((21317, 21348), 'sqlalchemy.desc', 'desc', (['NodeCommit.committed_date'], {}), '(NodeCommit.committed_date)\n', (21321, 21348), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((14322, 14390), 'athenian.api.controllers.miners.types.DAG.from_fields', 'DAGStruct.from_fields', ([], {'hashes': 'hashes', 'vertexes': 'vertexes', 'edges': 'edges'}), '(hashes=hashes, vertexes=vertexes, edges=edges)\n', (14343, 14390), True, 'from athenian.api.controllers.miners.types import DAG as DAGStruct\n'), ((20965, 20989), 'sqlalchemy.select', 'select', (['[NodeCommit.oid]'], {}), '([NodeCommit.oid])\n', (20971, 20989), False, 'from sqlalchemy import and_, desc, insert, outerjoin, select, union_all\n'), ((21234, 21265), 'athenian.api.models.metadata.github.NodeCommit.acc_id.in_', 'NodeCommit.acc_id.in_', (['meta_ids'], {}), '(meta_ids)\n', (21255, 21265), False, 'from athenian.api.models.metadata.github import Branch, NodeCommit, NodePullRequestCommit, PushCommit, Release, User\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import pickle
import numpy as np
import tensorflow as tf
from nnli.parser import SNLI
from nnli import util
from nnli import tfutil
from nnli import embeddings as E
from nnli import evaluation
from nnli.models import ConditionalBiLSTM
from nnli.models import FeedForwardDAM
from nnli.models import FeedForwardDAMP
from nnli.models import FeedForwardDAMS
from nnli.models import ESIM
import nnli.regularizers as R
from nnli.samplers import WithoutReplacementSampler
from nnli.generators import InstanceGenerator
import logging
# Module-wide logger, named after the executing script (e.g. "train-cli.py").
logger = logging.getLogger(os.path.basename(sys.argv[0]))
# Mean training loss is reported (and the accumulator reset) every this many batches.
REPORT_LOSS_INTERVAL = 100
def save_model(save_path, saver, session, index_to_token):
    """Checkpoint the TF session via *saver* and pickle the vocabulary next to it.

    Writes ``<save_path>_index_to_token.p`` (the index-to-token mapping) and the
    TensorFlow checkpoint itself. Does nothing when *save_path* is falsy.
    """
    if not save_path:
        return
    vocab_path = '{}_index_to_token.p'.format(save_path)
    with open(vocab_path, 'wb') as handle:
        pickle.dump(index_to_token, handle)
    saved_path = saver.save(session, save_path)
    logger.info('Model saved in {}'.format(saved_path))
    return
def main(argv):
    """Train an NLI model (the 'discriminator') with optional adversarial regularisation.

    Parses command-line options from ``argv``, reads the SNLI-format corpora,
    builds the TensorFlow graph — a sentence-pair classifier plus a set of
    trainable adversarial input embeddings (the 'adversary') — then runs the
    training loop, periodically evaluating on the validation/test sets and
    checkpointing the best model via ``save_model``.
    """
    logger.info('Command line: {}'.format(' '.join(arg for arg in argv)))

    def fmt(prog):
        # Wider help columns than argparse's default formatter.
        return argparse.HelpFormatter(prog, max_help_position=100, width=200)
    argparser = argparse.ArgumentParser('Regularising RTE/NLI models via Adversarial Training', formatter_class=fmt)
    argparser.add_argument('--train', '-t', action='store', type=str, default='data/snli/snli_1.0_train.jsonl.gz')
    argparser.add_argument('--valid', '-v', action='store', type=str, default='data/snli/snli_1.0_dev.jsonl.gz')
    argparser.add_argument('--test', '-T', action='store', type=str, default='data/snli/snli_1.0_test.jsonl.gz')
    argparser.add_argument('--test2', action='store', type=str, default=None)
    argparser.add_argument('--model', '-m', action='store', type=str, default='ff-dam',
                           choices=['cbilstm', 'ff-dam', 'esim'])
    argparser.add_argument('--optimizer', '-o', action='store', type=str, default='adagrad',
                           choices=['adagrad', 'adam'])
    argparser.add_argument('--embedding-size', action='store', type=int, default=300)
    argparser.add_argument('--representation-size', '-r', action='store', type=int, default=200)
    argparser.add_argument('--batch-size', '-b', action='store', type=int, default=32)
    argparser.add_argument('--epochs', '-e', action='store', type=int, default=1)
    argparser.add_argument('--dropout-keep-prob', '-d', action='store', type=float, default=1.0)
    argparser.add_argument('--learning-rate', '--lr', action='store', type=float, default=0.1)
    argparser.add_argument('--clip', '-c', action='store', type=float, default=None)
    argparser.add_argument('--seed', action='store', type=int, default=0)
    argparser.add_argument('--glove', action='store', type=str, default=None)
    argparser.add_argument('--restore', action='store', type=str, default=None)
    argparser.add_argument('--save', action='store', type=str, default=None)
    argparser.add_argument('--check-interval', '--check-every', '-C',
                           action='store', type=int, default=None)
    # The following parameters are devoted to regularization
    for rule_index in range(1, 5 + 1):
        argparser.add_argument('--regularizer{}-weight'.format(rule_index),
                               '-{}'.format(rule_index),
                               action='store', type=float, default=None)
    argparser.add_argument('--regularizer-inputs', '--ri', '-R', nargs='+', type=str)
    argparser.add_argument('--regularizer-nb-samples', '--rns', '-S', type=int, default=0)
    argparser.add_argument('--regularizer-nb-flips', '--rnf', '-F', type=int, default=0)
    args = argparser.parse_args(argv)
    # Command line arguments
    train_path = args.train
    valid_path = args.valid
    test_path = args.test
    test2_path = args.test2
    model_name = args.model
    optimizer_name = args.optimizer
    embedding_size = args.embedding_size
    representation_size = args.representation_size
    batch_size = args.batch_size
    nb_epochs = args.epochs
    dropout_keep_prob = args.dropout_keep_prob
    learning_rate = args.learning_rate
    clip_value = args.clip
    seed = args.seed
    glove_path = args.glove
    restore_path = args.restore
    save_path = args.save
    check_interval = args.check_interval
    # The following parameters are devoted to regularization
    r1_weight = args.regularizer1_weight
    r2_weight = args.regularizer2_weight
    r3_weight = args.regularizer3_weight
    r4_weight = args.regularizer4_weight
    r5_weight = args.regularizer5_weight
    r_input_paths = args.regularizer_inputs or []
    nb_r_samples = args.regularizer_nb_samples
    nb_r_flips = args.regularizer_nb_flips
    r_weights = [r1_weight, r2_weight, r3_weight, r4_weight, r5_weight]
    # Regularisation is active as soon as any rule weight is given.
    is_regularized = not all(r_weight is None for r_weight in r_weights)
    # Seed every source of randomness for reproducibility.
    np.random.seed(seed)
    rs = np.random.RandomState(seed)
    tf.set_random_seed(seed)
    logger.info('Reading corpus ..')
    snli = SNLI()
    train_is = snli.parse(path=train_path)
    valid_is = snli.parse(path=valid_path)
    test_is = snli.parse(path=test_path)
    test2_is = snli.parse(path=test2_path) if test2_path else None
    # Discrete/symbolic inputs used by the regularizers
    regularizer_is = [i for path in r_input_paths for i in snli.parse(path=path)]
    # Filtering out unuseful information
    regularizer_is = [
        {k: v for k, v in instance.items() if k in {'sentence1_parse_tokens', 'sentence2_parse_tokens'}}
        for instance in regularizer_is]
    all_is = train_is + valid_is + test_is
    if test2_is is not None:
        all_is += test2_is
    # Enumeration of tokens start at index=3:
    # index=0 PADDING
    # index=1 START_OF_SENTENCE
    # index=2 END_OF_SENTENCE
    # index=3 UNKNOWN_WORD
    bos_idx, eos_idx, unk_idx = 1, 2, 3
    # Words start at index 4
    start_idx = 1 + 3
    if restore_path is None:
        # Build the vocabulary from every split so evaluation-only tokens get ids.
        token_lst = [tkn for inst in all_is for tkn in inst['sentence1_parse_tokens'] + inst['sentence2_parse_tokens']]
        from collections import Counter
        token_cnt = Counter(token_lst)
        # Sort the tokens according to their frequency and lexicographic ordering
        sorted_vocabulary = sorted(token_cnt.keys(), key=lambda t: (- token_cnt[t], t))
        index_to_token = {idx: tkn for idx, tkn in enumerate(sorted_vocabulary, start=start_idx)}
    else:
        # Reuse the vocabulary pickled next to the checkpoint by save_model().
        vocab_path = '{}_index_to_token.p'.format(restore_path)
        logger.info('Restoring vocabulary from {} ..'.format(vocab_path))
        with open(vocab_path, 'rb') as f:
            index_to_token = pickle.load(f)
    token_to_index = {token: index for index, token in index_to_token.items()}
    entailment_idx, neutral_idx, contradiction_idx = 0, 1, 2
    label_to_index = {
        'entailment': entailment_idx,
        'neutral': neutral_idx,
        'contradiction': contradiction_idx,
    }
    name_to_optimizer = {
        'adagrad': tf.train.AdagradOptimizer,
        'adam': tf.train.AdamOptimizer
    }
    name_to_model = {
        'cbilstm': ConditionalBiLSTM,
        'ff-dam': FeedForwardDAM,
        'ff-damp': FeedForwardDAMP,
        'ff-dams': FeedForwardDAMS,
        'esim': ESIM
    }
    optimizer_class = name_to_optimizer[optimizer_name]
    optimizer = optimizer_class(learning_rate=learning_rate)
    model_class = name_to_model[model_name]
    token_kwargs = dict(bos_idx=bos_idx, eos_idx=eos_idx, unk_idx=unk_idx)
    train_tensors = util.to_tensors(train_is, token_to_index, label_to_index, **token_kwargs)
    valid_tensors = util.to_tensors(valid_is, token_to_index, label_to_index, **token_kwargs)
    test_tensors = util.to_tensors(test_is, token_to_index, label_to_index, **token_kwargs)
    test2_tensors = None
    if test2_is is not None:
        test2_tensors = util.to_tensors(test2_is, token_to_index, label_to_index, **token_kwargs)
    train_sequence1 = train_tensors['sequence1']
    train_sequence1_len = train_tensors['sequence1_length']
    train_sequence2 = train_tensors['sequence2']
    train_sequence2_len = train_tensors['sequence2_length']
    train_label = train_tensors['label']
    # Graph inputs for the discriminator.
    sequence1_ph = tf.placeholder(dtype=tf.int32, shape=[None, None], name='sequence1')
    sequence1_len_ph = tf.placeholder(dtype=tf.int32, shape=[None], name='sequence1_length')
    sequence2_ph = tf.placeholder(dtype=tf.int32, shape=[None, None], name='sequence2')
    sequence2_len_ph = tf.placeholder(dtype=tf.int32, shape=[None], name='sequence2_length')
    label_ph = tf.placeholder(dtype=tf.int32, shape=[None], name='label')
    dropout_keep_prob_ph = tf.placeholder(tf.float32, name='dropout_keep_prob')
    placeholders = {
        'sequence1': sequence1_ph,
        'sequence1_length': sequence1_len_ph,
        'sequence2': sequence2_ph,
        'sequence2_length': sequence2_len_ph,
        'label': label_ph,
        'dropout': dropout_keep_prob_ph
    }
    # Disable Dropout at evaluation time
    valid_tensors['dropout'] = 1.0
    test_tensors['dropout'] = 1.0
    if test2_tensors is not None:
        test2_tensors['dropout'] = 1.0
    clipped_sequence1 = tfutil.clip_sentence(sequence1_ph, sequence1_len_ph)
    clipped_sequence2 = tfutil.clip_sentence(sequence2_ph, sequence2_len_ph)
    vocab_size = max(token_to_index.values()) + 1
    logger.info('Initializing the Model')
    # The adversary owns trainable input embeddings that are optimised to
    # maximise the rule-violation loss of the discriminator.
    adversary_scope_name = 'adversary'
    with tf.variable_scope(adversary_scope_name):
        r_sequence1_embedding = tf.get_variable('s1_emb', shape=[26, embedding_size],
                                                initializer=tf.contrib.layers.xavier_initializer())
        r_sequence2_embedding = tf.get_variable('s2_emb', shape=[26, embedding_size],
                                                initializer=tf.contrib.layers.xavier_initializer())
        # Fixed pseudo-lengths for the adversarial sequences.
        r_sequence1_len_ph = tf.ones([26]) * 16
        r_sequence2_len_ph = tf.ones([26]) * 16
    discriminator_scope_name = 'discriminator'
    with tf.variable_scope(discriminator_scope_name):
        embedding_matrix_value = E.embedding_matrix(nb_tokens=vocab_size,
                                                  embedding_size=embedding_size,
                                                  token_to_index=token_to_index,
                                                  glove_path=glove_path,
                                                  unit_norm=True,
                                                  rs=rs, dtype=np.float32)
        # Word embeddings are frozen; only the model parameters train.
        embedding_layer = tf.get_variable('embeddings',
                                          initializer=tf.constant(embedding_matrix_value),
                                          trainable=False)
        sequence1_embedding = tf.nn.embedding_lookup(embedding_layer, clipped_sequence1)
        sequence2_embedding = tf.nn.embedding_lookup(embedding_layer, clipped_sequence2)
        model_kwargs = {
            'sequence1': sequence1_embedding,
            'sequence1_length': sequence1_len_ph,
            'sequence2': sequence2_embedding,
            'sequence2_length': sequence2_len_ph,
            'representation_size': representation_size,
            'dropout_keep_prob': dropout_keep_prob_ph
        }
        model = model_class(**model_kwargs)
        logits = model()
    predictions = tf.argmax(logits, axis=1, name='predictions')
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label_ph)
    loss = tf.reduce_mean(losses)
    if is_regularized:
        logger.info('Initializing the Regularizers')
        # The regularizers re-instantiate the model on the adversarial
        # (trainable) input embeddings instead of the corpus inputs.
        r_model_kwargs = model_kwargs.copy()
        r_model_kwargs.update({
            'sequence1': r_sequence1_embedding,
            'sequence1_length': r_sequence1_len_ph,
            'sequence2': r_sequence2_embedding,
            'sequence2_length': r_sequence2_len_ph
        })
        r_kwargs = {
            'model_class': model_class,
            'model_kwargs': r_model_kwargs,
            'debug': True
        }
        a_loss = 0
        with tf.variable_scope(discriminator_scope_name):
            if r1_weight:
                r_loss, _ = R.contradiction_acl(is_bi=True, **r_kwargs)
                a_loss += r1_weight * r_loss
            if r2_weight:
                r_loss, _ = R.entailment_acl(is_bi=True, **r_kwargs)
                a_loss += r2_weight * r_loss
            if r3_weight:
                r_loss, _ = R.neutral_acl(is_bi=True, **r_kwargs)
                a_loss += r3_weight * r_loss
            if r4_weight:
                r_loss, _ = R.entailment_reflexive_acl(**r_kwargs)
                a_loss += r4_weight * r_loss
            if r5_weight:
                r_loss, _ = R.entailment_neutral_acl(is_bi=True, **r_kwargs)
                a_loss += r5_weight * r_loss
        loss += a_loss
    discriminator_vars = tfutil.get_variables_in_scope(discriminator_scope_name)
    adversary_vars = tfutil.get_variables_in_scope(adversary_scope_name)
    discriminator_init_op = tf.variables_initializer(discriminator_vars)
    adversary_init_op = tf.variables_initializer(adversary_vars)
    # The frozen embedding matrix must not receive gradient updates.
    trainable_discriminator_vars = list(discriminator_vars)
    trainable_discriminator_vars.remove(embedding_layer)
    discriminator_optimizer_scope_name = 'discriminator_optimizer'
    with tf.variable_scope(discriminator_optimizer_scope_name):
        # BUG FIX: the discriminator must minimise the full training
        # objective `loss` (cross-entropy + weighted rule violations). The
        # original code minimised `- a_loss` here, so the classification
        # loss was never optimised at all, and running without regularizers
        # raised NameError (`a_loss` is only bound when is_regularized).
        gradients, v = zip(*optimizer.compute_gradients(loss, var_list=trainable_discriminator_vars))
        if clip_value:
            gradients, _ = tf.clip_by_global_norm(gradients, clip_value)
        training_step = optimizer.apply_gradients(zip(gradients, v))
    discriminator_optimizer_vars = tfutil.get_variables_in_scope(discriminator_optimizer_scope_name)
    discriminator_optimizer_init_op = tf.variables_initializer(discriminator_optimizer_vars)
    adversary_training_step = None
    adversary_optimizer_scope_name = 'adversary_optimizer'
    if is_regularized:
        # BUG FIX: this block used to (a) re-enter the *discriminator*
        # optimizer scope, so the adversary optimizer's slot variables were
        # created under the wrong scope and the initializer built below
        # collected nothing, and (b) minimise `loss` instead of maximising
        # the rule-violation loss `a_loss` (adversarial min-max).
        # It is also only built when regularization is active, since
        # otherwise `loss` has no gradient w.r.t. the adversary variables.
        with tf.variable_scope(adversary_optimizer_scope_name):
            gradients, v = zip(*optimizer.compute_gradients(- a_loss, var_list=adversary_vars))
            if clip_value:
                gradients, _ = tf.clip_by_global_norm(gradients, clip_value)
            adversary_training_step = optimizer.apply_gradients(zip(gradients, v))
    # Empty var list (and hence a no-op initializer) when not regularized.
    adversary_optimizer_vars = tfutil.get_variables_in_scope(adversary_optimizer_scope_name)
    adversary_optimizer_init_op = tf.variables_initializer(adversary_optimizer_vars)
    predictions_int = tf.cast(predictions, tf.int32)
    labels_int = tf.cast(label_ph, tf.int32)
    accuracy = tf.cast(tf.equal(x=predictions_int, y=labels_int), tf.float32)
    saver = tf.train.Saver(discriminator_vars + discriminator_optimizer_vars, max_to_keep=1)
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    nb_r_instances = len(regularizer_is)
    r_sampler = WithoutReplacementSampler(nb_instances=nb_r_instances) if is_regularized else None
    r_generator = InstanceGenerator(token_to_index=token_to_index)
    with tf.Session(config=session_config) as session:
        logger.info('Total Parameters: {}'
                    .format(tfutil.count_trainable_parameters()))
        logger.info('Total Discriminator Parameters: {}'
                    .format(tfutil.count_trainable_parameters(var_list=discriminator_vars)))
        logger.info('Total Trainable Discriminator Parameters: {}'
                    .format(tfutil.count_trainable_parameters(var_list=trainable_discriminator_vars)))
        if restore_path is not None:
            saver.restore(session, restore_path)
        else:
            session.run([discriminator_init_op, discriminator_optimizer_init_op])
        session.run([adversary_init_op, adversary_optimizer_init_op])
        nb_instances = train_sequence1.shape[0]
        batches = util.make_batches(size=nb_instances, batch_size=batch_size)
        loss_values = []
        best_valid_accuracy = None
        iteration_index = 0
        for epoch in range(1, nb_epochs + 1):
            # Shuffle, then semi-sort by length so batches have similar padding.
            order = rs.permutation(nb_instances)
            shuf_sequence1 = train_sequence1[order]
            shuf_sequence2 = train_sequence2[order]
            shuf_sequence1_len = train_sequence1_len[order]
            shuf_sequence2_len = train_sequence2_len[order]
            shuf_label = train_label[order]
            # Semi-sorting
            order = util.semi_sort(shuf_sequence1_len, shuf_sequence2_len)
            shuf_sequence1 = shuf_sequence1[order]
            shuf_sequence2 = shuf_sequence2[order]
            shuf_sequence1_len = shuf_sequence1_len[order]
            shuf_sequence2_len = shuf_sequence2_len[order]
            shuf_label = shuf_label[order]
            for batch_idx, (batch_start, batch_end) in enumerate(batches, start=1):
                iteration_index += 1
                batch_sequence1 = shuf_sequence1[batch_start:batch_end]
                batch_sequence2 = shuf_sequence2[batch_start:batch_end]
                batch_sequence1_len = shuf_sequence1_len[batch_start:batch_end]
                batch_sequence2_len = shuf_sequence2_len[batch_start:batch_end]
                batch_label = shuf_label[batch_start:batch_end]
                # Trim the padded matrices to the longest sequence in the batch.
                batch_max_size1 = np.max(batch_sequence1_len)
                batch_max_size2 = np.max(batch_sequence2_len)
                batch_sequence1 = batch_sequence1[:, :batch_max_size1]
                batch_sequence2 = batch_sequence2[:, :batch_max_size2]
                current_batch_size = batch_sequence1.shape[0]
                batch_feed_dict = {
                    sequence1_ph: batch_sequence1,
                    sequence1_len_ph: batch_sequence1_len,
                    sequence2_ph: batch_sequence2,
                    sequence2_len_ph: batch_sequence2_len,
                    label_ph: batch_label,
                    dropout_keep_prob_ph: dropout_keep_prob
                }
                if is_regularized:
                    # Sample regularizer instances and augment them with
                    # token-flipped variants.
                    r_instances = [regularizer_is[index] for index in r_sampler.sample(nb_r_samples)]
                    c_instances = []
                    for r_instance in r_instances:
                        r_sentence1 = r_instance['sentence1_parse_tokens']
                        r_sentence2 = r_instance['sentence2_parse_tokens']
                        f_sentence1_lst, f_sentence2_lst = r_generator.flip(r_sentence1, r_sentence2, nb_r_flips)
                        for f_sentence1, f_sentence2 in zip(f_sentence1_lst, f_sentence2_lst):
                            c_instance = {
                                'sentence1_parse_tokens': f_sentence1,
                                'sentence2_parse_tokens': f_sentence2
                            }
                            c_instances += [c_instance]
                    r_instances += c_instances
                    r_tensors = util.to_tensors(r_instances, token_to_index, label_to_index, **token_kwargs)
                    assert len(r_instances) == r_tensors['sequence1'].shape[0]
                    # logging.info('Regularising on {} samples ..'.format(len(r_instances)))
                    # NOTE(review): `r_sequence1_ph` / `r_sequence2_ph` are not
                    # defined anywhere in this function (the adversarial inputs
                    # are the trainable `r_sequence1_embedding` variables), so
                    # this update raises NameError as written — confirm which
                    # placeholders were intended. Also, `adversary_training_step`
                    # is built above but never run in this loop; presumably the
                    # adversary update was meant to be executed here — verify.
                    batch_feed_dict.update({
                        r_sequence1_ph: r_tensors['sequence1'],
                        r_sequence1_len_ph: r_tensors['sequence1_length'],
                        r_sequence2_ph: r_tensors['sequence2'],
                        r_sequence2_len_ph: r_tensors['sequence2_length'],
                    })
                _, loss_value = session.run([training_step, loss], feed_dict=batch_feed_dict)
                loss_values += [loss_value / current_batch_size]
                if len(loss_values) >= REPORT_LOSS_INTERVAL:
                    logger.info("Epoch {0}, Batch {1}\tLoss: {2}".format(epoch, batch_idx, util.stats(loss_values)))
                    loss_values = []
                # every k iterations, check whether accuracy improves
                if check_interval is not None and iteration_index % check_interval == 0:
                    accuracies_valid = evaluation.evaluate(session, valid_tensors, placeholders, accuracy, batch_size=256)
                    accuracies_test = evaluation.evaluate(session, test_tensors, placeholders, accuracy, batch_size=256)
                    accuracies_test2 = None
                    if test2_tensors is not None:
                        accuracies_test2 = evaluation.evaluate(session, test2_tensors, placeholders, accuracy,
                                                               batch_size=256)
                    logger.info("Epoch {0}\tBatch {1}\tValidation Accuracy: {2}, Test Accuracy: {3}"
                                .format(epoch, batch_idx, util.stats(accuracies_valid), util.stats(accuracies_test)))
                    if accuracies_test2 is not None:
                        logger.info("Epoch {0}\tBatch {1}\tValidation Accuracy: {2}, Test2 Accuracy: {3}"
                                    .format(epoch, batch_idx, util.stats(accuracies_valid),
                                            util.stats(accuracies_test2)))
                    if best_valid_accuracy is None or best_valid_accuracy < np.mean(accuracies_valid):
                        best_valid_accuracy = np.mean(accuracies_valid)
                        logger.info("Epoch {0}\tBatch {1}\tBest Validation Accuracy: {2}, Test Accuracy: {3}"
                                    .format(epoch, batch_idx, util.stats(accuracies_valid), util.stats(accuracies_test)))
                        if accuracies_test2 is not None:
                            logger.info("Epoch {0}\tBatch {1}\tBest Validation Accuracy: {2}, Test2 Accuracy: {3}"
                                        .format(epoch, batch_idx, util.stats(accuracies_valid),
                                                util.stats(accuracies_test2)))
                        save_model(save_path, saver, session, index_to_token)
            # End of epoch statistics
            accuracies_valid = evaluation.evaluate(session, valid_tensors, placeholders, accuracy, batch_size=256)
            accuracies_test = evaluation.evaluate(session, test_tensors, placeholders, accuracy, batch_size=256)
            accuracies_test2 = None
            if test2_tensors is not None:
                accuracies_test2 = evaluation.evaluate(session, test2_tensors, placeholders, accuracy, batch_size=256)
            logger.info("Epoch {0}\tValidation Accuracy: {1}, Test Accuracy: {2}"
                        .format(epoch, util.stats(accuracies_valid), util.stats(accuracies_test)))
            if accuracies_test2 is not None:
                logger.info("Epoch {0}\tValidation Accuracy: {1}, Test2 Accuracy: {2}"
                            .format(epoch, util.stats(accuracies_valid), util.stats(accuracies_test2)))
            if best_valid_accuracy is None or best_valid_accuracy < np.mean(accuracies_valid):
                best_valid_accuracy = np.mean(accuracies_valid)
                logger.info("Epoch {0}\tBest Validation Accuracy: {1}, Test Accuracy: {2}"
                            .format(epoch, util.stats(accuracies_valid), util.stats(accuracies_test)))
                if accuracies_test2 is not None:
                    logger.info("Epoch {0}\tBest Validation Accuracy: {1}, Test2 Accuracy: {2}"
                                .format(epoch, util.stats(accuracies_valid), util.stats(accuracies_test2)))
                save_model(save_path, saver, session, index_to_token)
    logger.info('Training finished.')
if __name__ == '__main__':
    # Configure root logging before delegating to main() with the CLI arguments.
    logging.basicConfig(level=logging.INFO)
    main(sys.argv[1:])
| [
"tensorflow.contrib.layers.xavier_initializer",
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"nnli.generators.InstanceGenerator",
"nnli.util.semi_sort",
"nnli.regularizers.neutral_acl",
"nnli.regularizers.entailment_reflexive_acl",
"tensorflow.variables_initializer",
"nnli.tfutil... | [((648, 677), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (664, 677), False, 'import os\n'), ((1232, 1336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Regularising RTE/NLI models via Adversarial Training"""'], {'formatter_class': 'fmt'}), "('Regularising RTE/NLI models via Adversarial Training',\n formatter_class=fmt)\n", (1255, 1336), False, 'import argparse\n'), ((4933, 4953), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4947, 4953), True, 'import numpy as np\n'), ((4963, 4990), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4984, 4990), True, 'import numpy as np\n'), ((4995, 5019), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (5013, 5019), True, 'import tensorflow as tf\n'), ((5070, 5076), 'nnli.parser.SNLI', 'SNLI', ([], {}), '()\n', (5074, 5076), False, 'from nnli.parser import SNLI\n'), ((7573, 7646), 'nnli.util.to_tensors', 'util.to_tensors', (['train_is', 'token_to_index', 'label_to_index'], {}), '(train_is, token_to_index, label_to_index, **token_kwargs)\n', (7588, 7646), False, 'from nnli import util\n'), ((7667, 7740), 'nnli.util.to_tensors', 'util.to_tensors', (['valid_is', 'token_to_index', 'label_to_index'], {}), '(valid_is, token_to_index, label_to_index, **token_kwargs)\n', (7682, 7740), False, 'from nnli import util\n'), ((7760, 7832), 'nnli.util.to_tensors', 'util.to_tensors', (['test_is', 'token_to_index', 'label_to_index'], {}), '(test_is, token_to_index, label_to_index, **token_kwargs)\n', (7775, 7832), False, 'from nnli import util\n'), ((8268, 8336), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sequence1"""'}), "(dtype=tf.int32, shape=[None, None], name='sequence1')\n", (8282, 8336), True, 'import tensorflow as tf\n'), ((8360, 8429), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': 
'[None]', 'name': '"""sequence1_length"""'}), "(dtype=tf.int32, shape=[None], name='sequence1_length')\n", (8374, 8429), True, 'import tensorflow as tf\n'), ((8450, 8518), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sequence2"""'}), "(dtype=tf.int32, shape=[None, None], name='sequence2')\n", (8464, 8518), True, 'import tensorflow as tf\n'), ((8542, 8611), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""sequence2_length"""'}), "(dtype=tf.int32, shape=[None], name='sequence2_length')\n", (8556, 8611), True, 'import tensorflow as tf\n'), ((8628, 8686), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""label"""'}), "(dtype=tf.int32, shape=[None], name='label')\n", (8642, 8686), True, 'import tensorflow as tf\n'), ((8714, 8766), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""dropout_keep_prob"""'}), "(tf.float32, name='dropout_keep_prob')\n", (8728, 8766), True, 'import tensorflow as tf\n'), ((9234, 9286), 'nnli.tfutil.clip_sentence', 'tfutil.clip_sentence', (['sequence1_ph', 'sequence1_len_ph'], {}), '(sequence1_ph, sequence1_len_ph)\n', (9254, 9286), False, 'from nnli import tfutil\n'), ((9311, 9363), 'nnli.tfutil.clip_sentence', 'tfutil.clip_sentence', (['sequence2_ph', 'sequence2_len_ph'], {}), '(sequence2_ph, sequence2_len_ph)\n', (9331, 9363), False, 'from nnli import tfutil\n'), ((12925, 12980), 'nnli.tfutil.get_variables_in_scope', 'tfutil.get_variables_in_scope', (['discriminator_scope_name'], {}), '(discriminator_scope_name)\n', (12954, 12980), False, 'from nnli import tfutil\n'), ((13002, 13053), 'nnli.tfutil.get_variables_in_scope', 'tfutil.get_variables_in_scope', (['adversary_scope_name'], {}), '(adversary_scope_name)\n', (13031, 13053), False, 'from nnli import tfutil\n'), ((13083, 13127), 'tensorflow.variables_initializer', 'tf.variables_initializer', 
(['discriminator_vars'], {}), '(discriminator_vars)\n', (13107, 13127), True, 'import tensorflow as tf\n'), ((13152, 13192), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['adversary_vars'], {}), '(adversary_vars)\n', (13176, 13192), True, 'import tensorflow as tf\n'), ((13751, 13816), 'nnli.tfutil.get_variables_in_scope', 'tfutil.get_variables_in_scope', (['discriminator_optimizer_scope_name'], {}), '(discriminator_optimizer_scope_name)\n', (13780, 13816), False, 'from nnli import tfutil\n'), ((13855, 13909), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['discriminator_optimizer_vars'], {}), '(discriminator_optimizer_vars)\n', (13879, 13909), True, 'import tensorflow as tf\n'), ((14330, 14391), 'nnli.tfutil.get_variables_in_scope', 'tfutil.get_variables_in_scope', (['adversary_optimizer_scope_name'], {}), '(adversary_optimizer_scope_name)\n', (14359, 14391), False, 'from nnli import tfutil\n'), ((14426, 14476), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['adversary_optimizer_vars'], {}), '(adversary_optimizer_vars)\n', (14450, 14476), True, 'import tensorflow as tf\n'), ((14501, 14531), 'tensorflow.cast', 'tf.cast', (['predictions', 'tf.int32'], {}), '(predictions, tf.int32)\n', (14508, 14531), True, 'import tensorflow as tf\n'), ((14549, 14576), 'tensorflow.cast', 'tf.cast', (['label_ph', 'tf.int32'], {}), '(label_ph, tf.int32)\n', (14556, 14576), True, 'import tensorflow as tf\n'), ((14668, 14753), 'tensorflow.train.Saver', 'tf.train.Saver', (['(discriminator_vars + discriminator_optimizer_vars)'], {'max_to_keep': '(1)'}), '(discriminator_vars + discriminator_optimizer_vars, max_to_keep=1\n )\n', (14682, 14753), True, 'import tensorflow as tf\n'), ((14771, 14787), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (14785, 14787), True, 'import tensorflow as tf\n'), ((14999, 15047), 'nnli.generators.InstanceGenerator', 'InstanceGenerator', ([], {'token_to_index': 'token_to_index'}), 
'(token_to_index=token_to_index)\n', (15016, 15047), False, 'from nnli.generators import InstanceGenerator\n'), ((23553, 23592), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (23572, 23592), False, 'import logging\n'), ((1152, 1214), 'argparse.HelpFormatter', 'argparse.HelpFormatter', (['prog'], {'max_help_position': '(100)', 'width': '(200)'}), '(prog, max_help_position=100, width=200)\n', (1174, 1214), False, 'import argparse\n'), ((6191, 6209), 'collections.Counter', 'Counter', (['token_lst'], {}), '(token_lst)\n', (6198, 6209), False, 'from collections import Counter\n'), ((7912, 7985), 'nnli.util.to_tensors', 'util.to_tensors', (['test2_is', 'token_to_index', 'label_to_index'], {}), '(test2_is, token_to_index, label_to_index, **token_kwargs)\n', (7927, 7985), False, 'from nnli import util\n'), ((9508, 9547), 'tensorflow.variable_scope', 'tf.variable_scope', (['adversary_scope_name'], {}), '(adversary_scope_name)\n', (9525, 9547), True, 'import tensorflow as tf\n'), ((10075, 10118), 'tensorflow.variable_scope', 'tf.variable_scope', (['discriminator_scope_name'], {}), '(discriminator_scope_name)\n', (10092, 10118), True, 'import tensorflow as tf\n'), ((10154, 10328), 'nnli.embeddings.embedding_matrix', 'E.embedding_matrix', ([], {'nb_tokens': 'vocab_size', 'embedding_size': 'embedding_size', 'token_to_index': 'token_to_index', 'glove_path': 'glove_path', 'unit_norm': '(True)', 'rs': 'rs', 'dtype': 'np.float32'}), '(nb_tokens=vocab_size, embedding_size=embedding_size,\n token_to_index=token_to_index, glove_path=glove_path, unit_norm=True,\n rs=rs, dtype=np.float32)\n', (10172, 10328), True, 'from nnli import embeddings as E\n'), ((10819, 10877), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_layer', 'clipped_sequence1'], {}), '(embedding_layer, clipped_sequence1)\n', (10841, 10877), True, 'import tensorflow as tf\n'), ((10908, 10966), 'tensorflow.nn.embedding_lookup', 
'tf.nn.embedding_lookup', (['embedding_layer', 'clipped_sequence2'], {}), '(embedding_layer, clipped_sequence2)\n', (10930, 10966), True, 'import tensorflow as tf\n'), ((11401, 11446), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(1)', 'name': '"""predictions"""'}), "(logits, axis=1, name='predictions')\n", (11410, 11446), True, 'import tensorflow as tf\n'), ((11465, 11543), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'label_ph'}), '(logits=logits, labels=label_ph)\n', (11511, 11543), True, 'import tensorflow as tf\n'), ((11559, 11581), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (11573, 11581), True, 'import tensorflow as tf\n'), ((13389, 13442), 'tensorflow.variable_scope', 'tf.variable_scope', (['discriminator_optimizer_scope_name'], {}), '(discriminator_optimizer_scope_name)\n', (13406, 13442), True, 'import tensorflow as tf\n'), ((13980, 14033), 'tensorflow.variable_scope', 'tf.variable_scope', (['discriminator_optimizer_scope_name'], {}), '(discriminator_optimizer_scope_name)\n', (13997, 14033), True, 'import tensorflow as tf\n'), ((14600, 14641), 'tensorflow.equal', 'tf.equal', ([], {'x': 'predictions_int', 'y': 'labels_int'}), '(x=predictions_int, y=labels_int)\n', (14608, 14641), True, 'import tensorflow as tf\n'), ((14898, 14952), 'nnli.samplers.WithoutReplacementSampler', 'WithoutReplacementSampler', ([], {'nb_instances': 'nb_r_instances'}), '(nb_instances=nb_r_instances)\n', (14923, 14952), False, 'from nnli.samplers import WithoutReplacementSampler\n'), ((15058, 15091), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_config'}), '(config=session_config)\n', (15068, 15091), True, 'import tensorflow as tf\n'), ((15854, 15913), 'nnli.util.make_batches', 'util.make_batches', ([], {'size': 'nb_instances', 'batch_size': 'batch_size'}), '(size=nb_instances, batch_size=batch_size)\n', (15871, 15913), False, 
'from nnli import util\n'), ((870, 901), 'pickle.dump', 'pickle.dump', (['index_to_token', 'fs'], {}), '(index_to_token, fs)\n', (881, 901), False, 'import pickle\n'), ((6700, 6714), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6711, 6714), False, 'import pickle\n'), ((9950, 9963), 'tensorflow.ones', 'tf.ones', (['[26]'], {}), '([26])\n', (9957, 9963), True, 'import tensorflow as tf\n'), ((9998, 10011), 'tensorflow.ones', 'tf.ones', (['[26]'], {}), '([26])\n', (10005, 10011), True, 'import tensorflow as tf\n'), ((12124, 12167), 'tensorflow.variable_scope', 'tf.variable_scope', (['discriminator_scope_name'], {}), '(discriminator_scope_name)\n', (12141, 12167), True, 'import tensorflow as tf\n'), ((13600, 13645), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'clip_value'], {}), '(gradients, clip_value)\n', (13622, 13645), True, 'import tensorflow as tf\n'), ((14173, 14218), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'clip_value'], {}), '(gradients, clip_value)\n', (14195, 14218), True, 'import tensorflow as tf\n'), ((16415, 16469), 'nnli.util.semi_sort', 'util.semi_sort', (['shuf_sequence1_len', 'shuf_sequence2_len'], {}), '(shuf_sequence1_len, shuf_sequence2_len)\n', (16429, 16469), False, 'from nnli import util\n'), ((21988, 22075), 'nnli.evaluation.evaluate', 'evaluation.evaluate', (['session', 'valid_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, valid_tensors, placeholders, accuracy,\n batch_size=256)\n', (22007, 22075), False, 'from nnli import evaluation\n'), ((22102, 22188), 'nnli.evaluation.evaluate', 'evaluation.evaluate', (['session', 'test_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, test_tensors, placeholders, accuracy,\n batch_size=256)\n', (22121, 22188), False, 'from nnli import evaluation\n'), ((9695, 9733), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (9731, 
9733), True, 'import tensorflow as tf\n'), ((9881, 9919), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (9917, 9919), True, 'import tensorflow as tf\n'), ((10692, 10727), 'tensorflow.constant', 'tf.constant', (['embedding_matrix_value'], {}), '(embedding_matrix_value)\n', (10703, 10727), True, 'import tensorflow as tf\n'), ((12223, 12266), 'nnli.regularizers.contradiction_acl', 'R.contradiction_acl', ([], {'is_bi': '(True)'}), '(is_bi=True, **r_kwargs)\n', (12242, 12266), True, 'import nnli.regularizers as R\n'), ((12366, 12406), 'nnli.regularizers.entailment_acl', 'R.entailment_acl', ([], {'is_bi': '(True)'}), '(is_bi=True, **r_kwargs)\n', (12382, 12406), True, 'import nnli.regularizers as R\n'), ((12506, 12543), 'nnli.regularizers.neutral_acl', 'R.neutral_acl', ([], {'is_bi': '(True)'}), '(is_bi=True, **r_kwargs)\n', (12519, 12543), True, 'import nnli.regularizers as R\n'), ((12643, 12681), 'nnli.regularizers.entailment_reflexive_acl', 'R.entailment_reflexive_acl', ([], {}), '(**r_kwargs)\n', (12669, 12681), True, 'import nnli.regularizers as R\n'), ((12781, 12829), 'nnli.regularizers.entailment_neutral_acl', 'R.entailment_neutral_acl', ([], {'is_bi': '(True)'}), '(is_bi=True, **r_kwargs)\n', (12805, 12829), True, 'import nnli.regularizers as R\n'), ((15175, 15210), 'nnli.tfutil.count_trainable_parameters', 'tfutil.count_trainable_parameters', ([], {}), '()\n', (15208, 15210), False, 'from nnli import tfutil\n'), ((15298, 15360), 'nnli.tfutil.count_trainable_parameters', 'tfutil.count_trainable_parameters', ([], {'var_list': 'discriminator_vars'}), '(var_list=discriminator_vars)\n', (15331, 15360), False, 'from nnli import tfutil\n'), ((15458, 15530), 'nnli.tfutil.count_trainable_parameters', 'tfutil.count_trainable_parameters', ([], {'var_list': 'trainable_discriminator_vars'}), '(var_list=trainable_discriminator_vars)\n', (15491, 15530), False, 'from nnli import tfutil\n'), ((17259, 17286), 
'numpy.max', 'np.max', (['batch_sequence1_len'], {}), '(batch_sequence1_len)\n', (17265, 17286), True, 'import numpy as np\n'), ((17321, 17348), 'numpy.max', 'np.max', (['batch_sequence2_len'], {}), '(batch_sequence2_len)\n', (17327, 17348), True, 'import numpy as np\n'), ((22299, 22386), 'nnli.evaluation.evaluate', 'evaluation.evaluate', (['session', 'test2_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, test2_tensors, placeholders, accuracy,\n batch_size=256)\n', (22318, 22386), False, 'from nnli import evaluation\n'), ((22936, 22961), 'numpy.mean', 'np.mean', (['accuracies_valid'], {}), '(accuracies_valid)\n', (22943, 22961), True, 'import numpy as np\n'), ((18873, 18949), 'nnli.util.to_tensors', 'util.to_tensors', (['r_instances', 'token_to_index', 'label_to_index'], {}), '(r_instances, token_to_index, label_to_index, **token_kwargs)\n', (18888, 18949), False, 'from nnli import util\n'), ((20046, 20133), 'nnli.evaluation.evaluate', 'evaluation.evaluate', (['session', 'valid_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, valid_tensors, placeholders, accuracy,\n batch_size=256)\n', (20065, 20133), False, 'from nnli import evaluation\n'), ((20168, 20254), 'nnli.evaluation.evaluate', 'evaluation.evaluate', (['session', 'test_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, test_tensors, placeholders, accuracy,\n batch_size=256)\n', (20187, 20254), False, 'from nnli import evaluation\n'), ((22505, 22533), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (22515, 22533), False, 'from nnli import util\n'), ((22535, 22562), 'nnli.util.stats', 'util.stats', (['accuracies_test'], {}), '(accuracies_test)\n', (22545, 22562), False, 'from nnli import util\n'), ((22871, 22896), 'numpy.mean', 'np.mean', (['accuracies_valid'], {}), '(accuracies_valid)\n', (22878, 22896), True, 'import numpy as np\n'), ((20389, 20476), 'nnli.evaluation.evaluate', 
'evaluation.evaluate', (['session', 'test2_tensors', 'placeholders', 'accuracy'], {'batch_size': '(256)'}), '(session, test2_tensors, placeholders, accuracy,\n batch_size=256)\n', (20408, 20476), False, 'from nnli import evaluation\n'), ((21233, 21258), 'numpy.mean', 'np.mean', (['accuracies_valid'], {}), '(accuracies_valid)\n', (21240, 21258), True, 'import numpy as np\n'), ((22741, 22769), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (22751, 22769), False, 'from nnli import util\n'), ((22771, 22799), 'nnli.util.stats', 'util.stats', (['accuracies_test2'], {}), '(accuracies_test2)\n', (22781, 22799), False, 'from nnli import util\n'), ((23096, 23124), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (23106, 23124), False, 'from nnli import util\n'), ((23126, 23153), 'nnli.util.stats', 'util.stats', (['accuracies_test'], {}), '(accuracies_test)\n', (23136, 23153), False, 'from nnli import util\n'), ((19784, 19807), 'nnli.util.stats', 'util.stats', (['loss_values'], {}), '(loss_values)\n', (19794, 19807), False, 'from nnli import util\n'), ((20696, 20724), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (20706, 20724), False, 'from nnli import util\n'), ((20726, 20753), 'nnli.util.stats', 'util.stats', (['accuracies_test'], {}), '(accuracies_test)\n', (20736, 20753), False, 'from nnli import util\n'), ((21160, 21185), 'numpy.mean', 'np.mean', (['accuracies_valid'], {}), '(accuracies_valid)\n', (21167, 21185), True, 'import numpy as np\n'), ((23349, 23377), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (23359, 23377), False, 'from nnli import util\n'), ((23379, 23407), 'nnli.util.stats', 'util.stats', (['accuracies_test2'], {}), '(accuracies_test2)\n', (23389, 23407), False, 'from nnli import util\n'), ((20978, 21006), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (20988, 21006), 
False, 'from nnli import util\n'), ((21052, 21080), 'nnli.util.stats', 'util.stats', (['accuracies_test2'], {}), '(accuracies_test2)\n', (21062, 21080), False, 'from nnli import util\n'), ((21431, 21459), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (21441, 21459), False, 'from nnli import util\n'), ((21461, 21488), 'nnli.util.stats', 'util.stats', (['accuracies_test'], {}), '(accuracies_test)\n', (21471, 21488), False, 'from nnli import util\n'), ((21730, 21758), 'nnli.util.stats', 'util.stats', (['accuracies_valid'], {}), '(accuracies_valid)\n', (21740, 21758), False, 'from nnli import util\n'), ((21808, 21836), 'nnli.util.stats', 'util.stats', (['accuracies_test2'], {}), '(accuracies_test2)\n', (21818, 21836), False, 'from nnli import util\n')] |
import cv2
import numpy as np

print("OpenCV Version:", cv2.__version__)

# Load the source photograph.
img = cv2.imread("images/color-paint.jpg")
# Report the image dimensions (rows, cols, channels).
print(img.shape)

# Target size of the rectified output patch.
width, height = 250, 350
# Corner coordinates of the quadrilateral in the source image,
# ordered top-left, top-right, bottom-left, bottom-right.
src_corners = np.float32([[111, 219], [287, 188], [152, 482], [352, 440]])
dst_corners = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

# Compute the 3x3 homography mapping source corners onto the flat rectangle,
# then warp the region out of the photo.
matrix = cv2.getPerspectiveTransform(src_corners, dst_corners)
imgOutput = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow("Original Image", img)
cv2.imshow("Perspective Transform", imgOutput)
# Block until any key is pressed, then the windows may close.
cv2.waitKey(0)
"cv2.warpPerspective",
"cv2.getPerspectiveTransform",
"cv2.waitKey",
"numpy.float32",
"cv2.imread",
"cv2.imshow"
] | [((80, 116), 'cv2.imread', 'cv2.imread', (['"""images/color-paint.jpg"""'], {}), "('images/color-paint.jpg')\n", (90, 116), False, 'import cv2\n'), ((213, 273), 'numpy.float32', 'np.float32', (['[[111, 219], [287, 188], [152, 482], [352, 440]]'], {}), '([[111, 219], [287, 188], [152, 482], [352, 440]])\n', (223, 273), True, 'import numpy as np\n'), ((274, 336), 'numpy.float32', 'np.float32', (['[[0, 0], [width, 0], [0, height], [width, height]]'], {}), '([[0, 0], [width, 0], [0, height], [width, height]])\n', (284, 336), True, 'import numpy as np\n'), ((339, 378), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (366, 378), False, 'import cv2\n'), ((391, 440), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'matrix', '(width, height)'], {}), '(img, matrix, (width, height))\n', (410, 440), False, 'import cv2\n'), ((442, 475), 'cv2.imshow', 'cv2.imshow', (['"""Original Image"""', 'img'], {}), "('Original Image', img)\n", (452, 475), False, 'import cv2\n'), ((476, 522), 'cv2.imshow', 'cv2.imshow', (['"""Perspective Transform"""', 'imgOutput'], {}), "('Perspective Transform', imgOutput)\n", (486, 522), False, 'import cv2\n'), ((613, 627), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (624, 627), False, 'import cv2\n')] |
import argparse
import h5py
import numpy as np
import grsn
def load_all_data(input_hdf):
    """Load every cancer type's expression matrix from an HDF5 file.

    The file is expected to hold one group per cancer type, each containing a
    'columns' dataset (patient IDs) and a 'data' dataset (features x patients),
    plus a top-level 'index' dataset naming the feature rows.

    :param input_hdf: path to the input HDF5 file.
    :return: tuple (combined, ctypes, patients_ls, idx) where combined is a
        (n_features, n_patients) array with all cancer types concatenated
        column-wise, ctypes the list of cancer-type keys, patients_ls the
        per-type patient ID arrays, and idx the feature index.
    """
    # BUG FIX: the function previously ignored its `input_hdf` parameter and
    # read the module-global `args.input_hdf`, making it unusable outside the
    # CLI entry point (and crashing if `args` did not exist yet).
    print("Loading data: ", input_hdf)
    with h5py.File(input_hdf, "r") as f_in:
        ctypes = [k for k in f_in.keys() if k != "index"]
        patients_ls = [f_in[ct]['columns'][:] for ct in ctypes]
        n_patients = sum(len(p) for p in patients_ls)
        idx = f_in['index'][:]
        combined = np.empty((idx.size, n_patients))
        # Copy each cancer type's block into its contiguous column slice.
        leading = 0
        lagging = 0
        for pat, ct in zip(patients_ls, ctypes):
            leading += len(pat)
            combined[:, lagging:leading] = f_in[ct]['data'][:]
            lagging = leading
    return combined, ctypes, patients_ls, idx
def dump_data(arr, ctypes, patients, idx, out_hdf):
    """Write the combined matrix back out, one HDF5 group per cancer type.

    :param arr: (n_features, n_patients) array, cancer types side by side.
    :param ctypes: cancer-type group names, in the same order as `patients`.
    :param patients: per-type sequences of patient IDs (the columns).
    :param idx: feature names for the rows, stored under 'index'.
    :param out_hdf: path of the HDF5 file to create.
    """
    print("Saving results: ", out_hdf)
    str_dtype = h5py.string_dtype('utf-8')
    with h5py.File(out_hdf, "w") as f_out:
        # Feature names shared by every cancer type.
        index_ds = f_out.create_dataset("index", shape=idx.shape,
                                       dtype=str_dtype)
        index_ds[:] = idx
        start = 0
        for ct, pat in zip(ctypes, patients):
            stop = start + len(pat)
            # This cancer type's slice of the combined matrix.
            data_ds = f_out.create_dataset(ct + "/data",
                                           shape=(arr.shape[0], len(pat)))
            data_ds[:] = arr[:, start:stop]
            # Patient identifiers labelling the columns above.
            col_ds = f_out.create_dataset(ct + "/columns",
                                          shape=(len(pat),),
                                          dtype=str_dtype)
            col_ds[:] = pat
            start = stop
    return
if __name__ == "__main__":
    # Command-line interface: input/output files plus GRSN hyper-parameters.
    cli = argparse.ArgumentParser()
    cli.add_argument("input_hdf", help="path to the input HDF file")
    cli.add_argument("output_hdf", help="path to the output HDF file")
    cli.add_argument("--set-size", help="size of the rank-invariant set", type=int, default=100)
    cli.add_argument("--iterations", help="number of iterations", type=int, default=4)
    args = cli.parse_args()

    arr, ctypes, patients, idx = load_all_data(args.input_hdf)

    # Normalise, then persist the result alongside the original layout.
    print("Performing GSRN")
    result = grsn.grsn(arr, args.set_size, args.iterations)
    dump_data(result, ctypes, patients, idx, args.output_hdf)
| [
"h5py.File",
"argparse.ArgumentParser",
"grsn.grsn",
"numpy.empty",
"h5py.string_dtype"
] | [((1752, 1777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1775, 1777), False, 'import argparse\n'), ((2253, 2299), 'grsn.grsn', 'grsn.grsn', (['arr', 'args.set_size', 'args.iterations'], {}), '(arr, args.set_size, args.iterations)\n', (2262, 2299), False, 'import grsn\n'), ((146, 176), 'h5py.File', 'h5py.File', (['args.input_hdf', '"""r"""'], {}), "(args.input_hdf, 'r')\n", (155, 176), False, 'import h5py\n'), ((433, 465), 'numpy.empty', 'np.empty', (['(idx.size, n_patients)'], {}), '((idx.size, n_patients))\n', (441, 465), True, 'import numpy as np\n'), ((849, 872), 'h5py.File', 'h5py.File', (['out_hdf', '"""w"""'], {}), "(out_hdf, 'w')\n", (858, 872), False, 'import h5py\n'), ((1021, 1047), 'h5py.string_dtype', 'h5py.string_dtype', (['"""utf-8"""'], {}), "('utf-8')\n", (1038, 1047), False, 'import h5py\n'), ((1611, 1637), 'h5py.string_dtype', 'h5py.string_dtype', (['"""utf-8"""'], {}), "('utf-8')\n", (1628, 1637), False, 'import h5py\n')] |
# Based on the code from: https://github.com/tkipf/keras-gcn
import tensorflow as tf
from tensorflow.keras import activations, initializers, constraints
from tensorflow.keras import regularizers
import tensorflow.keras.backend as K
import scipy.sparse as sp
import numpy as np
import pickle, copy
class GCN(tf.keras.Model):
    def __init__(self, nhid, nclass, epochs, train_ratio, eval_ratio,
                 sparse_input=True, early_stopping=True, dropout=0.5, nlayer=2, feature_columns=None,
                 id_col='id', feature_col='features', from_node_col='from_node_id', to_node_col='to_node_id'):
        """
        Implementation of GCN in this paper: https://arxiv.org/pdf/1609.02907.pdf. The original tensorflow implementation
        is accessible here: https://github.com/tkipf/gcn, and one can find more information about GCN through:
        http://tkipf.github.io/graph-convolutional-networks/.
        :param nhid: Number of hidden units for GCN.
            type nhid: int.
        :param nclass: Number of classes in total which will be the output dimension.
            type nclass: int.
        :param epochs: Number of epochs for the model to be trained.
            type epochs: int.
        :param train_ratio: Percentage of data points to be used for training.
            type train_ratio: float.
        :param eval_ratio: Percentage of data points to be used for evaluating.
            type eval_ratio: float.
        :param early_stopping: Whether to use early stopping trick during the training phase.
            type early_stopping: bool.
        :param dropout: The rate for dropout.
            type dropout: float.
        :param nlayer: Number of GCNLayer to be used in the model.
            type nlayer: int.
        :param feature_columns: a list of tf.feature_column. (Not used in this model)
            type feature_columns: list.
        :param id_col: Name for the column in database to be used as the id of each node.
            type id_col: string.
        :param feature_col: Name for the column in database to be used as the features of each node.
            type feature_col: string.
        :param from_node_col: Name for the column in database to be used as the from_node id of each edge.
            type from_node_col: string.
        :param to_node_col: Name for the column in database to be used as the to_node id of each edge.
            type to_node_col: string.
        """
        super(GCN, self).__init__()
        assert dropout < 1 and dropout > 0, "Please make sure dropout rate is a float between 0 and 1."
        assert train_ratio < 1 and train_ratio > 0, "Please make sure train_ratio is a float between 0 and 1."
        assert eval_ratio < 1 and eval_ratio > 0, "Please make sure eval_ratio is a float between 0 and 1."
        self.gc_layers = list()
        # First hidden layer consumes the (possibly sparse) input features.
        self.gc_layers.append(GCNLayer(nhid, kernel_regularizer=tf.keras.regularizers.l2(5e-4), sparse_input=sparse_input))
        # BUG FIX: the original loop appended `nlayer-1` extra hidden layers,
        # creating `nlayer+1` layers in total while `call` only ever runs
        # `nlayer` of them (indices 0..nlayer-2 plus the last) — the surplus
        # layer was dead weight.  Build exactly `nlayer-2` additional hidden
        # layers, then the output layer.
        for i in range(nlayer-2):
            self.gc_layers.append(GCNLayer(nhid, kernel_regularizer=tf.keras.regularizers.l2(5e-4)))
        self.gc_layers.append(GCNLayer(nclass))
        self.keep_prob = 1 - dropout
        self.dropout = tf.keras.layers.Dropout(dropout)
        # nf_shape of the sparse feature tensor; set by preprocess().
        self.nshape = None
        self.train_ratio = train_ratio
        self.eval_ratio = eval_ratio
        self.nlayer = nlayer
        self.epochs = epochs
        self.early_stopping = early_stopping
        self.sparse_input = sparse_input
        self.id_col = id_col
        self.feature_col = feature_col
        self.from_node_col = from_node_col
        self.to_node_col = to_node_col
        # try to load the result file of a previous run, if any
        try:
            with open('./results.pkl', 'rb') as f:
                self.results = pickle.load(f)
        except (FileNotFoundError, IOError):
            self.results = None

    def call(self, data):
        """Forward pass; `data` is a pair (features, normalized adjacency)."""
        x, adj = data
        assert self.nshape is not None, "Should calculate the shape of input by preprocessing the data with model.preprocess(data)."
        # Dropout on the input, with a sparse-aware variant when needed.
        if self.sparse_input:
            x = GCN.sparse_dropout(x, self.keep_prob, self.nshape)
        else:
            x = self.dropout(x)
        # Hidden layers: graph convolution -> ReLU -> dropout.
        for i in range(self.nlayer-1):
            x = tf.keras.activations.relu(self.gc_layers[i](x, adj))
            x = self.dropout(x)
        # Output layer followed by a softmax over classes.
        x = self.gc_layers[-1](x, adj)
        return tf.keras.activations.softmax(x)

    def evaluate(self, data, y, sample_weight):
        """Function to evaluate the model; `sample_weight` is the node mask."""
        return self.test(sample_weight, return_loss=True)

    def predict(self, data):
        """Function to predict labels with the model (no dropout applied)."""
        x, adj = data
        for i in range(self.nlayer-1):
            x = tf.keras.activations.relu(self.gc_layers[i](x, adj))
        x = self.gc_layers[-1](x, adj)
        return tf.keras.activations.softmax(x)

    @staticmethod
    def sparse_dropout(x, keep_prob, noise_shape):
        """Dropout for sparse tensors."""
        random_tensor = keep_prob
        random_tensor += tf.random.uniform(noise_shape)
        dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
        pre_out = tf.sparse.retain(x, dropout_mask)
        # Rescale so the expected value is unchanged.
        return pre_out * (1./keep_prob)

    @staticmethod
    def encode_onehot(labels):
        """One-hot encode an iterable of class labels."""
        classes = set(labels)
        classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
        labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
        return labels_onehot

    @staticmethod
    def normalize_adj(adjacency, symmetric=True):
        """
        Function to normalize the adjacency matrix (get the laplacian matrix).
        :param adjacency: Adjacency matrix of the dataset.
            type adjacency: Scipy COO_Matrix.
        :param symmetric: Boolean variable to determine whether to use symmetric laplacian.
            type symmetric: bool.
        """
        adjacency += sp.eye(adjacency.shape[0])
        if symmetric:
            """L=D^-0.5 * (A+I) * D^-0.5"""
            d = sp.diags(np.power(np.array(adjacency.sum(1)), -0.5).flatten(), 0)
            a_norm = adjacency.dot(d).transpose().dot(d).tocoo()
        else:
            """L=D^-1 * (A+I)"""
            d = sp.diags(np.power(np.array(adjacency.sum(1)), -1).flatten(), 0)
            a_norm = d.dot(adjacency).tocoo()
        return a_norm

    @staticmethod
    def normalize_feature(features, sparse_input):
        """Function to row-normalize the features input."""
        rowsum = np.array(features.sum(1))
        r_inv = np.power(rowsum, -1).flatten()
        # Rows summing to zero produce inf; zero them out instead.
        r_inv[np.isinf(r_inv)] = 0.
        r_mat_inv = sp.diags(r_inv)
        features = r_mat_inv.dot(features)
        if sparse_input:
            return sp.csr_matrix(features).tocoo()
        else:
            return features

    def preprocess(self, ids, features, labels, edges):
        """Function to preprocess the node features and adjacency matrix."""
        if len(features.shape) > 2:
            features = np.squeeze(features)
        if len(edges.shape) > 2:
            edges = np.squeeze(edges)
        # sort the data in the correct order (ascending node id)
        idx = np.argsort(np.array(ids))
        features = features[idx]
        labels = labels[idx]
        # preprocess
        features = GCN.normalize_feature(features, self.sparse_input)
        labels = GCN.encode_onehot(labels)
        adjacency = sp.coo_matrix((np.ones(len(edges)),
                                   (edges[:, 0], edges[:, 1])),
                                  shape=(features.shape[0], features.shape[0]), dtype="float32")
        # Symmetrize the adjacency matrix.
        adjacency = adjacency + adjacency.T.multiply(adjacency.T > adjacency) - adjacency.multiply(adjacency.T > adjacency)
        adjacency = GCN.normalize_adj(adjacency, symmetric=True)
        nf_shape = features.data.shape
        na_shape = adjacency.data.shape
        if self.sparse_input:
            features = tf.SparseTensor(
                indices=np.array(list(zip(features.row, features.col)), dtype=np.int64),
                values=tf.cast(features.data, tf.float32),
                dense_shape=features.shape)
            features = tf.sparse.reorder(features)
        adjacency = tf.SparseTensor(
            indices=np.array(list(zip(adjacency.row, adjacency.col)), dtype=np.int64),
            values=tf.cast(adjacency.data, tf.float32),
            dense_shape=adjacency.shape)
        adjacency = tf.sparse.reorder(adjacency)
        # Split the nodes into contiguous train / validation / test masks.
        total_num = features.shape[0]
        train_num = round(total_num*self.train_ratio)
        eval_num = round(total_num*self.eval_ratio)
        train_index = np.arange(train_num)
        val_index = np.arange(train_num, train_num+eval_num)
        test_index = np.arange(train_num+eval_num, total_num)
        # BUG FIX: `dtype=np.bool` — the alias was removed in NumPy 1.24 and
        # raises AttributeError there; the builtin `bool` is the correct dtype.
        self.train_mask = np.zeros(total_num, dtype=bool)
        self.val_mask = np.zeros(total_num, dtype=bool)
        self.test_mask = np.zeros(total_num, dtype=bool)
        self.train_mask[train_index] = True
        self.val_mask[val_index] = True
        self.test_mask[test_index] = True
        print('Dataset has {} nodes, {} edges, {} features.'.format(features.shape[0], edges.shape[0], features.shape[1]))
        return features, labels, adjacency, nf_shape, na_shape

    def loss_func(self, model, x, y, train_mask, training=True):
        '''Custom loss function: cross-entropy restricted to masked nodes.'''
        y_ = model(x, training=training)
        test_mask_logits = tf.gather_nd(y_, tf.where(train_mask))
        masked_labels = tf.gather_nd(y, tf.where(train_mask))
        return loss(labels=masked_labels, output=test_mask_logits)

    def grad(self, model, inputs, targets, train_mask):
        '''Calculate the gradients of the parameters.'''
        with tf.GradientTape() as tape:
            loss_value = self.loss_func(model, inputs, targets, train_mask)
        return loss_value, tape.gradient(loss_value, model.trainable_variables)

    def test(self, mask, return_loss=False):
        '''Test the results on the model. Return accuracy (and loss if asked).'''
        logits = self.predict(data=[self.features, self.adjacency])
        test_mask_logits = tf.gather_nd(logits, tf.where(mask))
        masked_labels = tf.gather_nd(self.labels, tf.where(mask))
        ll = tf.equal(tf.argmax(masked_labels, -1), tf.argmax(test_mask_logits, -1))
        accuracy = tf.reduce_mean(tf.cast(ll, dtype=tf.float32))
        if return_loss:
            loss_value = loss(labels=masked_labels, output=test_mask_logits)
            return [loss_value, accuracy]
        return accuracy

    def sqlflow_train_loop(self, x):
        """Customized training function."""
        # load data, de-duplicating nodes and edges as we stream rows
        ids, ids_check, features, labels, edges, edge_check = list(), dict(), list(), list(), list(), dict()
        from_node = 0
        for inputs, label in x:
            id = inputs[self.id_col].numpy().astype(np.int32)
            feature = inputs[self.feature_col].numpy().astype(np.float32)
            from_node = inputs[self.from_node_col].numpy().astype(np.int32)
            to_node = inputs[self.to_node_col].numpy().astype(np.int32)
            if int(id) not in ids_check:
                ids.append(int(id))
                features.append(feature)
                labels.append(label.numpy()[0])
                ids_check[int(id)] = 0
            if tuple([int(from_node), int(to_node)]) not in edge_check:
                edge_check[tuple([int(from_node), int(to_node)])] = 0
                edges.append([from_node, to_node])
        features = np.stack(features)
        labels = np.stack(labels)
        edges = np.stack(edges)
        self.features, self.labels, self.adjacency, self.nshape, na_shape = self.preprocess(ids, features, labels, edges)
        # training the model
        wait = 0
        best_acc = -9999999
        PATIENCE = 10
        for epoch in range(self.epochs):
            # calculate the gradients and take the step
            loss_value, grads = self.grad(self, [self.features, self.adjacency], self.labels, self.train_mask)
            # NOTE(review): a fresh Adam instance is created every epoch, so
            # its moment estimates never accumulate — confirm this is intended.
            optimizer().apply_gradients(zip(grads, self.trainable_variables))
            # Test on train and evaluate dataset
            train_acc = self.test(self.train_mask)
            val_acc = self.test(self.val_mask)
            print("Epoch {} loss={:6f} accuracy={:6f} val_acc={:6f}".format(epoch, loss_value, train_acc, val_acc))
            # early stopping
            if epoch > 50 and self.early_stopping:
                if float(val_acc.numpy()) > best_acc:
                    best_acc = float(val_acc.numpy())
                    wait = 0
                else:
                    if wait >= PATIENCE:
                        print('Epoch {}: early stopping'.format(epoch))
                        break
                    wait += 1
        # evaluate the model
        result = self.evaluate(data=[self.features, self.adjacency], y=self.labels, sample_weight=self.val_mask)
        # get all the results
        predicted = self.predict([self.features, self.adjacency])
        # store the results in a pickled file
        with open('./results.pkl', 'wb') as f:
            results = dict()
            for i in range(len(ids)):
                results[str(ids[i])] = predicted[i]
            results['evaluation'] = result
            pickle.dump(results, f)
        self.results = results

    def sqlflow_evaluate_loop(self, x, metric_names):
        """Custom evaluation, can only support calculating the accuracy."""
        assert self.results is not None, "Please make sure to train the model first."
        eval_result = self.results['evaluation']
        return eval_result

    def sqlflow_predict_one(self, sample):
        """Custom prediction, sample must be the node id."""
        assert self.results is not None, "Please make sure to train the model first."
        prediction = self.results[str(int(sample))]
        return [prediction]
def optimizer():
    """Default optimizer (Adam, learning rate 0.01). Used in model.compile."""
    adam = tf.keras.optimizers.Adam(lr=0.01)
    return adam
def loss(labels, output):
    """Default loss for the classification task: categorical cross-entropy
    on probabilities (the model's softmax output, hence from_logits=False)."""
    return tf.keras.losses.CategoricalCrossentropy(from_logits=False)(
        y_true=labels, y_pred=output)
# Graph Convolutional Layer
class GCNLayer(tf.keras.layers.Layer):
    def __init__(self, units, use_bias=True, sparse_input=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """GCNLayer
        Graph Convolutional Networks Layer from paper: https://arxiv.org/pdf/1609.02907.pdf. This is used in the GCN model for
        classification task on graph-structured data.
        :param units: Number of hidden units for the layer.
            type units: int.
        :param use_bias: Boolean variable to determine whether to use bias.
            type use_bias: bool.
        :param sparse_input: Boolean variable to check if input tensor is sparse.
            type sparse_input: bool.
        :param kernel_initializer: Weight initializer for the GCN kernel.
        :param bias_initializer: Weight initializer for the bias.
        :param kernel_regularizer: Weight regularizer for the GCN kernel.
        :param bias_regularizer: Weight regularizer for the bias.
        :param kernel_constraint: Weight value constraint for the GCN kernel.
        :param bias_constraint: Weight value constraint for the bias.
        :param kwargs:
        """
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(GCNLayer, self).__init__(**kwargs)
        self.units = units
        self.use_bias = use_bias
        self.sparse_input = sparse_input
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

    def build(self, input_shape):
        """Create the kernel (and optional bias) once the input dim is known."""
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True)
        self.built = True

    def call(self, inputs, adj, **kwargs):
        """Compute A_hat @ (X @ W) (+ bias), the GCN propagation rule."""
        assert isinstance(adj, tf.SparseTensor), "Adjacency matrix should be a SparseTensor"
        if self.sparse_input:
            assert isinstance(inputs, tf.SparseTensor), "Input matrix should be a SparseTensor"
            support = tf.sparse.sparse_dense_matmul(inputs, self.kernel)
        else:
            support = tf.matmul(inputs, self.kernel)
        output = tf.sparse.sparse_dense_matmul(adj, support)
        # IDIOM FIX: the original had a redundant `else: output = output`
        # branch; the bias addition alone expresses the same behavior.
        if self.use_bias:
            output = output + self.bias
        return output

    def get_config(self):
        """Serialize the layer's constructor arguments for model saving."""
        config = {'units': self.units,
                  'use_bias': self.use_bias,
                  'sparse_input': self.sparse_input,
                  'kernel_initializer': initializers.serialize(
                      self.kernel_initializer),
                  'bias_initializer': initializers.serialize(
                      self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(
                      self.kernel_regularizer),
                  'bias_regularizer': regularizers.serialize(
                      self.bias_regularizer),
                  'kernel_constraint': constraints.serialize(
                      self.kernel_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint)
                  }
        base_config = super(GCNLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
"pickle.dump",
"tensorflow.sparse.retain",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.floor",
"tensorflow.matmul",
"numpy.arange",
"pickle.load",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.initializers.get",
"scipy.sparse.eye",
"tensorflow.keras.regularizers... | [((14074, 14107), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (14098, 14107), True, 'import tensorflow as tf\n'), ((14208, 14266), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (14247, 14266), True, 'import tensorflow as tf\n'), ((3201, 3233), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {}), '(dropout)\n', (3224, 3233), True, 'import tensorflow as tf\n'), ((4376, 4407), 'tensorflow.keras.activations.softmax', 'tf.keras.activations.softmax', (['x'], {}), '(x)\n', (4404, 4407), True, 'import tensorflow as tf\n'), ((4832, 4863), 'tensorflow.keras.activations.softmax', 'tf.keras.activations.softmax', (['x'], {}), '(x)\n', (4860, 4863), True, 'import tensorflow as tf\n'), ((5035, 5065), 'tensorflow.random.uniform', 'tf.random.uniform', (['noise_shape'], {}), '(noise_shape)\n', (5052, 5065), True, 'import tensorflow as tf\n'), ((5155, 5188), 'tensorflow.sparse.retain', 'tf.sparse.retain', (['x', 'dropout_mask'], {}), '(x, dropout_mask)\n', (5171, 5188), True, 'import tensorflow as tf\n'), ((5939, 5965), 'scipy.sparse.eye', 'sp.eye', (['adjacency.shape[0]'], {}), '(adjacency.shape[0])\n', (5945, 5965), True, 'import scipy.sparse as sp\n'), ((6651, 6666), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (6659, 6666), True, 'import scipy.sparse as sp\n'), ((8466, 8494), 'tensorflow.sparse.reorder', 'tf.sparse.reorder', (['adjacency'], {}), '(adjacency)\n', (8483, 8494), True, 'import tensorflow as tf\n'), ((8670, 8690), 'numpy.arange', 'np.arange', (['train_num'], {}), '(train_num)\n', (8679, 8690), True, 'import numpy as np\n'), ((8711, 8753), 'numpy.arange', 'np.arange', (['train_num', '(train_num + eval_num)'], {}), '(train_num, train_num + eval_num)\n', (8720, 8753), True, 'import numpy as np\n'), ((8773, 8815), 
'numpy.arange', 'np.arange', (['(train_num + eval_num)', 'total_num'], {}), '(train_num + eval_num, total_num)\n', (8782, 8815), True, 'import numpy as np\n'), ((8841, 8875), 'numpy.zeros', 'np.zeros', (['total_num'], {'dtype': 'np.bool'}), '(total_num, dtype=np.bool)\n', (8849, 8875), True, 'import numpy as np\n'), ((8902, 8936), 'numpy.zeros', 'np.zeros', (['total_num'], {'dtype': 'np.bool'}), '(total_num, dtype=np.bool)\n', (8910, 8936), True, 'import numpy as np\n'), ((8964, 8998), 'numpy.zeros', 'np.zeros', (['total_num'], {'dtype': 'np.bool'}), '(total_num, dtype=np.bool)\n', (8972, 8998), True, 'import numpy as np\n'), ((11591, 11609), 'numpy.stack', 'np.stack', (['features'], {}), '(features)\n', (11599, 11609), True, 'import numpy as np\n'), ((11627, 11643), 'numpy.stack', 'np.stack', (['labels'], {}), '(labels)\n', (11635, 11643), True, 'import numpy as np\n'), ((11660, 11675), 'numpy.stack', 'np.stack', (['edges'], {}), '(edges)\n', (11668, 11675), True, 'import numpy as np\n'), ((16035, 16071), 'tensorflow.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (16051, 16071), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((16104, 16138), 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), '(bias_initializer)\n', (16120, 16138), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((16173, 16209), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (16189, 16209), False, 'from tensorflow.keras import regularizers\n'), ((16242, 16276), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), '(bias_regularizer)\n', (16258, 16276), False, 'from tensorflow.keras import regularizers\n'), ((16310, 16344), 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (16325, 16344), 
False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((16376, 16408), 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), '(bias_constraint)\n', (16391, 16408), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((17722, 17765), 'tensorflow.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', (['adj', 'support'], {}), '(adj, support)\n', (17751, 17765), True, 'import tensorflow as tf\n'), ((5097, 5120), 'tensorflow.floor', 'tf.floor', (['random_tensor'], {}), '(random_tensor)\n', (5105, 5120), True, 'import tensorflow as tf\n'), ((6609, 6624), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (6617, 6624), True, 'import numpy as np\n'), ((7021, 7041), 'numpy.squeeze', 'np.squeeze', (['features'], {}), '(features)\n', (7031, 7041), True, 'import numpy as np\n'), ((7095, 7112), 'numpy.squeeze', 'np.squeeze', (['edges'], {}), '(edges)\n', (7105, 7112), True, 'import numpy as np\n'), ((7183, 7196), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (7191, 7196), True, 'import numpy as np\n'), ((8161, 8188), 'tensorflow.sparse.reorder', 'tf.sparse.reorder', (['features'], {}), '(features)\n', (8178, 8188), True, 'import tensorflow as tf\n'), ((9524, 9544), 'tensorflow.where', 'tf.where', (['train_mask'], {}), '(train_mask)\n', (9532, 9544), True, 'import tensorflow as tf\n'), ((9586, 9606), 'tensorflow.where', 'tf.where', (['train_mask'], {}), '(train_mask)\n', (9594, 9606), True, 'import tensorflow as tf\n'), ((9803, 9820), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9818, 9820), True, 'import tensorflow as tf\n'), ((10223, 10237), 'tensorflow.where', 'tf.where', (['mask'], {}), '(mask)\n', (10231, 10237), True, 'import tensorflow as tf\n'), ((10289, 10303), 'tensorflow.where', 'tf.where', (['mask'], {}), '(mask)\n', (10297, 10303), True, 'import tensorflow as tf\n'), ((10328, 10356), 'tensorflow.argmax', 'tf.argmax', (['masked_labels', 
'(-1)'], {}), '(masked_labels, -1)\n', (10337, 10356), True, 'import tensorflow as tf\n'), ((10358, 10389), 'tensorflow.argmax', 'tf.argmax', (['test_mask_logits', '(-1)'], {}), '(test_mask_logits, -1)\n', (10367, 10389), True, 'import tensorflow as tf\n'), ((10425, 10454), 'tensorflow.cast', 'tf.cast', (['ll'], {'dtype': 'tf.float32'}), '(ll, dtype=tf.float32)\n', (10432, 10454), True, 'import tensorflow as tf\n'), ((13361, 13384), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (13372, 13384), False, 'import pickle, copy\n'), ((17587, 17637), 'tensorflow.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (17616, 17637), True, 'import tensorflow as tf\n'), ((17674, 17704), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (17683, 17704), True, 'import tensorflow as tf\n'), ((18103, 18150), 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.kernel_initializer'], {}), '(self.kernel_initializer)\n', (18125, 18150), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((18213, 18258), 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.bias_initializer'], {}), '(self.bias_initializer)\n', (18235, 18258), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((18323, 18370), 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.kernel_regularizer'], {}), '(self.kernel_regularizer)\n', (18345, 18370), False, 'from tensorflow.keras import regularizers\n'), ((18433, 18478), 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.bias_regularizer'], {}), '(self.bias_regularizer)\n', (18455, 18478), False, 'from tensorflow.keras import regularizers\n'), ((18542, 18587), 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.kernel_constraint'], {}), 
'(self.kernel_constraint)\n', (18563, 18587), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((18649, 18692), 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.bias_constraint'], {}), '(self.bias_constraint)\n', (18670, 18692), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((3764, 3778), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3775, 3778), False, 'import pickle, copy\n'), ((6564, 6584), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (6572, 6584), True, 'import numpy as np\n'), ((8356, 8391), 'tensorflow.cast', 'tf.cast', (['adjacency.data', 'tf.float32'], {}), '(adjacency.data, tf.float32)\n', (8363, 8391), True, 'import tensorflow as tf\n'), ((2898, 2930), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (2922, 2930), True, 'import tensorflow as tf\n'), ((6754, 6777), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['features'], {}), '(features)\n', (6767, 6777), True, 'import scipy.sparse as sp\n'), ((8050, 8084), 'tensorflow.cast', 'tf.cast', (['features.data', 'tf.float32'], {}), '(features.data, tf.float32)\n', (8057, 8084), True, 'import tensorflow as tf\n'), ((3060, 3092), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (3084, 3092), True, 'import tensorflow as tf\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from pandas_datareader import data as pdr
# Import our data
def get_stock(stocks, start, end):
stockdata = pdr.get_data_yahoo(stocks, start, end)
stockdata = stockdata['Close']
returns = stockdata.pct_change()
meanreturns = returns.mean()
covmatrix = returns.cov()
return meanreturns, covmatrix
stocklist = ['JPM', 'AAPL', 'MSFT', 'NFLX', 'INTC', 'AMD', 'NVDA']
stocks = [stock for stock in stocklist]
enddate = dt.datetime.now()
startdate = enddate - dt.timedelta(days=300)
meanreturns, covmatrix = get_stock(stocks, startdate, enddate)
# randomize weights
weights = np.random.random(len(meanreturns))
# Weights add up to 1
weights /= np.sum(weights)
# Monte Carlo Simulations
mc_sims = 400
# Time in days
T = 100
# Arrays to store and retrive information
meanM = np.full(shape=(T, len(weights)), fill_value=meanreturns)
meanM = meanM.T
portfolio_sims = np.full(shape=(T, mc_sims), fill_value=0.0)
# Set our portfolio value
initialPortfolio = 10000
for m in range(0, mc_sims):
# Monte Carlo loops
normaldist = np.random.normal(size=(T, len(weights)))
Ltriangle = np.linalg.cholesky(covmatrix)
# Get our daily returns
dailyreturns = meanM + np.inner(normaldist, Ltriangle)
portfolio_sims[:,m] = np.cumprod(np.inner(weights, dailyreturns.T)+1)*initialPortfolio
plt.plot(portfolio_sims)
plt.ylabel('Portfolio Value ($)')
plt.xlabel('Days')
plt.title('Monte Carlo Simulation of Portfolio')
plt.show() | [
"numpy.full",
"matplotlib.pyplot.title",
"pandas_datareader.data.get_data_yahoo",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"datetime.timedelta",
"numpy.inner",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"datetime.datetime.now",
"numpy.linalg.cholesky"
] | [((535, 552), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (550, 552), True, 'import datetime as dt\n'), ((761, 776), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (767, 776), True, 'import numpy as np\n'), ((985, 1028), 'numpy.full', 'np.full', ([], {'shape': '(T, mc_sims)', 'fill_value': '(0.0)'}), '(shape=(T, mc_sims), fill_value=0.0)\n', (992, 1028), True, 'import numpy as np\n'), ((1420, 1444), 'matplotlib.pyplot.plot', 'plt.plot', (['portfolio_sims'], {}), '(portfolio_sims)\n', (1428, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Portfolio Value ($)"""'], {}), "('Portfolio Value ($)')\n", (1455, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (1489, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1546), 'matplotlib.pyplot.title', 'plt.title', (['"""Monte Carlo Simulation of Portfolio"""'], {}), "('Monte Carlo Simulation of Portfolio')\n", (1507, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1557), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1555, 1557), True, 'import matplotlib.pyplot as plt\n'), ((209, 247), 'pandas_datareader.data.get_data_yahoo', 'pdr.get_data_yahoo', (['stocks', 'start', 'end'], {}), '(stocks, start, end)\n', (227, 247), True, 'from pandas_datareader import data as pdr\n'), ((575, 597), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(300)'}), '(days=300)\n', (587, 597), True, 'import datetime as dt\n'), ((1209, 1238), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['covmatrix'], {}), '(covmatrix)\n', (1227, 1238), True, 'import numpy as np\n'), ((1295, 1326), 'numpy.inner', 'np.inner', (['normaldist', 'Ltriangle'], {}), '(normaldist, Ltriangle)\n', (1303, 1326), True, 'import numpy as np\n'), ((1365, 1398), 'numpy.inner', 'np.inner', (['weights', 'dailyreturns.T'], {}), '(weights, dailyreturns.T)\n', 
(1373, 1398), True, 'import numpy as np\n')] |
import sys
import time
import math
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision as vsn
#from apex.fp16_utils import FP16_Optimizer
from models.nets import ResUNet
from utils.data_loaders import get_data_mt_loaders
from utils.data_vis import plot_from_torch
from utils.evaluations import FocalLoss2d, DiceLoss, get_iou_vector, ConsistencyLoss
import utils.lovasz_losses as L
parser = argparse.ArgumentParser(description='TGS Salt')
parser.add_argument('--imsize', default=128, type=int,
help='imsize to use for training')
parser.add_argument('--batch_size', default=32, type=int,
help='size of batches')
parser.add_argument('--num_folds', default=5, type=int,
help='number of cross val folds')
parser.add_argument('--epochs', default=1000, type=int,
help='number of epochs')
parser.add_argument('--start_fold', default=0, type=int,
help='first fold to try')
#parser.add_argument('--lr', default=0.01, type=float,
# help='learning rate')
parser.add_argument('--lr_max', default=0.01, type=float,
help='initial learning rate')
parser.add_argument('--lr_min', default=0.0003, type=float,
help='min lr for cosine annealing')
parser.add_argument('--num_cycles', default=7, type=int,
help='how many cyclic lr cycles')
parser.add_argument('--unlab_ratio', default=0.25, type=float,
help='ratio of unlabeled data in batch')
parser.add_argument('--wt_max', default=1, type=float,
help='max weight on consistency loss')
parser.add_argument('--rampup', default=10, type=int,
help='how long to ramp up the unsupervised weight')
parser.add_argument('--lr_rampup', default=1, type=int,
help='how long to ramp up the unsupervised weight')
parser.add_argument('--lr_rampdown', default=50, type=int,
help='how long to ramp up the unsupervised weight')
parser.add_argument('--lr_scale', default=0.1, type=float,
help='how much to reduce lr on plateau')
parser.add_argument('--l2', default=1e-5, type=float,
help='l2 regularization for model')
parser.add_argument('--lambda_dice', default=1.0, type=float,
help='lambda value for coordinate loss')
parser.add_argument('--es_patience', default=30, type=int,
help='early stopping patience')
parser.add_argument('--lr_patience', default=20, type=int,
help='early stopping patience')
parser.add_argument('--gpu', default=1, type=int,
help='which gpu for training')
parser.add_argument('--model_name', default='resunet_mt', type=str,
help='name of model for saving/loading weights')
parser.add_argument('--exp_name', default='tgs_slt', type=str,
help='name of experiment for saving files')
parser.add_argument('--debug', action='store_true',
help='whether to display debug info')
parser.add_argument('--load_best', action='store_true',
help='load the previous best net to continue training')
parser.add_argument('--freeze_bn', action='store_true',
help='freeze batch norm during finetuning')
parser.add_argument('--use_lovasz', action='store_true',
help='whether to use focal loss during finetuning')
parser.add_argument('--cos_anneal', action='store_true',
help='whether to use cosine annealing for learning rate')
args = parser.parse_args()
# define the loss functions
focal_loss = FocalLoss2d()
bce = nn.BCEWithLogitsLoss()
cl = ConsistencyLoss()
dice = DiceLoss()
# set up the dual gpu
#device = torch.device('cuda:1' if args.gpu else 'cpu')
#device_b = torch.device('cuda:0' if args.gpu else 'cpu')
def update_teacher(teacher, student, alpha=0.99):
for param_t, param_s in zip(teacher.parameters(), student.parameters()):
param_t.data *= alpha
param_t.data += param_s.data * (1. - alpha)
# training function
def train(student, teacher, optimizer, train_loader,
w_t=0., e=0, freeze_bn=False, use_lovasz=False):
'''
uses the data loader to grab a batch of images
pushes images through network and gathers predictions
updates network weights by evaluating the loss functions
'''
# set network to train mode
student.train(True, freeze_bn)
teacher.train(True, freeze_bn)
# keep track of our loss
iter_loss = 0.
iter_closs = 0.
# loop over the images for the desired amount
for i, data in enumerate(train_loader):
imgs_a = data['img_a'].cuda()
imgs_b = data['img_b'].cuda()
msks = data['msk'].cuda()
labeled_bool = data['is_labeled'].cuda()
has_msk = data['has_msk'].float().cuda()
mask = labeled_bool == 1
if args.debug and i == 0:
#print('{} labeled, {} total'.format(len(imgs_a[mask]), len(imgs_a)))
img_a_grid = vsn.utils.make_grid(imgs_a, normalize=True)
img_b_grid = vsn.utils.make_grid(imgs_b, normalize=True)
msk_grid = vsn.utils.make_grid(msks)
vsn.utils.save_image(img_a_grid, '../imgs/train_mt_imgs_a.png')
vsn.utils.save_image(img_b_grid, '../imgs/train_mt_imgs_b.png')
vsn.utils.save_image(msk_grid, '../imgs/train_msks.png')
# zero gradients from previous run
optimizer.zero_grad()
# get predictions
preds_a, bool_a = student(imgs_a)
# calculate bce loss
if use_lovasz:
loss_s = L.lovasz_hinge(preds_a[mask], msks[mask])
else:
loss_s = focal_loss(preds_a[mask], msks[mask])
loss_s += L.lovasz_hinge(preds_a[mask], msks[mask])
loss_s += bce(bool_a[mask], has_msk[mask].view(-1,1))
# get the teacher predictions
with torch.no_grad():
preds_b, bool_b = teacher(imgs_b)
loss_c = cl(preds_a, preds_b)
loss_c += cl(bool_a, bool_b)
loss = loss_s + w_t * loss_c
#calculate gradients
loss.backward()
# update weights
optimizer.step()
# get training stats
iter_loss += loss_s.item()
iter_closs += loss_c.item()
# update the teacher weights
grad_step = e * (len(train_loader.dataset) / args.batch_size) + (i+1)
alpha = min(1. - 1. / (grad_step + 1), 0.99)
update_teacher(teacher, student, alpha=alpha)
# make a cool terminal output
sys.stdout.write('\r')
sys.stdout.write('B: {:>3}/{:<3} | loss: {:.4} | step: {} alpha: {:.4}'.format(i+1,
len(train_loader),
loss.item(),
int(grad_step),
alpha))
epoch_loss = iter_loss / (len(train_loader.dataset) / args.batch_size)
epoch_closs = iter_closs / (len(train_loader.dataset) / args.batch_size)
print('\n' + 'Avg Train Loss: {:.4}, Avg Consist. Loss: {:.4}'.format(epoch_loss,
epoch_closs))
return epoch_loss
# validation function
def valid(net, optimizer, valid_loader, mtype='student', use_lovasz=False):
net.eval()
# keep track of losses
val_ious = []
val_iter_loss = 0.
# no gradients during validation
with torch.no_grad():
for i, data in enumerate(valid_loader):
valid_imgs = data['img'].cuda()
valid_msks = data['msk'].cuda()
valid_msk_bool = data['has_msk'].float().cuda()
# get predictions
msk_vpreds, bool_v = net(valid_imgs)
# calculate loss
if use_lovasz:
vloss = L.lovasz_hinge(msk_vpreds, valid_msks)
else:
vloss = focal_loss(msk_vpreds, valid_msks)
vloss += L.lovasz_hinge(msk_vpreds, valid_msks)
vloss += bce(bool_v, valid_msk_bool.view(-1,1))
# get validation stats
val_iter_loss += vloss.item()
val_ious.append(get_iou_vector(valid_msks.cpu().numpy()[:,:,13:114, 13:114],
msk_vpreds.sigmoid().cpu().numpy()[:,:,13:114, 13:114]))
epoch_vloss = val_iter_loss / (len(valid_loader.dataset) / args.batch_size)
print('{} Avg Eval Loss: {:.4}, Avg IOU: {:.4}'.format(mtype, epoch_vloss, np.mean(val_ious)))
return epoch_vloss, np.mean(val_ious)
def train_network(student, teacher, fold=0, model_ckpt=None):
# train the network, allow for keyboard interrupt
try:
# define optimizer
optimizer = optim.SGD(student.parameters(), lr=args.lr_max, momentum=0.9)
# get the loaders
train_loader, valid_loader = get_data_mt_loaders(imsize=args.imsize,
batch_size=args.batch_size,
num_folds=args.num_folds,
fold=fold,
unlabeled_ratio=args.unlab_ratio)
# start training with BN on
use_wt = True
use_lovasz = False
freeze_bn = False
train_losses = []
valid_losses_s = []
valid_ious_s = []
valid_losses_t = []
valid_ious_t = []
valid_patience = 0
best_val_metric = 1000.0
best_val_iou = 0.0
cycle = 0
t_ = 0
w_t = 0.
mt_counter = 0
print('Training ...')
for e in range(args.epochs):
print('\n' + 'Epoch {}/{}'.format(e, args.epochs))
# LR warm-up
#if e < args.lr_rampup:
# lr = args.lr * (min(t_+1, args.lr_rampup) / args.lr_rampup)
# if we get to the end of lr period, save swa weights
if t_ >= args.lr_rampdown:
# reset the counter
t_ = 0
cycle += 1
save_imgs = True
torch.save(net.state_dict(),
'../model_weights/{}_{}_cycle-{}_fold-{}.pth'.format(args.model_name,
args.exp_name,
cycle,
fold))
for params in optimizer.param_groups:
if args.cos_anneal:
params['lr'] = (args.lr_min + 0.5 * (args.lr_max - args.lr_min) *
(1 + np.cos(np.pi * t_ / args.lr_rampdown)))
#elif e < args.lr_rampup:
# params['lr'] = args.lr * (min(t_+1, args.lr_rampup) / args.lr_rampup)
print('Learning rate set to {:.4}'.format(optimizer.param_groups[0]['lr']))
start = time.time()
t_l = train(student, teacher, optimizer, train_loader, w_t, e, freeze_bn, use_lovasz)
v_l_s, viou_s = valid(student, optimizer, valid_loader, 'student', use_lovasz)
v_l_t, viou_t = valid(teacher, optimizer, valid_loader, 'teacher', use_lovasz)
# save the model on best validation loss
if viou_t > best_val_iou:
teacher.eval()
torch.save(teacher.state_dict(), model_ckpt)
best_val_metric = v_l_t
best_val_iou = viou_t
valid_patience = 0
# only start using the patience values when we get past the rampup period
else:
valid_patience += 1
# if the model stops improving by a certain num epoch, stop
if cycle == args.num_cycles:
break
# if the model doesn't improve for n epochs, reduce learning rate
if cycle >= 1:
if args.use_lovasz:
print('switching to lovasz')
use_lovasz = True
#dice_weight += 0.5
if not args.cos_anneal:
print('Reducing learning rate by {}'.format(args.lr_scale))
for params in optimizer.param_groups:
params['lr'] *= args.lr_scale
# if the model doesn't improve for n epochs, reduce learning rate
if valid_patience == args.lr_patience:
if args.freeze_bn:
freeze_bn = True
#batch_size = batch_size // 2 if batch_size // 2 >= 16 else 16
if args.use_lovasz:
use_lovasz = True
#use_wt = True
#w_t = args.wt_max
#print('Reducing learning rate by {}'.format(args.lr_scale))
#for params in optimizer.param_groups:
# params['lr'] *= args.lr_scale
# record losses
train_losses.append(t_l)
valid_losses_s.append(v_l_s)
valid_ious_s.append(viou_s)
valid_losses_t.append(v_l_t)
valid_ious_t.append(viou_t)
# update w_t
w_t = args.wt_max * math.exp(-5 * (1. - (min(t_, args.rampup) / args.rampup))**2)
t_ += 1
print('Setting w_t to {:.4}'.format(w_t))
print('Time: {}'.format(time.time()-start))
except KeyboardInterrupt:
pass
import pandas as pd
out_dict = {'train_losses': train_losses,
'valid_losses_s': valid_losses_s,
'valid_ious_s': valid_ious_s,
'valid_losses_t': valid_losses_t,
'valid_ious_t': valid_ious_t}
out_log = pd.DataFrame(out_dict)
out_log.to_csv('../logs/mt_fold-{}.csv'.format(fold), index=False)
return best_val_iou
def train_folds():
best_ious = []
for fold in range(args.start_fold, args.num_folds):
#if fold > 0:
# break
# set model filenames
model_params = [args.model_name, args.exp_name, fold]
MODEL_CKPT = '../model_weights/best_mt_{}_{}_fold-{}.pth'.format(*model_params)
if args.load_best:
net.load_state_dict(torch.load(MODEL_CKPT,
map_location=lambda storage,
loc: storage))
student = ResUNet(use_bool=True)
teacher = ResUNet(use_bool=True)
for param in teacher.parameters():
param.detach_()
if args.gpu == 99:
student = nn.DataParallel(student, device_ids=[0,1]).cuda()
teacher = nn.DataParallel(teacher, device_ids=[0,1]).cuda()
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
student.cuda()
teacher.cuda()
print('Starting fold {} ...'.format(fold))
best_ious.append(train_network(student, teacher, fold, model_ckpt=MODEL_CKPT))
print('Average IOU:', np.mean(best_ious))
if __name__ == '__main__':
train_folds()
| [
"sys.stdout.write",
"utils.evaluations.DiceLoss",
"argparse.ArgumentParser",
"numpy.mean",
"torch.no_grad",
"pandas.DataFrame",
"torch.load",
"torch.cuda.set_device",
"utils.evaluations.FocalLoss2d",
"utils.evaluations.ConsistencyLoss",
"torch.nn.BCEWithLogitsLoss",
"torchvision.utils.save_ima... | [((566, 613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TGS Salt"""'}), "(description='TGS Salt')\n", (589, 613), False, 'import argparse\n'), ((3831, 3844), 'utils.evaluations.FocalLoss2d', 'FocalLoss2d', ([], {}), '()\n', (3842, 3844), False, 'from utils.evaluations import FocalLoss2d, DiceLoss, get_iou_vector, ConsistencyLoss\n'), ((3851, 3873), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (3871, 3873), True, 'import torch.nn as nn\n'), ((3879, 3896), 'utils.evaluations.ConsistencyLoss', 'ConsistencyLoss', ([], {}), '()\n', (3894, 3896), False, 'from utils.evaluations import FocalLoss2d, DiceLoss, get_iou_vector, ConsistencyLoss\n'), ((3904, 3914), 'utils.evaluations.DiceLoss', 'DiceLoss', ([], {}), '()\n', (3912, 3914), False, 'from utils.evaluations import FocalLoss2d, DiceLoss, get_iou_vector, ConsistencyLoss\n'), ((14076, 14098), 'pandas.DataFrame', 'pd.DataFrame', (['out_dict'], {}), '(out_dict)\n', (14088, 14098), True, 'import pandas as pd\n'), ((6775, 6797), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (6791, 6797), False, 'import sys\n'), ((7701, 7716), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7714, 7716), False, 'import torch\n'), ((8784, 8801), 'numpy.mean', 'np.mean', (['val_ious'], {}), '(val_ious)\n', (8791, 8801), True, 'import numpy as np\n'), ((9100, 9242), 'utils.data_loaders.get_data_mt_loaders', 'get_data_mt_loaders', ([], {'imsize': 'args.imsize', 'batch_size': 'args.batch_size', 'num_folds': 'args.num_folds', 'fold': 'fold', 'unlabeled_ratio': 'args.unlab_ratio'}), '(imsize=args.imsize, batch_size=args.batch_size,\n num_folds=args.num_folds, fold=fold, unlabeled_ratio=args.unlab_ratio)\n', (9119, 9242), False, 'from utils.data_loaders import get_data_mt_loaders\n'), ((14745, 14767), 'models.nets.ResUNet', 'ResUNet', ([], {'use_bool': '(True)'}), '(use_bool=True)\n', (14752, 14767), False, 'from models.nets 
import ResUNet\n'), ((14786, 14808), 'models.nets.ResUNet', 'ResUNet', ([], {'use_bool': '(True)'}), '(use_bool=True)\n', (14793, 14808), False, 'from models.nets import ResUNet\n'), ((15365, 15383), 'numpy.mean', 'np.mean', (['best_ious'], {}), '(best_ious)\n', (15372, 15383), True, 'import numpy as np\n'), ((5231, 5274), 'torchvision.utils.make_grid', 'vsn.utils.make_grid', (['imgs_a'], {'normalize': '(True)'}), '(imgs_a, normalize=True)\n', (5250, 5274), True, 'import torchvision as vsn\n'), ((5300, 5343), 'torchvision.utils.make_grid', 'vsn.utils.make_grid', (['imgs_b'], {'normalize': '(True)'}), '(imgs_b, normalize=True)\n', (5319, 5343), True, 'import torchvision as vsn\n'), ((5367, 5392), 'torchvision.utils.make_grid', 'vsn.utils.make_grid', (['msks'], {}), '(msks)\n', (5386, 5392), True, 'import torchvision as vsn\n'), ((5406, 5469), 'torchvision.utils.save_image', 'vsn.utils.save_image', (['img_a_grid', '"""../imgs/train_mt_imgs_a.png"""'], {}), "(img_a_grid, '../imgs/train_mt_imgs_a.png')\n", (5426, 5469), True, 'import torchvision as vsn\n'), ((5482, 5545), 'torchvision.utils.save_image', 'vsn.utils.save_image', (['img_b_grid', '"""../imgs/train_mt_imgs_b.png"""'], {}), "(img_b_grid, '../imgs/train_mt_imgs_b.png')\n", (5502, 5545), True, 'import torchvision as vsn\n'), ((5558, 5614), 'torchvision.utils.save_image', 'vsn.utils.save_image', (['msk_grid', '"""../imgs/train_msks.png"""'], {}), "(msk_grid, '../imgs/train_msks.png')\n", (5578, 5614), True, 'import torchvision as vsn\n'), ((5831, 5872), 'utils.lovasz_losses.lovasz_hinge', 'L.lovasz_hinge', (['preds_a[mask]', 'msks[mask]'], {}), '(preds_a[mask], msks[mask])\n', (5845, 5872), True, 'import utils.lovasz_losses as L\n'), ((5968, 6009), 'utils.lovasz_losses.lovasz_hinge', 'L.lovasz_hinge', (['preds_a[mask]', 'msks[mask]'], {}), '(preds_a[mask], msks[mask])\n', (5982, 6009), True, 'import utils.lovasz_losses as L\n'), ((6125, 6140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6138, 6140), 
False, 'import torch\n'), ((8740, 8757), 'numpy.mean', 'np.mean', (['val_ious'], {}), '(val_ious)\n', (8747, 8757), True, 'import numpy as np\n'), ((15078, 15109), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (15099, 15109), False, 'import torch\n'), ((8073, 8111), 'utils.lovasz_losses.lovasz_hinge', 'L.lovasz_hinge', (['msk_vpreds', 'valid_msks'], {}), '(msk_vpreds, valid_msks)\n', (8087, 8111), True, 'import utils.lovasz_losses as L\n'), ((8214, 8252), 'utils.lovasz_losses.lovasz_hinge', 'L.lovasz_hinge', (['msk_vpreds', 'valid_msks'], {}), '(msk_vpreds, valid_msks)\n', (8228, 8252), True, 'import utils.lovasz_losses as L\n'), ((11301, 11312), 'time.time', 'time.time', ([], {}), '()\n', (11310, 11312), False, 'import time\n'), ((14573, 14638), 'torch.load', 'torch.load', (['MODEL_CKPT'], {'map_location': '(lambda storage, loc: storage)'}), '(MODEL_CKPT, map_location=lambda storage, loc: storage)\n', (14583, 14638), False, 'import torch\n'), ((14930, 14973), 'torch.nn.DataParallel', 'nn.DataParallel', (['student'], {'device_ids': '[0, 1]'}), '(student, device_ids=[0, 1])\n', (14945, 14973), True, 'import torch.nn as nn\n'), ((15002, 15045), 'torch.nn.DataParallel', 'nn.DataParallel', (['teacher'], {'device_ids': '[0, 1]'}), '(teacher, device_ids=[0, 1])\n', (15017, 15045), True, 'import torch.nn as nn\n'), ((13733, 13744), 'time.time', 'time.time', ([], {}), '()\n', (13742, 13744), False, 'import time\n'), ((11009, 11046), 'numpy.cos', 'np.cos', (['(np.pi * t_ / args.lr_rampdown)'], {}), '(np.pi * t_ / args.lr_rampdown)\n', (11015, 11046), True, 'import numpy as np\n')] |
import numpy as np
import xlsxwriter
import tableprint
from random import randint
import xlrd
import matplotlib.pyplot as plt
from num2words import num2words
import sys
import itertools
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
plt.rcParams.update({'font.size':7})
def mce(n, one_n):
Magnetization_val = []
Temperature_vals = []
External_Fields = []
M = Magnetization_val
T = Temperature_vals
H = External_Fields
print("\n i.e. Please add one extra magnetic field (Hmax + ∆H) in your excel sheet with null \n magnetization values (M) to get accurate output.\n\n \n\n")
datasample = [['H0', 'M (T0,H0)', 'M (T1,H0)', '...'],['H1', 'M (T0,H1)', 'M (T1,H1)', '...'],['H2', 'M (T0,H2)', 'M (T1,H2)', '...'],['...','...','...','...']]
tableprint.table(datasample, ['Magnetic Field (H)', 'Magnetization(M) at T0','Magnetization(M) at T1','...'])
yesorno = input("\n have you arranged your data in your excel sheet according to the format given above (YES/NO)? ")
if yesorno == 'YES' :
print ("\n")
else:
print ("\n please arrange your data according to the format given above. ")
exit()
samp_name = input("\n enter the sample nomenclature : ")
Path_one = input("\n enter the excel file directory of M(H) data(example : C:\File name.xlsx): ")
path_two = input(" enter the file directory (example : C:\File name.xlsx), where the -∆Sm(T) data will be stored : ")
path_three = input(" enter the file directory (example : C:\File name.xlsx), where the arrott plot data will be stored : ")
n = int(n)
one_n = int(one_n)
two_n = int(n * one_n)
print("\n\n now, enter", num2words(n), "temperature values\n")
for b in range(0, (n)):
Temperature_val = input(" enter the temperature value : ")
T.append(Temperature_val)
plot_legend = input("\n\n do you want to plot the figures with legend (YES/NO)? ")
book = xlrd.open_workbook(Path_one)
sheet = book.sheet_by_name('Sheet1')
data = [[sheet.cell_value(r, c) for c in range(n+1)] for r in range(sheet.nrows)]
for a in range(1, one_n+1, 1):
H.append((data[a])[0])
for b in range(0, n):
T.append(T[b])
for b in range(1, n+1):
M.append((data[a])[b])
three_entropy_change_con = []
temperatures = []
one_n_max = one_n - 2
Multiplier = (H[one_n_max]-H[0])/10
for q in range(0, n-1, 1):
one_entropy_change_con = 0
two_entropy_change_con = []
Label_one = []
for j,i in zip(range(0, (one_n-1), 1),range(q, two_n, n)):
entropy_change = abs((float(M[i+1]) - float(M[i]))/(float(T[i+1]) - float(T[i]))) * (float(H[j+1]) - float(H[j]))
one_entropy_change_con += entropy_change
one_entropy_change_con = float(one_entropy_change_con)
j_th_field = H[j]
remainder = j_th_field % Multiplier
if remainder == 0 :
two_entropy_change_con.append(one_entropy_change_con)
Label_one.append(H[j]*(10**(-4)))
temperatures.append(float(T[q]))
three_entropy_change_con.append(two_entropy_change_con)
six_entropy_change_con = []
six_entropy_change_con.append(temperatures)
five_entropy_change_con = []
for j in range(0, len(Label_one), 1):
four_entropy_change_con = []
for i in range(0, n-1, 1):
four_entropy_change_con.append((10**(-4))*(three_entropy_change_con[i])[j])
five_entropy_change_con.append(four_entropy_change_con)
six_entropy_change_con.append(four_entropy_change_con)
colour = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'tab:orange', 'tab:gray', 'tab:brown', 'tab:blue']
marker = itertools.cycle(['^', 'o', 'v', '*', '<', 'p', '>', 'h', 'P', 'H', 'X'])
workbook = xlsxwriter.Workbook(path_two)
worksheet = workbook.add_worksheet()
row = 0
for col, data in enumerate(six_entropy_change_con):
worksheet.write_column(row, col, data)
workbook.close()
one_n_pop = one_n - 1
one_M_plot_final = []
H_plot_final = H
H_plot_final.pop(one_n_pop)
for k in range(0, n, 1):
one_M_plot = []
for l in range(0, (one_n-1), 1):
index = (((l+1)*(n - k)) + l*k) - 1
one_M_plot.append(M[index])
one_M_plot_final.append(one_M_plot)
M_sqr = np.square(one_M_plot_final)
one_H_by_M_con = []
for j in range(0, n, 1):
two_H_by_M_con = []
for i in range(0, one_n-1, 1):
H_by_M_val = H_plot_final[i] / one_M_plot_final[j][i]
two_H_by_M_con.append(H_by_M_val)
one_H_by_M_con.append(two_H_by_M_con)
two_M_plot_final = []
for k in range(0, (one_n - 1), 1):
two_M_plot = []
for l in range(k*n, ((k+1)*n)-1, 1):
two_M_plot.append(M[l])
two_M_plot_final.append(two_M_plot)
if plot_legend == 'YES' :
for k in range (0, n, 1):
plt.plot(H_plot_final, one_M_plot_final[k], linestyle='solid', label = T[n-(k+1)], marker = next(marker), markersize =6, linewidth=2)
plt.legend(loc='upper left',frameon = False, ncol= 3)
plt.xlabel("Magnetic Field(H)", fontname = "Georgia")
plt.ylabel("Magnetization(M)", fontname = "Georgia")
plt.title("Magnetization vs Applied Field", fontname = "Georgia")
plt.show()
for k in range (0, (one_n - 1), (int(one_n/10))):
plt.plot(temperatures, two_M_plot_final[k], linestyle='solid', label = (round((H[k]*(10**(-4))),1)), marker = next(marker), markersize =6, linewidth=2)
plt.legend(loc='upper right',frameon = False, ncol= 2)
plt.xlabel("Temperature(T)", fontname = "Georgia")
plt.ylabel("Magnetization(M)", fontname = "Georgia")
plt.title("Magnetization vs Temperature", fontname = "Georgia")
plt.show()
for q in range(0, len(Label_one), 1):
plt.plot((temperatures), (five_entropy_change_con[q]), linestyle='solid', label= Label_one[q], color = colour[q], marker = next(marker), markersize =6, linewidth=2)
plt.legend(loc='upper right',frameon = False, ncol= 2)
plt.xlabel("Temperature(T)", fontname = "Georgia")
plt.ylabel("-∆Sm", fontname = "Georgia")
plt.title("-∆Sm vs Temperature", fontname = "Georgia")
plt.show()
for i in range (0, n, 1):
plt.plot(one_H_by_M_con[i], M_sqr[i], linestyle='solid', label = T[n-(i+1)], marker = next(marker), markersize =6, linewidth=2)
plt.legend(loc='upper right',frameon = False, ncol= 2)
plt.xlabel("H/M (Applied Field / Magnetization)", fontname = "Georgia")
plt.ylabel("M^2 (Magnetization Square)", fontname = "Georgia")
plt.title("M^2 vs H/M", fontname = "Georgia")
plt.show()
else:
for k in range (0, n, 1):
plt.plot(H_plot_final, one_M_plot_final[k], linestyle='solid', label = T[n-(k+1)], marker = next(marker), markersize =6, linewidth=2)
plt.xlabel("Magnetic Field(H)", fontname = "Georgia")
plt.ylabel("Magnetization(M)", fontname = "Georgia")
plt.title("Magnetization vs Applied Field", fontname = "Georgia")
plt.show()
for k in range (0, (one_n - 1), (int(one_n/10))):
plt.plot(temperatures, two_M_plot_final[k], linestyle='solid', label = (round((H[k]*(10**(-4))),1)), marker = next(marker), markersize =6, linewidth=2)
plt.xlabel("Temperature(T)", fontname = "Georgia")
plt.ylabel("Magnetization(M)", fontname = "Georgia")
plt.title("Magnetization vs Temperature", fontname = "Georgia")
plt.show()
for q in range(0, len(Label_one), 1):
plt.plot((temperatures), (five_entropy_change_con[q]), linestyle='solid', color = colour[q], marker = next(marker), markersize =6, linewidth=2)
plt.xlabel("Temperature(T)", fontname = "Georgia")
plt.ylabel("-∆Sm", fontname = "Georgia")
plt.title("-∆Sm vs Temperature", fontname = "Georgia")
plt.show()
for i in range (0, n, 1):
plt.plot(one_H_by_M_con[i], M_sqr[i], linestyle='solid', label = T[n-(i+1)], marker = next(marker), markersize =6, linewidth=2)
plt.xlabel("H/M (Applied Field / Magnetization)", fontname = "Georgia")
plt.ylabel("M^2 (Magnetization Square)", fontname = "Georgia")
plt.title("M^2 vs H/M", fontname = "Georgia")
plt.show()
M_pow_MFT = np.power(one_M_plot_final, 2)
H_by_M_pow_MFT = np.power(one_H_by_M_con, 1)
M_pow_TMFT = np.power(one_M_plot_final, 4)
H_by_M_pow_TMFT = np.power(one_H_by_M_con, 1)
M_pow_3DH = np.power(one_M_plot_final, (1/0.365))
H_by_M_pow_3DH = np.power(one_H_by_M_con, (1/1.336))
M_pow_3DI = np.power(one_M_plot_final, (1/0.325))
H_by_M_pow_3DI = np.power(one_H_by_M_con,(1/1.24))
plt.subplot(2,2,1)
for i in range (0, n, 1):
plt.plot(H_by_M_pow_MFT[i], M_pow_MFT[i], linestyle='solid', linewidth=2)
plt.xlabel("(H/M)^(1/γ)", fontname = "Georgia")
plt.ylabel("M^(1/β)", fontname = "Georgia")
plt.title("Arrott plot 01 (β:0.5; γ:1)" , fontname = "Georgia")
plt.subplot(2,2,2)
for i in range (0, n, 1):
plt.plot(H_by_M_pow_TMFT[i], M_pow_TMFT[i], linestyle='solid', linewidth=2)
plt.xlabel("(H/M)^(1/γ)", fontname = "Georgia")
plt.ylabel("M^(1/β)", fontname = "Georgia")
plt.title("Arrott plot 02 (β:0.25; γ:1)" , fontname = "Georgia")
plt.subplot(2,2,3)
for i in range (0, n, 1):
plt.plot(H_by_M_pow_3DH[i], M_pow_3DH[i], linestyle='solid', linewidth=2)
plt.xlabel("(H/M)^(1/γ)", fontname = "Georgia")
plt.ylabel("M^(1/β)", fontname = "Georgia")
plt.title("Arrott plot 03 (β:0.365; γ:1.336)" , fontname = "Georgia")
plt.subplot(2,2,4)
for i in range (0, n, 1):
plt.plot(H_by_M_pow_3DI[i], M_pow_3DI[i], linestyle='solid', linewidth=2)
plt.xlabel("(H/M)^(1/γ)", fontname = "Georgia")
plt.ylabel("M^(1/β)", fontname = "Georgia")
plt.title("Arrott plot 04 (β:0.325; γ:1.24)" , fontname = "Georgia")
plt.tight_layout()
plt.show()
for i in range (0,2*n,1):
lo = 2*i+1
T.insert(lo, ' ')
M_sqr_vs_H_by_M = one_H_by_M_con
M_sqr_tolist = M_sqr.tolist()
for i in range (0,n,1):
x_index = 2*i + 1
M_sqr_vs_H_by_M.insert(x_index, M_sqr_tolist[i])
for i in range (0,2*n,1):
M_sqr_vs_H_by_M[i].insert(0, T[i])
M_sqr_vs_H_by_M[i].insert(1, ' ')
if i%2 == 0 :
M_sqr_vs_H_by_M[i].insert(2, 'H/M')
else:
M_sqr_vs_H_by_M[i].insert(2, 'M^2')
workbook = xlsxwriter.Workbook(path_three)
worksheet = workbook.add_worksheet()
row = 0
for col, data in enumerate(M_sqr_vs_H_by_M):
worksheet.write_column(row, col, data)
workbook.close()
T_FWHM_con = []
RCP_con = []
for j in range(1, (len(Label_one) + 1), 1):
del_S_peak = np.max(six_entropy_change_con[j])
for k in range(0, n-1, 1):
if (six_entropy_change_con[j][k] == del_S_peak):
kth_index = k
max_entropy_at_T = six_entropy_change_con[0][kth_index]
half_max = float(del_S_peak/2)
half_max_entropy_at_T_con = []
half_max_entropy_at_T_con.append(float(0.0))
for i in range(0, n-2, 1):
i_th = six_entropy_change_con[j][i]
i_th_plus_one = six_entropy_change_con[j][i+1]
if ((i_th_plus_one >= half_max >= i_th) or (i_th_plus_one <= half_max <= i_th)):
T_i_th = six_entropy_change_con[0][i]
T_i_th_plus_one = six_entropy_change_con[0][i+1]
del_S_dif = abs(float(i_th_plus_one - i_th))
T_dif = float(T_i_th_plus_one - T_i_th)
dif_bet_ith_half = abs(half_max - i_th)
half_max_entropy_at_T = (float((T_dif/del_S_dif) * dif_bet_ith_half)) + T_i_th
half_max_entropy_at_T_con.append(abs(half_max_entropy_at_T))
half_max_entropy_at_T_con.append(float(six_entropy_change_con[0][n-2]))
for l in range(0, (len(half_max_entropy_at_T_con))-1, 1):
l_th = half_max_entropy_at_T_con[l]
l_th_plus_one = half_max_entropy_at_T_con[l+1]
if ((l_th <= max_entropy_at_T) and (l_th_plus_one >= max_entropy_at_T)):
T_left = l_th
T_right = l_th_plus_one
T_FWHM = T_right - T_left
RCP = T_FWHM * del_S_peak
T_FWHM_con.append(float(round(T_FWHM,4)))
RCP_con.append(float(round(RCP,4)))
samp_name_plus_RCP = "RCP (" + samp_name + ") :: max val : " + str(np.max(RCP_con))
samp_name_plus_T_FWHM = "T_FWHM (" + samp_name + ") :: max width : " + str(np.max(T_FWHM_con))
fig,ax1 = plt.subplots()
ax1.set_xlabel("Magnetic Field(H)", fontname = "Georgia")
ax1.set_ylabel("RCP", fontname = "Georgia")
ax1.plot(Label_one, RCP_con, linestyle='solid', marker = 'h', label = samp_name_plus_RCP, color = 'b', markersize =6, linewidth=2)
ax1.legend(loc='upper left',frameon = False, ncol= 2)
ax1.tick_params(axis='y')
ax2 = ax1.twinx()
ax2.set_ylabel("T_FWHM", fontname = "Georgia")
ax2.plot(Label_one, T_FWHM_con, linestyle='solid', marker = 'H', label = samp_name_plus_T_FWHM, color = 'r', markersize =6, linewidth=2)
ax2.legend(loc='lower right',frameon = False, ncol= 2)
ax2.tick_params(axis='y')
plt.title("RCP/T_FWHM vs H", fontname = "Georgia")
plt.show()
return ("\n check the excel spreadsheets, data has been successfully saved.")
| [
"matplotlib.pyplot.title",
"tableprint.table",
"itertools.cycle",
"matplotlib.pyplot.tight_layout",
"warnings.simplefilter",
"numpy.power",
"numpy.max",
"matplotlib.pyplot.rcParams.update",
"num2words.num2words",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend... | [((282, 319), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 7}"], {}), "({'font.size': 7})\n", (301, 319), True, 'import matplotlib.pyplot as plt\n'), ((247, 278), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (268, 278), False, 'import warnings\n'), ((846, 961), 'tableprint.table', 'tableprint.table', (['datasample', "['Magnetic Field (H)', 'Magnetization(M) at T0', 'Magnetization(M) at T1',\n '...']"], {}), "(datasample, ['Magnetic Field (H)',\n 'Magnetization(M) at T0', 'Magnetization(M) at T1', '...'])\n", (862, 961), False, 'import tableprint\n'), ((2084, 2112), 'xlrd.open_workbook', 'xlrd.open_workbook', (['Path_one'], {}), '(Path_one)\n', (2102, 2112), False, 'import xlrd\n'), ((4059, 4131), 'itertools.cycle', 'itertools.cycle', (["['^', 'o', 'v', '*', '<', 'p', '>', 'h', 'P', 'H', 'X']"], {}), "(['^', 'o', 'v', '*', '<', 'p', '>', 'h', 'P', 'H', 'X'])\n", (4074, 4131), False, 'import itertools\n'), ((4154, 4183), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['path_two'], {}), '(path_two)\n', (4173, 4183), False, 'import xlsxwriter\n'), ((4737, 4764), 'numpy.square', 'np.square', (['one_M_plot_final'], {}), '(one_M_plot_final)\n', (4746, 4764), True, 'import numpy as np\n'), ((9288, 9317), 'numpy.power', 'np.power', (['one_M_plot_final', '(2)'], {}), '(one_M_plot_final, 2)\n', (9296, 9317), True, 'import numpy as np\n'), ((9340, 9367), 'numpy.power', 'np.power', (['one_H_by_M_con', '(1)'], {}), '(one_H_by_M_con, 1)\n', (9348, 9367), True, 'import numpy as np\n'), ((9388, 9417), 'numpy.power', 'np.power', (['one_M_plot_final', '(4)'], {}), '(one_M_plot_final, 4)\n', (9396, 9417), True, 'import numpy as np\n'), ((9441, 9468), 'numpy.power', 'np.power', (['one_H_by_M_con', '(1)'], {}), '(one_H_by_M_con, 1)\n', (9449, 9468), True, 'import numpy as np\n'), ((9488, 9525), 'numpy.power', 'np.power', (['one_M_plot_final', '(1 / 0.365)'], {}), 
'(one_M_plot_final, 1 / 0.365)\n', (9496, 9525), True, 'import numpy as np\n'), ((9548, 9583), 'numpy.power', 'np.power', (['one_H_by_M_con', '(1 / 1.336)'], {}), '(one_H_by_M_con, 1 / 1.336)\n', (9556, 9583), True, 'import numpy as np\n'), ((9603, 9640), 'numpy.power', 'np.power', (['one_M_plot_final', '(1 / 0.325)'], {}), '(one_M_plot_final, 1 / 0.325)\n', (9611, 9640), True, 'import numpy as np\n'), ((9663, 9697), 'numpy.power', 'np.power', (['one_H_by_M_con', '(1 / 1.24)'], {}), '(one_H_by_M_con, 1 / 1.24)\n', (9671, 9697), True, 'import numpy as np\n'), ((9706, 9726), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (9717, 9726), True, 'import matplotlib.pyplot as plt\n'), ((9849, 9894), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""(H/M)^(1/γ)"""'], {'fontname': '"""Georgia"""'}), "('(H/M)^(1/γ)', fontname='Georgia')\n", (9859, 9894), True, 'import matplotlib.pyplot as plt\n'), ((9902, 9943), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^(1/β)"""'], {'fontname': '"""Georgia"""'}), "('M^(1/β)', fontname='Georgia')\n", (9912, 9943), True, 'import matplotlib.pyplot as plt\n'), ((9951, 10011), 'matplotlib.pyplot.title', 'plt.title', (['"""Arrott plot 01 (β:0.5; γ:1)"""'], {'fontname': '"""Georgia"""'}), "('Arrott plot 01 (β:0.5; γ:1)', fontname='Georgia')\n", (9960, 10011), True, 'import matplotlib.pyplot as plt\n'), ((10028, 10048), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (10039, 10048), True, 'import matplotlib.pyplot as plt\n'), ((10173, 10218), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""(H/M)^(1/γ)"""'], {'fontname': '"""Georgia"""'}), "('(H/M)^(1/γ)', fontname='Georgia')\n", (10183, 10218), True, 'import matplotlib.pyplot as plt\n'), ((10226, 10267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^(1/β)"""'], {'fontname': '"""Georgia"""'}), "('M^(1/β)', fontname='Georgia')\n", (10236, 10267), True, 'import matplotlib.pyplot as plt\n'), ((10275, 10336), 
'matplotlib.pyplot.title', 'plt.title', (['"""Arrott plot 02 (β:0.25; γ:1)"""'], {'fontname': '"""Georgia"""'}), "('Arrott plot 02 (β:0.25; γ:1)', fontname='Georgia')\n", (10284, 10336), True, 'import matplotlib.pyplot as plt\n'), ((10353, 10373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (10364, 10373), True, 'import matplotlib.pyplot as plt\n'), ((10496, 10541), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""(H/M)^(1/γ)"""'], {'fontname': '"""Georgia"""'}), "('(H/M)^(1/γ)', fontname='Georgia')\n", (10506, 10541), True, 'import matplotlib.pyplot as plt\n'), ((10549, 10590), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^(1/β)"""'], {'fontname': '"""Georgia"""'}), "('M^(1/β)', fontname='Georgia')\n", (10559, 10590), True, 'import matplotlib.pyplot as plt\n'), ((10598, 10664), 'matplotlib.pyplot.title', 'plt.title', (['"""Arrott plot 03 (β:0.365; γ:1.336)"""'], {'fontname': '"""Georgia"""'}), "('Arrott plot 03 (β:0.365; γ:1.336)', fontname='Georgia')\n", (10607, 10664), True, 'import matplotlib.pyplot as plt\n'), ((10681, 10701), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (10692, 10701), True, 'import matplotlib.pyplot as plt\n'), ((10824, 10869), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""(H/M)^(1/γ)"""'], {'fontname': '"""Georgia"""'}), "('(H/M)^(1/γ)', fontname='Georgia')\n", (10834, 10869), True, 'import matplotlib.pyplot as plt\n'), ((10877, 10918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^(1/β)"""'], {'fontname': '"""Georgia"""'}), "('M^(1/β)', fontname='Georgia')\n", (10887, 10918), True, 'import matplotlib.pyplot as plt\n'), ((10926, 10991), 'matplotlib.pyplot.title', 'plt.title', (['"""Arrott plot 04 (β:0.325; γ:1.24)"""'], {'fontname': '"""Georgia"""'}), "('Arrott plot 04 (β:0.325; γ:1.24)', fontname='Georgia')\n", (10935, 10991), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11026), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], 
{}), '()\n', (11024, 11026), True, 'import matplotlib.pyplot as plt\n'), ((11032, 11042), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11040, 11042), True, 'import matplotlib.pyplot as plt\n'), ((11613, 11644), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['path_three'], {}), '(path_three)\n', (11632, 11644), False, 'import xlsxwriter\n'), ((13830, 13844), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13842, 13844), True, 'import matplotlib.pyplot as plt\n'), ((14500, 14548), 'matplotlib.pyplot.title', 'plt.title', (['"""RCP/T_FWHM vs H"""'], {'fontname': '"""Georgia"""'}), "('RCP/T_FWHM vs H', fontname='Georgia')\n", (14509, 14548), True, 'import matplotlib.pyplot as plt\n'), ((14563, 14573), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14571, 14573), True, 'import matplotlib.pyplot as plt\n'), ((1798, 1810), 'num2words.num2words', 'num2words', (['n'], {}), '(n)\n', (1807, 1810), False, 'from num2words import num2words\n'), ((5559, 5610), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'frameon': '(False)', 'ncol': '(3)'}), "(loc='upper left', frameon=False, ncol=3)\n", (5569, 5610), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5679), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Magnetic Field(H)"""'], {'fontname': '"""Georgia"""'}), "('Magnetic Field(H)', fontname='Georgia')\n", (5638, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5692, 5742), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetization(M)"""'], {'fontname': '"""Georgia"""'}), "('Magnetization(M)', fontname='Georgia')\n", (5702, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5755, 5818), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetization vs Applied Field"""'], {'fontname': '"""Georgia"""'}), "('Magnetization vs Applied Field', fontname='Georgia')\n", (5764, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5842, 5852), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5850, 5852), 
True, 'import matplotlib.pyplot as plt\n'), ((6094, 6146), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'frameon': '(False)', 'ncol': '(2)'}), "(loc='upper right', frameon=False, ncol=2)\n", (6104, 6146), True, 'import matplotlib.pyplot as plt\n'), ((6164, 6212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature(T)"""'], {'fontname': '"""Georgia"""'}), "('Temperature(T)', fontname='Georgia')\n", (6174, 6212), True, 'import matplotlib.pyplot as plt\n'), ((6225, 6275), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetization(M)"""'], {'fontname': '"""Georgia"""'}), "('Magnetization(M)', fontname='Georgia')\n", (6235, 6275), True, 'import matplotlib.pyplot as plt\n'), ((6288, 6349), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetization vs Temperature"""'], {'fontname': '"""Georgia"""'}), "('Magnetization vs Temperature', fontname='Georgia')\n", (6297, 6349), True, 'import matplotlib.pyplot as plt\n'), ((6373, 6383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6381, 6383), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6763), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature(T)"""'], {'fontname': '"""Georgia"""'}), "('Temperature(T)', fontname='Georgia')\n", (6725, 6763), True, 'import matplotlib.pyplot as plt\n'), ((6776, 6814), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-∆Sm"""'], {'fontname': '"""Georgia"""'}), "('-∆Sm', fontname='Georgia')\n", (6786, 6814), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6879), 'matplotlib.pyplot.title', 'plt.title', (['"""-∆Sm vs Temperature"""'], {'fontname': '"""Georgia"""'}), "('-∆Sm vs Temperature', fontname='Georgia')\n", (6836, 6879), True, 'import matplotlib.pyplot as plt\n'), ((6903, 6913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6911, 6913), True, 'import matplotlib.pyplot as plt\n'), ((7119, 7171), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'frameon': '(False)', 'ncol': '(2)'}), 
"(loc='upper right', frameon=False, ncol=2)\n", (7129, 7171), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7258), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""H/M (Applied Field / Magnetization)"""'], {'fontname': '"""Georgia"""'}), "('H/M (Applied Field / Magnetization)', fontname='Georgia')\n", (7199, 7258), True, 'import matplotlib.pyplot as plt\n'), ((7271, 7331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^2 (Magnetization Square)"""'], {'fontname': '"""Georgia"""'}), "('M^2 (Magnetization Square)', fontname='Georgia')\n", (7281, 7331), True, 'import matplotlib.pyplot as plt\n'), ((7344, 7387), 'matplotlib.pyplot.title', 'plt.title', (['"""M^2 vs H/M"""'], {'fontname': '"""Georgia"""'}), "('M^2 vs H/M', fontname='Georgia')\n", (7353, 7387), True, 'import matplotlib.pyplot as plt\n'), ((7411, 7421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7419, 7421), True, 'import matplotlib.pyplot as plt\n'), ((7645, 7696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Magnetic Field(H)"""'], {'fontname': '"""Georgia"""'}), "('Magnetic Field(H)', fontname='Georgia')\n", (7655, 7696), True, 'import matplotlib.pyplot as plt\n'), ((7709, 7759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetization(M)"""'], {'fontname': '"""Georgia"""'}), "('Magnetization(M)', fontname='Georgia')\n", (7719, 7759), True, 'import matplotlib.pyplot as plt\n'), ((7772, 7835), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetization vs Applied Field"""'], {'fontname': '"""Georgia"""'}), "('Magnetization vs Applied Field', fontname='Georgia')\n", (7781, 7835), True, 'import matplotlib.pyplot as plt\n'), ((7859, 7869), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7867, 7869), True, 'import matplotlib.pyplot as plt\n'), ((8127, 8175), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature(T)"""'], {'fontname': '"""Georgia"""'}), "('Temperature(T)', fontname='Georgia')\n", (8137, 8175), True, 'import matplotlib.pyplot as plt\n'), ((8188, 
8238), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetization(M)"""'], {'fontname': '"""Georgia"""'}), "('Magnetization(M)', fontname='Georgia')\n", (8198, 8238), True, 'import matplotlib.pyplot as plt\n'), ((8251, 8312), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetization vs Temperature"""'], {'fontname': '"""Georgia"""'}), "('Magnetization vs Temperature', fontname='Georgia')\n", (8260, 8312), True, 'import matplotlib.pyplot as plt\n'), ((8336, 8346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8344, 8346), True, 'import matplotlib.pyplot as plt\n'), ((8602, 8650), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature(T)"""'], {'fontname': '"""Georgia"""'}), "('Temperature(T)', fontname='Georgia')\n", (8612, 8650), True, 'import matplotlib.pyplot as plt\n'), ((8663, 8701), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-∆Sm"""'], {'fontname': '"""Georgia"""'}), "('-∆Sm', fontname='Georgia')\n", (8673, 8701), True, 'import matplotlib.pyplot as plt\n'), ((8714, 8766), 'matplotlib.pyplot.title', 'plt.title', (['"""-∆Sm vs Temperature"""'], {'fontname': '"""Georgia"""'}), "('-∆Sm vs Temperature', fontname='Georgia')\n", (8723, 8766), True, 'import matplotlib.pyplot as plt\n'), ((8790, 8800), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8798, 8800), True, 'import matplotlib.pyplot as plt\n'), ((9020, 9089), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""H/M (Applied Field / Magnetization)"""'], {'fontname': '"""Georgia"""'}), "('H/M (Applied Field / Magnetization)', fontname='Georgia')\n", (9030, 9089), True, 'import matplotlib.pyplot as plt\n'), ((9102, 9162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M^2 (Magnetization Square)"""'], {'fontname': '"""Georgia"""'}), "('M^2 (Magnetization Square)', fontname='Georgia')\n", (9112, 9162), True, 'import matplotlib.pyplot as plt\n'), ((9175, 9218), 'matplotlib.pyplot.title', 'plt.title', (['"""M^2 vs H/M"""'], {'fontname': '"""Georgia"""'}), "('M^2 vs H/M', 
fontname='Georgia')\n", (9184, 9218), True, 'import matplotlib.pyplot as plt\n'), ((9242, 9252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9250, 9252), True, 'import matplotlib.pyplot as plt\n'), ((9765, 9838), 'matplotlib.pyplot.plot', 'plt.plot', (['H_by_M_pow_MFT[i]', 'M_pow_MFT[i]'], {'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(H_by_M_pow_MFT[i], M_pow_MFT[i], linestyle='solid', linewidth=2)\n", (9773, 9838), True, 'import matplotlib.pyplot as plt\n'), ((10087, 10162), 'matplotlib.pyplot.plot', 'plt.plot', (['H_by_M_pow_TMFT[i]', 'M_pow_TMFT[i]'], {'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(H_by_M_pow_TMFT[i], M_pow_TMFT[i], linestyle='solid', linewidth=2)\n", (10095, 10162), True, 'import matplotlib.pyplot as plt\n'), ((10412, 10485), 'matplotlib.pyplot.plot', 'plt.plot', (['H_by_M_pow_3DH[i]', 'M_pow_3DH[i]'], {'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(H_by_M_pow_3DH[i], M_pow_3DH[i], linestyle='solid', linewidth=2)\n", (10420, 10485), True, 'import matplotlib.pyplot as plt\n'), ((10740, 10813), 'matplotlib.pyplot.plot', 'plt.plot', (['H_by_M_pow_3DI[i]', 'M_pow_3DI[i]'], {'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(H_by_M_pow_3DI[i], M_pow_3DI[i], linestyle='solid', linewidth=2)\n", (10748, 10813), True, 'import matplotlib.pyplot as plt\n'), ((11933, 11966), 'numpy.max', 'np.max', (['six_entropy_change_con[j]'], {}), '(six_entropy_change_con[j])\n', (11939, 11966), True, 'import numpy as np\n'), ((6647, 6699), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'frameon': '(False)', 'ncol': '(2)'}), "(loc='upper right', frameon=False, ncol=2)\n", (6657, 6699), True, 'import matplotlib.pyplot as plt\n'), ((13696, 13711), 'numpy.max', 'np.max', (['RCP_con'], {}), '(RCP_con)\n', (13702, 13711), True, 'import numpy as np\n'), ((13793, 13811), 'numpy.max', 'np.max', (['T_FWHM_con'], {}), '(T_FWHM_con)\n', (13799, 13811), True, 'import numpy as np\n')] |
import numpy as np #Numpy maths
def XYZ_equatorial(X, Y, Z, X_error=None, Y_error=None, Z_error=None):
    """
    Transforms Galactic position XYZ to equatorial coordinates (ra,dec) and distance. All inputs must be numpy arrays of the same dimension.
    param X: Galactic position X toward Galactic center (parsec)
    param Y: Galactic position Y in the direction of Galactic motion (parsec)
    param Z: Galactic position Z outside and perpendicular to Galactic plane (toward Galactic North pole; parsec)
    param X_error, Y_error, Z_error: optional measurement errors on X, Y, Z (parsec); if any is given, all three must be given with the same size as X
    output (ra,dec,dist): Tuple containing equatorial position and distance (right ascension in degrees; declination in degrees; distance in parsec)
    output (ra,dec,dist,edist): Tuple containing equatorial position, distance, and measurement error on distance (right ascension in degrees; declination in degrees; distance in parsec, error in parsec), used if any measurement errors are given as input.
    """
    #Verify keywords
    num_stars = np.size(X)
    if np.size(Y) != num_stars or np.size(Z) != num_stars:
        raise ValueError('X, Y and Z must all be numpy arrays of the same size !')
    if (X_error is not None and np.size(X_error) != num_stars) or (Y_error is not None and np.size(Y_error) != num_stars) or (Z_error is not None and np.size(Z_error) != num_stars):
        raise ValueError('X_error, Y_error and Z_error must be numpy arrays of the same size as X, Y and Z !')
    #Compute distance (bug fix: np.SQRT does not exist in numpy and raised AttributeError; the correct name is np.sqrt)
    dist = np.sqrt(X**2 + Y**2 + Z**2)
    #Compute Galactic coordinates
    gl = np.degrees(np.arctan2(Y, X))
    XY_dist = np.sqrt(X**2 + Y**2)
    gb = 90.0 - np.degrees(np.arctan2(XY_dist, Z))
    #Transform Galactic coordinates to equatorial coordinates
    (ra, dec) = galactic_equatorial(gl, gb)
    #Propagate measurement errors on distance (first-order error propagation)
    if X_error is not None:
        edist = np.sqrt((X*X_error)**2 + (Y*Y_error)**2 + (Z*Z_error)**2)/dist
        return (ra, dec, dist, edist)
    else:
        return (ra, dec, dist)
"numpy.size",
"numpy.arctan2",
"numpy.SQRT",
"numpy.sqrt"
] | [((935, 945), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (942, 945), True, 'import numpy as np\n'), ((1392, 1425), 'numpy.SQRT', 'np.SQRT', (['(X ** 2 + Y ** 2 + Z ** 2)'], {}), '(X ** 2 + Y ** 2 + Z ** 2)\n', (1399, 1425), True, 'import numpy as np\n'), ((1499, 1523), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (1506, 1523), True, 'import numpy as np\n'), ((1470, 1486), 'numpy.arctan2', 'np.arctan2', (['Y', 'X'], {}), '(Y, X)\n', (1480, 1486), True, 'import numpy as np\n'), ((950, 960), 'numpy.size', 'np.size', (['Y'], {}), '(Y)\n', (957, 960), True, 'import numpy as np\n'), ((977, 987), 'numpy.size', 'np.size', (['Z'], {}), '(Z)\n', (984, 987), True, 'import numpy as np\n'), ((1544, 1566), 'numpy.arctan2', 'np.arctan2', (['XY_dist', 'Z'], {}), '(XY_dist, Z)\n', (1554, 1566), True, 'import numpy as np\n'), ((1751, 1820), 'numpy.SQRT', 'np.SQRT', (['((X * X_error) ** 2 + (Y * Y_error) ** 2 + (Z * Z_error) ** 2)'], {}), '((X * X_error) ** 2 + (Y * Y_error) ** 2 + (Z * Z_error) ** 2)\n', (1758, 1820), True, 'import numpy as np\n'), ((1108, 1124), 'numpy.size', 'np.size', (['X_error'], {}), '(X_error)\n', (1115, 1124), True, 'import numpy as np\n'), ((1167, 1183), 'numpy.size', 'np.size', (['Y_error'], {}), '(Y_error)\n', (1174, 1183), True, 'import numpy as np\n'), ((1226, 1242), 'numpy.size', 'np.size', (['Z_error'], {}), '(Z_error)\n', (1233, 1242), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, '../../')
import numpy as np
import pandas as pd
import mut.thermo
import mut.bayes
import mut.stats
import joblib
import multiprocessing as mp
cpus = mp.cpu_count() - 2  # leave two cores free for the OS / other work
import tqdm
constants = mut.thermo.load_constants()  # thermodynamic constants shared across the analysis
# Load the prior predictive check data.
prior_data = pd.read_csv('../../data/Chure2019_IND_prior_predictive_checks.csv')
# Load (compile) the Stan models once; they are reused for every simulation draw.
KaKi_model = mut.bayes.StanModel('../stan/Chure2019_KaKi_only.stan')
KaKi_epAI_model = mut.bayes.StanModel('../stan/Chure2019_KaKi_epAI.stan')
_model = {'KaKi_only': KaKi_model, 'KaKi_epAI':KaKi_epAI_model}
# Accumulators for the posterior samples and SBC statistics of each round.
samples_dfs = []
sbc_dfs = []
# Define the thinning constant for computing the rank statistic.
thin = 5
# Set up a single round of the SBC as a function for easy parallelization
def sbc(g, d):
    """Run one round of simulation-based calibration (SBC) for a single prior draw.

    Parameters
    ----------
    g : tuple
        Group key from ``prior_data.groupby(['model', 'draw'])``; ``g[0]`` is
        the model name ('KaKi_only' or 'KaKi_epAI') and ``g[1]`` the draw index.
    d : pandas.DataFrame
        Data for this draw: simulated fold-change ('fc_draw'), inducer
        concentration ('IPTGuM'), and the ground-truth parameter values.

    Returns
    -------
    list
        ``[samples, _sbc_dfs]`` where ``samples`` holds the posterior samples
        (with 'sim_idx' and 'model' columns added) and ``_sbc_dfs`` the rank
        statistic, z-score, shrinkage, and point estimates per parameter.
    """
    # Generate the data dictionary passed to Stan.
    data_dict = {'J':1,
             'N': len(d),
             'idx': np.ones(len(d)).astype(int),
             'ep_RA': -13.9,
             'R': np.ones(len(d)) * constants['RBS1027'],
             'Nns': 4.6E6,
             'n_sites': constants['n_sites'],
             'c': d['IPTGuM'],
             'fc': d['fc_draw']}
    # Map Stan's indexed parameter names to flat column names.
    columns={'Ka[1]': 'Ka', 'sigma[1]':'sigma', 'Ki[1]':'Ki',
            'ep_a[1]':'ep_a', 'ep_i[1]': 'ep_i'}
    # Ground truth for each parameter, taken from the prior draw.
    gt = {'Ka': d['ka'].unique(),
          'Ki': d['ki'].unique(),
          'ep_a':d['ep_a'].unique(),
          'ep_i':d['ep_i'].unique(),
          'sigma': d['sigma'].unique()}
    if g[0] == 'KaKi_only':
        # ep_AI is fixed to the literature value for this model.
        data_dict['ep_AI'] = constants['ep_AI']
        pars = ['Ka', 'Ki', 'ep_a', 'ep_i', 'sigma']
    else:
        gt['ep_AI'] = d['ep_ai'].unique()
        pars = ['Ka', 'Ki', 'ep_AI', 'ep_a', 'ep_i', 'sigma']
        columns['ep_AI[1]'] = 'ep_AI'
    # Sample the model.
    model = _model[g[0]]
    _, samples = model.sample(data_dict=data_dict, iter=2000, n_jobs=1, chains=4)
    samples.rename(columns=columns, inplace=True)
    samples['sim_idx'] = g[1]
    samples['model'] = g[0]
    # NOTE(review): this mutates a module-level list; under joblib's
    # process-based backend the mutation happens in the worker and is not
    # visible to the parent -- the returned value is what actually matters.
    samples_dfs.append(samples)
    # Compute the SBC properties for each parameter.
    _sbc_dfs = []
    for p in pars:
        _df = pd.DataFrame([])
        z_score = (np.mean(samples[p]) - gt[p]) / np.std(samples[p])
        shrinkage = 1 - (np.var(samples[p]) / np.var(prior_data[p.lower()].unique()))
        _df['z_score'] = z_score
        _df['shrinkage'] = shrinkage
        _df['param'] = p
        # Rank statistic: number of thinned posterior draws below the ground truth.
        _df['rank'] = np.sum(samples[p].values[::thin] < gt[p])
        _df['rank_ndraws'] = len(samples[p].values[::thin])
        # Bug fix: median/mean were assigned to each other's column in the original.
        _df['post_median'] = np.median(samples[p])
        _df['post_mean'] = np.mean(samples[p])
        # Posterior mode approximated by the draw with the highest log-probability.
        _df['post_mode'] = samples.iloc[np.argmax(samples['lp__'].values)][p]
        _df['model'] = g[0]
        _df['ground_truth'] = gt[p]
        _sbc_dfs.append(_df)
    _sbc_dfs = pd.concat(_sbc_dfs)
    _sbc_dfs['sim_idx'] = g[1]
    sbc_dfs.append(_sbc_dfs)
    return [samples, _sbc_dfs]
# Fan the SBC rounds out over the worker pool; each round returns
# [posterior samples, per-parameter SBC statistics].
out = joblib.Parallel(n_jobs=cpus)(joblib.delayed(sbc)(g, d)for g, d in tqdm.tqdm(prior_data.groupby(['model', 'draw'])))
_samples = [a[0] for a in out]  # posterior sample frames (not written to disk here)
_sbc = [a[1] for a in out]  # per-round SBC statistic frames
sbc_df = pd.concat(_sbc)
sbc_df.to_csv('../../data/Chure2019_IND_sbc_samples.csv', index=False)
| [
"pandas.DataFrame",
"numpy.sum",
"numpy.argmax",
"pandas.read_csv",
"numpy.median",
"numpy.std",
"sys.path.insert",
"numpy.mean",
"joblib.Parallel",
"joblib.delayed",
"numpy.var",
"pandas.concat",
"multiprocessing.cpu_count"
] | [((35, 63), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (50, 63), False, 'import sys\n'), ((331, 398), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/Chure2019_IND_prior_predictive_checks.csv"""'], {}), "('../../data/Chure2019_IND_prior_predictive_checks.csv')\n", (342, 398), True, 'import pandas as pd\n'), ((3599, 3614), 'pandas.concat', 'pd.concat', (['_sbc'], {}), '(_sbc)\n', (3608, 3614), True, 'import pandas as pd\n'), ((205, 219), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (217, 219), True, 'import multiprocessing as mp\n'), ((3285, 3304), 'pandas.concat', 'pd.concat', (['_sbc_dfs'], {}), '(_sbc_dfs)\n', (3294, 3304), True, 'import pandas as pd\n'), ((3416, 3444), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'cpus'}), '(n_jobs=cpus)\n', (3431, 3444), False, 'import joblib\n'), ((2540, 2556), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (2552, 2556), True, 'import pandas as pd\n'), ((2854, 2895), 'numpy.sum', 'np.sum', (['(samples[p].values[::thin] < gt[p])'], {}), '(samples[p].values[::thin] < gt[p])\n', (2860, 2895), True, 'import numpy as np\n'), ((2993, 3012), 'numpy.mean', 'np.mean', (['samples[p]'], {}), '(samples[p])\n', (3000, 3012), True, 'import numpy as np\n'), ((3044, 3065), 'numpy.median', 'np.median', (['samples[p]'], {}), '(samples[p])\n', (3053, 3065), True, 'import numpy as np\n'), ((2611, 2629), 'numpy.std', 'np.std', (['samples[p]'], {}), '(samples[p])\n', (2617, 2629), True, 'import numpy as np\n'), ((3445, 3464), 'joblib.delayed', 'joblib.delayed', (['sbc'], {}), '(sbc)\n', (3459, 3464), False, 'import joblib\n'), ((2580, 2599), 'numpy.mean', 'np.mean', (['samples[p]'], {}), '(samples[p])\n', (2587, 2599), True, 'import numpy as np\n'), ((2659, 2677), 'numpy.var', 'np.var', (['samples[p]'], {}), '(samples[p])\n', (2665, 2677), True, 'import numpy as np\n'), ((3110, 3143), 'numpy.argmax', 'np.argmax', (["samples['lp__'].values"], {}), 
"(samples['lp__'].values)\n", (3119, 3143), True, 'import numpy as np\n')] |
#!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
import logging
import math
import numpy as np
import pandas as pd
from constants import *
log = logging.getLogger(__name__)  # module-level logger, named after this module
def _split_pandas_data_with_ratios(data, ratios, seed=SEED, shuffle=False):
    """Split a pandas DataFrame into consecutive portions of the given ratios.

    Note:
        Implementation referenced from `this source
        <https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.

    Args:
        data (pd.DataFrame): Pandas data frame to be split.
        ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
        seed (int): random seed, used only when ``shuffle`` is True.
        shuffle (bool): whether data will be shuffled when being split.

    Returns:
        list: List of pd.DataFrame split by the given specifications, each
        carrying a ``split_index`` column identifying its portion.
    """
    if math.fsum(ratios) != 1.0:
        raise ValueError("The ratios have to sum to 1")
    # Cumulative split boundaries (as fractions), excluding the trailing 1.0.
    boundaries = np.cumsum(ratios).tolist()[:-1]
    if shuffle:
        data = data.sample(frac=1, random_state=seed)
    cut_points = [round(frac * len(data)) for frac in boundaries]
    portions = np.split(data, cut_points)
    # Tag every portion so group-wise splitting can recover it efficiently later.
    for idx, portion in enumerate(portions):
        portion["split_index"] = idx
    return portions
def split_stratified(data, ratio, col_user=DEFAULT_USER_COL, seed=SEED, shuffle=True):
    """Pandas stratified splitter.

    For each user, the split takes proportions of ratings specified by the
    split ratio(s), so every output split contains every user.

    Args:
        data (pd.DataFrame): Pandas DataFrame to be split.
        ratio (list): Ratio for splitting data. If the ratios are not summed to 1,
            they will be normalized.
        seed (int): Seed.
        shuffle (bool): Whether or not shuffle each user profile before splitting.
        col_user (str): column name of user IDs.

    Returns:
        list: Splits of the input data as pd.DataFrame.
    """
    if col_user not in data.columns:
        raise ValueError("Schema of data not valid. Missing User Col")
    if math.fsum(ratio) != 1.0:
        logging.warning("ratios passed don't sum to 1, normalization is applied")
        total = math.fsum(ratio)
        ratio = [x / total for x in ratio]
    grouped = data.groupby(col_user)
    # Split every user profile independently, then stitch the pieces together.
    per_user_splits = []
    for name, group in grouped:
        user_splits = _split_pandas_data_with_ratios(
            grouped.get_group(name), ratio, shuffle=shuffle, seed=seed
        )
        per_user_splits.append(pd.concat(user_splits))
    all_splits = pd.concat(per_user_splits)
    # Recover each split across all users via the split_index tag.
    return [
        all_splits[all_splits["split_index"] == i].drop("split_index", axis=1)
        for i in range(len(ratio))
    ]
def split_hit_rate(data, col_user=DEFAULT_USER_COL, seed=SEED, shuffle=True):
    """
    Remove a single interaction for each user.

    Bug fix: the original selected and dropped the held-out row by its index
    *label* (``group.loc[[group.index[-1]]]`` / ``group.drop(group.index[-1])``),
    which returns/removes every row sharing that label when the DataFrame has
    duplicate index values. Positional ``iloc`` always takes exactly one row.

    Args:
        data (pd.DataFrame): interactions dataframe, every row is an interaction between a user and an item
        col_user (str): name of the column containing user IDs
        seed (int): random seed
        shuffle (bool): whether to shuffle the user profile before sampling the interaction to remove,
            if False the last one will be removed
    Returns:
        pd.DataFrame, pd.DataFrame: train and test interactions dataframes
    """
    # Consistency with split_stratified: validate the schema up front.
    if col_user not in data.columns:
        raise ValueError("Schema of data not valid. Missing User Col")
    df_grouped = data.groupby(col_user)
    train = []
    test = []
    for name, group in df_grouped:
        if shuffle:
            group = group.sample(frac=1, random_state=seed)
        # Hold out exactly the last row of the (possibly shuffled) profile.
        sample = group.iloc[[-1]]
        group = group.iloc[:-1]
        train.append(group)
        test.append(sample)
    train_df = pd.concat(train)
    test_df = pd.concat(test)
    return train_df, test_df
| [
"logging.warning",
"math.fsum",
"numpy.cumsum",
"pandas.concat",
"logging.getLogger"
] | [((159, 186), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (176, 186), False, 'import logging\n'), ((2893, 2910), 'pandas.concat', 'pd.concat', (['splits'], {}), '(splits)\n', (2902, 2910), True, 'import pandas as pd\n'), ((4050, 4066), 'pandas.concat', 'pd.concat', (['train'], {}), '(train)\n', (4059, 4066), True, 'import pandas as pd\n'), ((4081, 4096), 'pandas.concat', 'pd.concat', (['test'], {}), '(test)\n', (4090, 4096), True, 'import pandas as pd\n'), ((872, 889), 'math.fsum', 'math.fsum', (['ratios'], {}), '(ratios)\n', (881, 889), False, 'import math\n'), ((2219, 2235), 'math.fsum', 'math.fsum', (['ratio'], {}), '(ratio)\n', (2228, 2235), False, 'import math\n'), ((2252, 2325), 'logging.warning', 'logging.warning', (['"""ratios passed don\'t sum to 1, normalization is applied"""'], {}), '("ratios passed don\'t sum to 1, normalization is applied")\n', (2267, 2325), False, 'import logging\n'), ((2753, 2776), 'pandas.concat', 'pd.concat', (['group_splits'], {}), '(group_splits)\n', (2762, 2776), True, 'import pandas as pd\n'), ((973, 990), 'numpy.cumsum', 'np.cumsum', (['ratios'], {}), '(ratios)\n', (982, 990), True, 'import numpy as np\n'), ((2347, 2363), 'math.fsum', 'math.fsum', (['ratio'], {}), '(ratio)\n', (2356, 2363), False, 'import math\n')] |
import numpy as np
import pandangas as pg
import pandangas.simulation as sim
import pandangas.topology as top
import pytest
import fluids
from thermo.chemical import Chemical
from tests.test_core import fix_create
def test_scaled_loads(fix_create):
    """The per-bus scaled loads match the expected mass flows."""
    expected = {'BUS2': 0.000262, 'BUS3': 0.000394}
    assert sim._scaled_loads_as_dict(fix_create) == expected
def test_p_min_loads(fix_create):
    """Each load bus reports the expected minimum pressure (Pa)."""
    expected = {'BUS2': 0.022E5, 'BUS3': 0.022E5}
    assert sim._p_min_loads_as_dict(fix_create) == expected
def test_p_nom_feed(fix_create):
    """Each feeder bus reports the expected nominal pressure (Pa)."""
    expected = {'BUS1': 0.025E5, 'BUSF': 0.9E5}
    assert sim._p_nom_feed_as_dict(fix_create) == expected
def test_i_mat(fix_create):
    """The incidence matrix of the BP level is an ndarray containing the expected rows."""
    graph = top.graphs_by_level_as_dict(fix_create)["BP"]
    incidence = sim._i_mat(graph)
    assert type(incidence) is np.ndarray
    expected_rows = np.array([[1., 0., 1.], [-1., -1., 0.], [0., 1., -1.]])
    for row in expected_rows:
        assert row in incidence
def test_dp_from_m_dot():
    """Pressure drop for a reference pipe and gas matches the known value."""
    gas = Chemical('natural gas', T=10+273.15, P=4.5E5)
    roughness = fluids.material_roughness(
        fluids.nearest_material_roughness('steel', clean=True)
    )
    drop = sim._dp_from_m_dot_vec(0.005, 100, 0.05, roughness, gas).tolist()
    assert round(drop, 1) == 61.8
def test_run_sim(fix_create):
    """Low-pressure simulation results on the fixture network."""
    p_nodes, m_dot_pipes, m_dot_nodes, gas = sim._run_sim(fix_create)
    assert round(gas.rho, 3) == 0.017
    assert p_nodes == {'BUS1': 2500.0, 'BUS2': 1962.7, 'BUS3': 1827.8}
    assert m_dot_pipes == {'PIPE3': 6.6e-05, 'PIPE1': 0.000328, 'PIPE2': 0.000328}
    assert m_dot_nodes == {'BUS1': -0.000656, 'BUS2': 0.000262, 'BUS3': 0.000394}
@pytest.fixture()
def fix_create_full_mp():
    """A four-bus MP network: three loads, four pipes and one feeder."""
    net = pg.create_empty_network()
    feed_bus = pg.create_bus(net, level="MP", name="BUSF")
    bus1 = pg.create_bus(net, level="MP", name="BUS1")
    bus2 = pg.create_bus(net, level="MP", name="BUS2")
    bus3 = pg.create_bus(net, level="MP", name="BUS3")
    pg.create_load(net, bus1, p_kW=10.0, name="LOAD1")
    pg.create_load(net, bus2, p_kW=15.0, name="LOAD2")
    pg.create_load(net, bus3, p_kW=20.0, name="LOAD3")
    pg.create_pipe(net, feed_bus, bus1, length_m=100, diameter_m=0.05, name="PIPE0")
    pg.create_pipe(net, bus1, bus2, length_m=400, diameter_m=0.05, name="PIPE1")
    pg.create_pipe(net, bus1, bus3, length_m=500, diameter_m=0.05, name="PIPE2")
    pg.create_pipe(net, bus2, bus3, length_m=500, diameter_m=0.05, name="PIPE3")
    # TODO: switch to absolute pressure (Pa) ?
    pg.create_feeder(net, feed_bus, p_lim_kW=50, p_Pa=0.9E5, name="FEEDER")
    return net
def test_run_sim_mp(fix_create_full_mp):
    """Medium-pressure simulation results on the full MP network.

    Requests the ``fix_create_full_mp`` fixture instead of calling the
    fixture function directly: calling a ``@pytest.fixture``-decorated
    function raises an error under pytest >= 4, and the previously
    requested ``fix_create`` fixture was never used.
    """
    net = fix_create_full_mp
    p_nodes, m_dot_pipes, m_dot_nodes, gas = sim._run_sim(net, level="MP")
    assert round(gas.rho, 3) == 0.683
    assert p_nodes == {'BUS1': 89968.3, 'BUS2': 89949.1, 'BUS3': 89945.3, 'BUSF': 90000.0}
    assert m_dot_pipes == {'PIPE0': 0.001181, 'PIPE1': 0.000469, 'PIPE2': 0.00045, 'PIPE3': 7.5e-05}
    assert m_dot_nodes == {'BUSF': -0.001181, 'BUS1': 0.000262, 'BUS2': 0.000394, 'BUS3': 0.000525}
| [
"pandangas.simulation._run_sim",
"pandangas.create_pipe",
"pandangas.simulation._p_min_loads_as_dict",
"pandangas.simulation._dp_from_m_dot_vec",
"fluids.nearest_material_roughness",
"pandangas.simulation._scaled_loads_as_dict",
"fluids.material_roughness",
"pandangas.create_empty_network",
"pytest.... | [((1584, 1600), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1598, 1600), False, 'import pytest\n'), ((735, 748), 'pandangas.simulation._i_mat', 'sim._i_mat', (['g'], {}), '(g)\n', (745, 748), True, 'import pandangas.simulation as sim\n'), ((799, 863), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [-1.0, -1.0, 0.0], [0.0, 1.0, -1.0]]'], {}), '([[1.0, 0.0, 1.0], [-1.0, -1.0, 0.0], [0.0, 1.0, -1.0]])\n', (807, 863), True, 'import numpy as np\n'), ((940, 990), 'thermo.chemical.Chemical', 'Chemical', (['"""natural gas"""'], {'T': '(10 + 273.15)', 'P': '(450000.0)'}), "('natural gas', T=10 + 273.15, P=450000.0)\n", (948, 990), False, 'from thermo.chemical import Chemical\n'), ((1001, 1055), 'fluids.nearest_material_roughness', 'fluids.nearest_material_roughness', (['"""steel"""'], {'clean': '(True)'}), "('steel', clean=True)\n", (1034, 1055), False, 'import fluids\n'), ((1066, 1101), 'fluids.material_roughness', 'fluids.material_roughness', (['material'], {}), '(material)\n', (1091, 1101), False, 'import fluids\n'), ((1289, 1306), 'pandangas.simulation._run_sim', 'sim._run_sim', (['net'], {}), '(net)\n', (1301, 1306), True, 'import pandangas.simulation as sim\n'), ((1637, 1662), 'pandangas.create_empty_network', 'pg.create_empty_network', ([], {}), '()\n', (1660, 1662), True, 'import pandangas as pg\n'), ((1675, 1718), 'pandangas.create_bus', 'pg.create_bus', (['net'], {'level': '"""MP"""', 'name': '"""BUSF"""'}), "(net, level='MP', name='BUSF')\n", (1688, 1718), True, 'import pandangas as pg\n'), ((1731, 1774), 'pandangas.create_bus', 'pg.create_bus', (['net'], {'level': '"""MP"""', 'name': '"""BUS1"""'}), "(net, level='MP', name='BUS1')\n", (1744, 1774), True, 'import pandangas as pg\n'), ((1786, 1829), 'pandangas.create_bus', 'pg.create_bus', (['net'], {'level': '"""MP"""', 'name': '"""BUS2"""'}), "(net, level='MP', name='BUS2')\n", (1799, 1829), True, 'import pandangas as pg\n'), ((1841, 1884), 'pandangas.create_bus', 'pg.create_bus', 
(['net'], {'level': '"""MP"""', 'name': '"""BUS3"""'}), "(net, level='MP', name='BUS3')\n", (1854, 1884), True, 'import pandangas as pg\n'), ((1890, 1940), 'pandangas.create_load', 'pg.create_load', (['net', 'bus1'], {'p_kW': '(10.0)', 'name': '"""LOAD1"""'}), "(net, bus1, p_kW=10.0, name='LOAD1')\n", (1904, 1940), True, 'import pandangas as pg\n'), ((1945, 1995), 'pandangas.create_load', 'pg.create_load', (['net', 'bus2'], {'p_kW': '(15.0)', 'name': '"""LOAD2"""'}), "(net, bus2, p_kW=15.0, name='LOAD2')\n", (1959, 1995), True, 'import pandangas as pg\n'), ((2000, 2050), 'pandangas.create_load', 'pg.create_load', (['net', 'bus3'], {'p_kW': '(20.0)', 'name': '"""LOAD3"""'}), "(net, bus3, p_kW=20.0, name='LOAD3')\n", (2014, 2050), True, 'import pandangas as pg\n'), ((2056, 2132), 'pandangas.create_pipe', 'pg.create_pipe', (['net', 'busf', 'bus1'], {'length_m': '(100)', 'diameter_m': '(0.05)', 'name': '"""PIPE0"""'}), "(net, busf, bus1, length_m=100, diameter_m=0.05, name='PIPE0')\n", (2070, 2132), True, 'import pandangas as pg\n'), ((2137, 2213), 'pandangas.create_pipe', 'pg.create_pipe', (['net', 'bus1', 'bus2'], {'length_m': '(400)', 'diameter_m': '(0.05)', 'name': '"""PIPE1"""'}), "(net, bus1, bus2, length_m=400, diameter_m=0.05, name='PIPE1')\n", (2151, 2213), True, 'import pandangas as pg\n'), ((2218, 2294), 'pandangas.create_pipe', 'pg.create_pipe', (['net', 'bus1', 'bus3'], {'length_m': '(500)', 'diameter_m': '(0.05)', 'name': '"""PIPE2"""'}), "(net, bus1, bus3, length_m=500, diameter_m=0.05, name='PIPE2')\n", (2232, 2294), True, 'import pandangas as pg\n'), ((2299, 2375), 'pandangas.create_pipe', 'pg.create_pipe', (['net', 'bus2', 'bus3'], {'length_m': '(500)', 'diameter_m': '(0.05)', 'name': '"""PIPE3"""'}), "(net, bus2, bus3, length_m=500, diameter_m=0.05, name='PIPE3')\n", (2313, 2375), True, 'import pandangas as pg\n'), ((2429, 2498), 'pandangas.create_feeder', 'pg.create_feeder', (['net', 'busf'], {'p_lim_kW': '(50)', 'p_Pa': '(90000.0)', 'name': 
'"""FEEDER"""'}), "(net, busf, p_lim_kW=50, p_Pa=90000.0, name='FEEDER')\n", (2445, 2498), True, 'import pandangas as pg\n'), ((2624, 2653), 'pandangas.simulation._run_sim', 'sim._run_sim', (['net'], {'level': '"""MP"""'}), "(net, level='MP')\n", (2636, 2653), True, 'import pandangas.simulation as sim\n'), ((286, 316), 'pandangas.simulation._scaled_loads_as_dict', 'sim._scaled_loads_as_dict', (['net'], {}), '(net)\n', (311, 316), True, 'import pandangas.simulation as sim\n'), ((425, 454), 'pandangas.simulation._p_min_loads_as_dict', 'sim._p_min_loads_as_dict', (['net'], {}), '(net)\n', (449, 454), True, 'import pandangas.simulation as sim\n'), ((560, 588), 'pandangas.simulation._p_nom_feed_as_dict', 'sim._p_nom_feed_as_dict', (['net'], {}), '(net)\n', (583, 588), True, 'import pandangas.simulation as sim\n'), ((684, 716), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (711, 716), True, 'import pandangas.topology as top\n'), ((1119, 1169), 'pandangas.simulation._dp_from_m_dot_vec', 'sim._dp_from_m_dot_vec', (['(0.005)', '(100)', '(0.05)', 'eps', 'gas'], {}), '(0.005, 100, 0.05, eps, gas)\n', (1141, 1169), True, 'import pandangas.simulation as sim\n')] |
# Copyright 2016-2020 <NAME>. See also the LICENSE file.
import numpy as np
class SphericalLinearPreferenceModel:
    """Generates vectors similar to stored example vectors.

    With no training examples, generation falls back to uniform random
    directions; otherwise the examples are mixed in n-dimensional
    spherical coordinates and perturbed with normal noise.
    """
    def __init__(self, shape=512, rng=None):
        """
        Create model object.
        :param shape: shape of the vector to learn the preference from.
        :param rng: optional numpy RandomState; defaults to RandomState(0).
        """
        # NOTE: `rng or ...` treats a falsy rng as missing; fine for RandomState objects.
        self._rng = rng or np.random.RandomState(0)
        self._shape = shape
        # Learnable parameters
        self._training_examples = []
    @property
    def is_random(self):
        # No training data yet -> generate() samples uniform directions.
        return len(self._training_examples) == 0
    def train(self, training_examples):
        # "Training" just stores the examples; generate() mixes them on demand.
        self._training_examples = training_examples
        # self._r0 = -5
    def generate(self, size, variance=0):
        """
        Generate new data for current model parameters.
        :param size the number of vectors to generate.
        :param variance the larger the factor, the more mutation has the output
            (an index 0..4 into the hard-coded parameter table below).
        :return: an array of vectors similar to those used for training.
        """
        if self.is_random:
            return sample_uniform_on_sphere(self._rng, self._shape, size)
        # For variance from 0 to 4
        # a, scale, std
        # (Dirichlet concentration, scale around the mean, normal-noise std.)
        params = [
            (10, 1, .02),
            (3, 1, .03),
            (1, 1, .05), # Middle range - a uniform dist. in convex combination of training examples
            (1, 1.5, .07), # Start going outside of the convex combination of training examples
            (1.2, 2.0, .1) # Concentrate in the middle, as the values at the border have little visual difference
        ][variance]
        k = len(self._training_examples)
        # Convert to spherical coordinates
        _, phi = cartesian_to_spherical_n(self._training_examples)
        # [0, pi] -> [-pi, pi] for all but the last
        phi[:, :-1] = 2 * phi[:, :-1] - np.pi
        if k == 1:
            # Only one training example, it will be varied later by a normal "noise".
            output_phi = phi
        else:
            # Mix k training examples
            if hasattr(self, '_r0') and k == 2:
                # Go through convex combinations.
                # NOTE(review): debug path, active only if _r0 was set manually
                # (see the commented-out line in train()); mutates state per call.
                self._r0 += 0.1
                r = np.array([self._r0, 1 - self._r0]).reshape(size, k)
            elif k == 2 and False:
                # Simple test for n = 2
                # NOTE(review): dead branch (`and False`), kept for experimentation.
                r = self._rng.standard_normal(size=1) * variance + 0.5
                r = np.array([r, 1-r]).reshape(size, k)
            else:
                # Random coefficients of shape (size, k)
                r = scaled_dirichlet(self._rng, k=k, size=size, a=params[0], scale=params[1])
            # NOTE(review): leftover debug print — consider removing.
            print(r, r.sum(axis=1))
        # Sines and cosines of shape (size, k, 511)
        sin = np.broadcast_to(np.sin(phi), (size,) + phi.shape)
        cos = np.broadcast_to(np.cos(phi), (size,) + phi.shape)
        # Expand to shape (size, k, 1)
        r = np.expand_dims(r, 2)
        sin = sin * r
        cos = cos * r
        # Linear combinations of shape (size, 511)
        output_phi = np.arctan2(sin.sum(axis=1), cos.sum(axis=1))
        # Add normal "noise"
        output_phi += self._rng.normal(scale=params[2], size=output_phi.shape)
        # [-pi, pi] -> [0, pi] for all but the last
        output_phi[:, :-1] = (output_phi[:, :-1] + np.pi) / 2
        output = spherical_to_cartesian_n(np.ones((size, 1)), output_phi)
        return output
def spherical_to_cartesian_n(r, phi):
    """
    Convert spherical to cartesian coordinates in n dimensions.
    See https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates.
    :param r: radius (a scalar or a column of scalars).
    :param phi: a vector of n-1 angles or an array of such row vectors.
        phi[0:n-2] vary in [0, pi]; the last angle in [0, 2*pi] or [-pi, pi].
    :return: cartesian coordinates (a vector of size n or an array of rows).
    """
    is_batched = phi.ndim != 1
    axis = 1 if is_batched else 0
    pad_shape = phi.shape[:1] + (1,) if is_batched else (1,)
    pad = np.full(pad_shape, 1.0, dtype=phi.dtype)
    # x_i = r * cos(phi_i) * prod_{j<i} sin(phi_j); the `1` padding supplies
    # the missing leading sine product and the missing trailing cosine.
    sine_products = np.concatenate((pad, np.cumprod(np.sin(phi), axis=axis)), axis=axis)
    cosines = np.concatenate((np.cos(phi), pad), axis=axis)
    return sine_products * cosines * r
def cartesian_to_spherical_n(x, eps=1e-10):
    """
    Convert cartesian to spherical coordinates in n dimensions.
    See https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates.
    :param x: cartesian coordinates (a vector or an array of row vectors).
    :param eps: suffix norms below eps are treated as 0 (angle set to 0).
    :return: (r, phi) where r is the radius (scalar or column of scalars)
        and phi holds n-1 angles; phi[0:n-2] in [0, pi], phi[n-1] in [-pi, pi].
    """
    squeeze_output = x.ndim == 1
    if squeeze_output:
        x = x.reshape(1, -1)
    # Suffix norms: norms[:, i] == ||x[:, i:]||.
    norms = np.flip(np.sqrt(np.cumsum(np.flip(x * x, axis=1), axis=1)), axis=1)
    r = norms[:, 0].reshape(-1, 1)
    tail_norms = norms[:, :-1]
    with np.errstate(divide='ignore', invalid='ignore'):
        cos_phi = x[:, :-1] / tail_norms
    phi = np.arccos(cos_phi)
    # Degenerate suffixes (zero norm) get a zero angle.
    phi[tail_norms < eps] = 0
    # arccos only covers [0, pi]; the sign of the last angle is recovered
    # from the sign of the last cartesian component (see the wikipedia note).
    negative_last = x[:, -1] < 0
    phi[negative_last, -1] *= -1
    if squeeze_output:
        r = r.item()
        phi = phi.reshape(phi.size)
    return r, phi
def mean_of_angles(angles, axis=None):
    """
    Circular mean of angular values, as described in
    https://en.wikipedia.org/wiki/Mean_of_circular_quantities.
    :param angles: an array of angles (radians).
    :param axis: axis or axes along which the mean is computed.
    :return: the circular mean, in [-pi, pi].
    """
    # Average the unit vectors (cos, sin) and take the resulting direction.
    return np.arctan2(np.sin(angles).sum(axis=axis), np.cos(angles).sum(axis=axis))
def sample_uniform_on_sphere(rng, dim, size):
    """
    Sample points whose directions are uniform on the (dim-1)-sphere.
    See
    https://mathworld.wolfram.com/HyperspherePointPicking.html
    https://stackoverflow.com/questions/15880367/python-uniform-distribution-of-points-on-4-dimensional-sphere
    http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/
    Standard-normal vectors are rotation invariant, so their directions are
    uniform; normalization to unit length is intentionally skipped — it is
    done by the model, and skipping it avoids division by zero.
    :param rng: a random number generator to use.
    :param dim: dimension of the space (e.g. 3 for 3d).
    :param size: number of points.
    :return: an array of shape (size, dim).
    """
    return rng.normal(size=(size, dim))
def scaled_dirichlet(rng, k, a, size=None, scale=1):
    """
    Sample from a symmetric Dirichlet distribution of dimension k and parameter a, scaled around its mean.
    It generates vectors of dimension k. Sum of the elements of the vectors is 1.
    The elements are in the range [(1-scale) / k, (scale * (k-1) + 1) / k]
    The mean of each element is 1 / k.
    The variance is scale**2 * (k-1) / k**2 / (k * a + 1).
    :param rng: random number generator.
    :param k: dimensionality.
    :param a: distribution parameter in [eps, +inf]. eps shall be > 0.01 or so to avoid nans.
    :param size: output shape, the output will have a shape (size, k). If size is None, a vector of size k is returned.
    :param scale: scale factor.
    :return: a size vectors with k elements each.
    """
    if isinstance(size, int):
        size = (size,)
    elif size is None:
        # The docstring promises a single vector of size k for size=None;
        # previously this case crashed (`None + (k,)` is a TypeError).
        size = ()
    # Use the gamma distribution as in tensorflow.js: normalized iid Gamma(a)
    # draws are Dirichlet(a) distributed.
    y = rng.gamma(np.full(k, a), size=size + (k,))
    x = y / y.sum(axis=-1, keepdims=True)
    # Shrink/stretch around the distribution mean 1/k.
    mean = 1. / k
    return (x - mean) * scale + mean
| [
"numpy.full",
"numpy.cumprod",
"numpy.flip",
"numpy.expand_dims",
"numpy.errstate",
"numpy.random.RandomState",
"numpy.ones",
"numpy.cumsum",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.arccos",
"numpy.concatenate"
] | [((3925, 3966), 'numpy.full', 'np.full', (['ones_shape', '(1.0)'], {'dtype': 'phi.dtype'}), '(ones_shape, 1.0, dtype=phi.dtype)\n', (3932, 3966), True, 'import numpy as np\n'), ((3980, 3991), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3986, 3991), True, 'import numpy as np\n'), ((4042, 4071), 'numpy.cumprod', 'np.cumprod', (['sinphi'], {'axis': 'axis'}), '(sinphi, axis=axis)\n', (4052, 4071), True, 'import numpy as np\n'), ((4085, 4126), 'numpy.concatenate', 'np.concatenate', (['(ones, sinphi)'], {'axis': 'axis'}), '((ones, sinphi), axis=axis)\n', (4099, 4126), True, 'import numpy as np\n'), ((4140, 4151), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4146, 4151), True, 'import numpy as np\n'), ((4165, 4206), 'numpy.concatenate', 'np.concatenate', (['(cosphi, ones)'], {'axis': 'axis'}), '((cosphi, ones), axis=axis)\n', (4179, 4206), True, 'import numpy as np\n'), ((4889, 4911), 'numpy.flip', 'np.flip', (['(x * x)'], {'axis': '(1)'}), '(x * x, axis=1)\n', (4896, 4911), True, 'import numpy as np\n'), ((4960, 4978), 'numpy.flip', 'np.flip', (['n'], {'axis': '(1)'}), '(n, axis=1)\n', (4967, 4978), True, 'import numpy as np\n'), ((5124, 5137), 'numpy.arccos', 'np.arccos', (['xn'], {}), '(xn)\n', (5133, 5137), True, 'import numpy as np\n'), ((5834, 5848), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (5840, 5848), True, 'import numpy as np\n'), ((5857, 5871), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5863, 5871), True, 'import numpy as np\n'), ((4928, 4949), 'numpy.cumsum', 'np.cumsum', (['x2'], {'axis': '(1)'}), '(x2, axis=1)\n', (4937, 4949), True, 'import numpy as np\n'), ((5038, 5084), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (5049, 5084), True, 'import numpy as np\n'), ((314, 338), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (335, 338), True, 'import numpy as np\n'), ((2844, 2864), 'numpy.expand_dims', 
'np.expand_dims', (['r', '(2)'], {}), '(r, 2)\n', (2858, 2864), True, 'import numpy as np\n'), ((3311, 3329), 'numpy.ones', 'np.ones', (['(size, 1)'], {}), '((size, 1))\n', (3318, 3329), True, 'import numpy as np\n'), ((7612, 7625), 'numpy.full', 'np.full', (['k', 'a'], {}), '(k, a)\n', (7619, 7625), True, 'import numpy as np\n'), ((7723, 7736), 'numpy.full', 'np.full', (['k', 'a'], {}), '(k, a)\n', (7730, 7736), True, 'import numpy as np\n'), ((2682, 2693), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2688, 2693), True, 'import numpy as np\n'), ((2750, 2761), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2756, 2761), True, 'import numpy as np\n'), ((2131, 2165), 'numpy.array', 'np.array', (['[self._r0, 1 - self._r0]'], {}), '([self._r0, 1 - self._r0])\n', (2139, 2165), True, 'import numpy as np\n'), ((2349, 2369), 'numpy.array', 'np.array', (['[r, 1 - r]'], {}), '([r, 1 - r])\n', (2357, 2369), True, 'import numpy as np\n')] |
"""Test the surface_io module."""
from collections import OrderedDict
import shutil
import logging
import pytest
import json
import numpy as np
import xtgeo
import yaml
import fmu.dataio
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Minimal in-code master-data config used by the simple test cases.
CFG = OrderedDict()
CFG["model"] = {"name": "Test", "revision": "AUTO"}
CFG["masterdata"] = {
    "smda": {
        "country": [
            {"identifier": "Norway", "uuid": "ad214d85-8a1d-19da-e053-c918a4889309"}
        ],
        "discovery": [{"short_identifier": "abdcef", "uuid": "ghijk"}],
    }
}
# Full Drogon global config, loaded from the test data tree
# (paths are relative to the repository root the tests run from).
CFG2 = {}
with open("tests/data/drogon/global_config2/global_variables.yml", "r") as stream:
    CFG2 = yaml.safe_load(stream)
# Canned ERT run used by the ERTRUN test below.
RUN = "tests/data/drogon/ertrun1/realization-0/iter-0/rms"
CASEPATH = "tests/data/drogon/ertrun1"
def test_surface_io(tmp_path):
    """Minimal test surface io, uses tmp_path."""
    # Build a small synthetic surface to export.
    surface = xtgeo.RegularSurface(
        ncol=20, nrow=30, xinc=20, yinc=20, values=np.ma.ones((20, 30)), name="test"
    )
    fmu.dataio.ExportData.export_root = tmp_path.resolve()
    fmu.dataio.ExportData.surface_fformat = "irap_binary"
    exporter = fmu.dataio.ExportData(content="depth")
    exporter._pwd = tmp_path
    exporter.to_file(surface)
    # Both the binary surface and its metadata sidecar must exist.
    assert (tmp_path / "maps" / "test.gri").is_file() is True
    assert (tmp_path / "maps" / ".test.gri.yml").is_file() is True
def test_surface_io_export_subfolder(tmp_path):
    """Minimal test surface io with export_subfolder set,
    uses tmp_path."""
    surface = xtgeo.RegularSurface(
        ncol=20, nrow=30, xinc=20, yinc=20, values=np.ma.ones((20, 30)), name="test"
    )
    fmu.dataio.ExportData.export_root = tmp_path.resolve()
    fmu.dataio.ExportData.surface_fformat = "irap_binary"
    exporter = fmu.dataio.ExportData(content="depth")
    exporter._pwd = tmp_path
    # Exporting into a subfolder works but is expected to warn.
    with pytest.warns(UserWarning):
        exporter.to_file(surface, subfolder="mysubfolder")
    subdir = tmp_path / "maps" / "mysubfolder"
    assert (subdir / "test.gri").is_file() is True
    assert (subdir / ".test.gri.yml").is_file() is True
def test_surface_io_larger_case(tmp_path):
    """Larger test surface io, uses global config from Drogon to tmp_path."""
    surface = xtgeo.RegularSurface(
        ncol=20,
        nrow=30,
        xinc=20,
        yinc=20,
        values=np.ma.ones((20, 30)),
        name="TopVolantis",
    )
    fmu.dataio.ExportData.export_root = tmp_path.resolve()
    fmu.dataio.ExportData.surface_fformat = "irap_binary"
    exporter = fmu.dataio.ExportData(
        config=CFG2,
        content="depth",
        unit="m",
        vertical_domain={"depth": "msl"},
        timedata=None,
        is_prediction=True,
        is_observation=False,
        tagname="what Descr",
        verbosity="INFO",
    )
    exporter._pwd = tmp_path
    exporter.to_file(surface, verbosity="DEBUG")
    # The tagname is slugified into the metadata file name.
    metadataout = tmp_path / "maps" / ".topvolantis--what_descr.gri.yml"
    assert metadataout.is_file() is True
    print(metadataout)
def test_surface_io_larger_case_ertrun(tmp_path):
    """Larger test surface io as ERTRUN, uses global config from Drogon to tmp_path.
    Need some file acrobatics here to make the tmp_path area look like an ERTRUN first.
    """
    # Fake a scratch disk layout: scratch/fields/user/mycase/realization-0/iter-0/...
    current = tmp_path / "scratch" / "fields" / "user"
    current.mkdir(parents=True, exist_ok=True)
    shutil.copytree(CASEPATH, current / "mycase")
    # export_root is relative to the runfolder in an ERT run.
    fmu.dataio.ExportData.export_root = "../../share/results"
    fmu.dataio.ExportData.surface_fformat = "irap_binary"
    runfolder = current / "mycase" / "realization-0" / "iter-0" / "rms" / "model"
    runfolder.mkdir(parents=True, exist_ok=True)
    # Where the exported surface + metadata are expected to land.
    out = current / "mycase" / "realization-0" / "iter-0" / "share" / "results" / "maps"
    exp = fmu.dataio.ExportData(
        config=CFG2,
        content="depth",
        unit="m",
        vertical_domain={"depth": "msl"},
        timedata=None,
        is_prediction=True,
        is_observation=False,
        tagname="what Descr",
        verbosity="INFO",
        runfolder=runfolder.resolve(),
        workflow="my current workflow",
    )
    # make a fake RegularSurface
    srf = xtgeo.RegularSurface(
        ncol=20,
        nrow=30,
        xinc=20,
        yinc=20,
        values=np.ma.ones((20, 30)),
        name="TopVolantis",
    )
    exp.to_file(srf, verbosity="INFO")
    metadataout = out / ".topvolantis--what_descr.gri.yml"
    assert metadataout.is_file() is True
    # now read the metadata file and test some key entries:
    with open(metadataout, "r") as stream:
        meta = yaml.safe_load(stream)
    assert (
        meta["file"]["relative_path"]
        == "realization-0/iter-0/share/results/maps/topvolantis--what_descr.gri"
    )
    assert meta["class"] == "surface", meta["class"]
    assert meta["fmu"]["model"]["name"] == "ff"
    assert meta["fmu"]["iteration"]["name"] == "iter-0"
    assert meta["fmu"]["realization"]["name"] == "realization-0"
    assert meta["data"]["stratigraphic"] is True
    # display_name is not set, checking that 'name' was used
    assert meta["display"]["name"] == "TopVolantis"
    logger.debug("\n%s", json.dumps(meta, indent=2))
| [
"pytest.warns",
"json.dumps",
"yaml.safe_load",
"numpy.ma.ones",
"collections.OrderedDict",
"shutil.copytree",
"logging.getLogger"
] | [((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((264, 277), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (275, 277), False, 'from collections import OrderedDict\n'), ((668, 690), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (682, 690), False, 'import yaml\n'), ((3337, 3382), 'shutil.copytree', 'shutil.copytree', (['CASEPATH', "(current / 'mycase')"], {}), "(CASEPATH, current / 'mycase')\n", (3352, 3382), False, 'import shutil\n'), ((1826, 1851), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1838, 1851), False, 'import pytest\n'), ((4551, 4573), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (4565, 4573), False, 'import yaml\n'), ((5124, 5150), 'json.dumps', 'json.dumps', (['meta'], {'indent': '(2)'}), '(meta, indent=2)\n', (5134, 5150), False, 'import json\n'), ((990, 1010), 'numpy.ma.ones', 'np.ma.ones', (['(20, 30)'], {}), '((20, 30))\n', (1000, 1010), True, 'import numpy as np\n'), ((1586, 1606), 'numpy.ma.ones', 'np.ma.ones', (['(20, 30)'], {}), '((20, 30))\n', (1596, 1606), True, 'import numpy as np\n'), ((2337, 2357), 'numpy.ma.ones', 'np.ma.ones', (['(20, 30)'], {}), '((20, 30))\n', (2347, 2357), True, 'import numpy as np\n'), ((4236, 4256), 'numpy.ma.ones', 'np.ma.ones', (['(20, 30)'], {}), '((20, 30))\n', (4246, 4256), True, 'import numpy as np\n')] |
# Carregar o dataset MNIST
# Obs: Este script é baseado na versão do livro http://neuralnetworksanddeeplearning.com/, com a devida autorização do autor.
# Imports
import pickle
import gzip
import numpy as np
def load_data():
    """Load the raw MNIST dataset from the gzipped pickle.

    Returns:
        (training_data, validation_data, test_data) exactly as stored in
        the pickle: each is a (images, labels) pair.
    """
    # Context manager guarantees the file is closed even if unpickling
    # raises (the previous explicit close() leaked the handle on error).
    with gzip.open('../data/processed/mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Return MNIST as zipped (input, label) lists.

    Inputs are reshaped to 784x1 column vectors.  Training labels are
    one-hot encoded via vectorized_result; validation and test labels
    stay as plain digits.
    """
    tr_d, va_d, te_d = load_data()
    def as_columns(images):
        # Flatten each image into a 784x1 column vector.
        return [np.reshape(img, (784, 1)) for img in images]
    one_hot_labels = [vectorized_result(label) for label in tr_d[1]]
    training_data = list(zip(as_columns(tr_d[0]), one_hot_labels))
    validation_data = list(zip(as_columns(va_d[0]), va_d[1]))
    test_data = list(zip(as_columns(te_d[0]), te_d[1]))
    return (training_data, validation_data, test_data)
def vectorized_result(j):
    """Return a 10x1 one-hot column vector with a 1.0 at index j."""
    one_hot = np.zeros((10, 1))
    one_hot[j] = 1.0
    return one_hot
"numpy.zeros",
"pickle.load",
"gzip.open",
"numpy.reshape"
] | [((236, 285), 'gzip.open', 'gzip.open', (['"""../data/processed/mnist.pkl.gz"""', '"""rb"""'], {}), "('../data/processed/mnist.pkl.gz', 'rb')\n", (245, 285), False, 'import gzip\n'), ((334, 367), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (345, 367), False, 'import pickle\n'), ((1017, 1034), 'numpy.zeros', 'np.zeros', (['(10, 1)'], {}), '((10, 1))\n', (1025, 1034), True, 'import numpy as np\n'), ((521, 544), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (531, 544), True, 'import numpy as np\n'), ((716, 739), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (726, 739), True, 'import numpy as np\n'), ((837, 860), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (847, 860), True, 'import numpy as np\n')] |
import pickle
import os
import pandas as pd
from datetime import datetime as dt
import numpy as np
from VaccineAllocation import load_config_file,config_path
from reporting.plotting import plot_multi_tier_sims
from reporting.report_pdf import generate_report
from reporting.output_processors import build_report
from pipelinemultitier import read_hosp, multi_tier_pipeline
import csv
def getACS_util(reg_hosp, profiles, T):
    """Summarize expected ACS utilization over simulated sample paths.

    For each path, total hospitalization (IHT summed over groups) is
    compared against the permanent capacity reg_hosp and the path's
    time-varying total capacity, over the first T days.

    Returns (useList, maxUseList, maxDayList, capList, overList,
    meanUse, meanUtil, noTriggered).
    """
    useList, maxUseList, maxDayList, capList, overList = [], [], [], [], []
    noTriggered = 0
    for path in profiles:
        total_census = np.sum(path['IHT'], axis=(1, 2))
        # Demand above the permanent (regular) capacity.
        over_reg = np.maximum(total_census - reg_hosp, 0)
        # Demand above the total (regular + ACS) capacity.
        over_acs = np.maximum(total_census - path["capacity"], 0)
        # Patients actually served by ACS beds.
        acs_used = over_reg - over_acs
        acs_capacity = np.array(path["capacity"]) - reg_hosp
        # Count paths where ACS was triggered and capacity actually changed.
        if path['acs_triggered'] and len(np.unique(path['capacity'][:T])) > 1:
            noTriggered += 1
        maxUseList.append(np.max(over_reg[:T]))
        maxDayList.append(np.sum(over_reg[:T] > 0))
        useList.append(np.sum(acs_used[:T]))
        capList.append(np.sum(acs_capacity[:T]))
        overList.append(np.sum(over_acs[:T]))
    meanUse = np.mean(useList)
    meanUtil = np.nanmean(np.array(useList) / np.array(capList))
    return useList, maxUseList, maxDayList, capList, overList, meanUse, meanUtil, noTriggered
def getACS_util_ICU(reg_hosp, profiles, T, t_start = 0):
    """Summarize expected ACS utilization for the ICU census.

    Same bookkeeping as getACS_util, but on the 'ICU' compartment, and
    the census/capacity comparison starts at day t_start.

    Returns (useList, maxUseList, maxDayList, capList, overList,
    meanUse, meanUtil, noTriggered).
    """
    useList, maxUseList, maxDayList, capList, overList = [], [], [], [], []
    noTriggered = 0
    for path in profiles:
        icu_census = np.sum(path['ICU'], axis=(1, 2))
        # ICU demand above the permanent capacity, from t_start on.
        over_reg = np.maximum(icu_census[t_start:] - reg_hosp, 0)
        # ICU demand above the total (permanent + ACS) capacity.
        over_acs = np.maximum(icu_census[t_start:] - path["capacity"][t_start:], 0)
        acs_used = over_reg - over_acs
        # NOTE(review): unlike the census, the capacity series is NOT
        # t_start-shifted here — preserved as-is from the original.
        acs_capacity = np.array(path["capacity"]) - reg_hosp
        if path['acs_triggered'] and len(np.unique(path['capacity'][:T])) > 1:
            noTriggered += 1
        maxUseList.append(np.max(over_reg[:T]))
        maxDayList.append(np.sum(over_reg[:T] > 0))
        useList.append(np.sum(acs_used[:T]))
        capList.append(np.sum(acs_capacity[:T]))
        overList.append(np.sum(over_acs[:T]))
    meanUse = np.mean(useList)
    meanUtil = np.nanmean(np.array(useList) / np.array(capList))
    return useList, maxUseList, maxDayList, capList, overList, meanUse, meanUtil, noTriggered
def getACS_reppath(profiles, reg_cap):
    """For each path, the first day its capacity exceeds reg_cap.

    Returns [path_index, first_day] pairs (first_day is 10000 when the
    capacity never exceeds reg_cap), sorted by first_day ascending.
    """
    first_days = []
    for idx, path in enumerate(profiles):
        above = np.where(np.array(path['capacity']) > reg_cap)[0]
        first_day = above[0] if len(above) > 0 else 10000
        first_days.append([idx, first_day])
    first_days.sort(key=lambda pair: pair[1])
    return first_days
def getACS_gap(profiles, reg_cap):
    """For every simulated path, report when demand and capacity first exceed reg_cap.

    Returns one [path_index, first_day_IHT_over, first_day_capacity_over]
    row per path, where 10000 marks "never".
    """
    outList = []
    # Iterate over all paths: the previous hard-coded range(300) crashed
    # for shorter profile lists and silently dropped paths for longer ones
    # (getACS_gap_ICU already used len(profiles)).
    for i in range(len(profiles)):
        IHTList = np.sum(profiles[i]['IHT'], axis=(1, 2))
        capList = np.array(profiles[i]['capacity'])
        profList = [i]
        if len(np.where(IHTList > reg_cap)[0]) > 0:
            profList.append(np.where(IHTList > reg_cap)[0][0])
        else:
            profList.append(10000)
        if len(np.where(capList > reg_cap)[0]) > 0:
            profList.append(np.where(capList > reg_cap)[0][0])
        else:
            profList.append(10000)
        outList.append(profList)
    return outList
def getACS_gap_ICU(profiles, reg_cap):
    """For every path, when ICU census and capacity first exceed reg_cap.

    Returns one [path_index, first_day_ICU_over, first_day_capacity_over]
    row per path, where 10000 marks "never".
    """
    rows = []
    for i, path in enumerate(profiles):
        icu_census = np.sum(path['ICU'], axis=(1, 2))
        capacity = np.array(path['capacity'])
        icu_hits = np.where(icu_census > reg_cap)[0]
        cap_hits = np.where(capacity > reg_cap)[0]
        rows.append([i,
                     icu_hits[0] if len(icu_hits) > 0 else 10000,
                     cap_hits[0] if len(cap_hits) > 0 else 10000])
    return rows
# Pickled simulation outputs to post-process.
fileList = os.listdir("output")
# load Austin real hospitalization
file_path = "instances/austin/austin_real_hosp_updated.csv"
start_date = dt(2020,2,28)
real_hosp = read_hosp(file_path, start_date)
hosp_beds_list = None
# Real daily hospital admissions for the same period.
file_path = "instances/austin/austin_hosp_ad_updated.csv"
hosp_ad = read_hosp(file_path, start_date, "admits")
# Real ICU census for the same period.
file_path = "instances/austin/austin_real_icu_updated.csv"
real_icu = read_hosp(file_path, start_date)
hosp_beds_list = None
# newly defined color
# Extra trigger thresholds mapped to plot colors for the tier plots.
add_tiers = {0.62379925: '#ffE000',
             0.6465315: '#ffC000',
             0.66926375: '#ffA000',
             0.71472825: '#ff6000',
             0.7374605: '#ff4000',
             0.76019275: '#ff2000'
             }
# Summary CSV of ACS statistics per case.
# NOTE(review): hard-coded absolute path, and `fi` is never closed in the
# visible portion of the script — consider a with-statement.
fi = open("/Users/nazlicanarslan/Desktop/github_clones/COVID19-vaccine/VaccineAllocation/output/v-acs.csv","w",newline="")
csvWriter = csv.writer(fi,dialect='excel')
csvWriter.writerow(['Case_Name','ACS_Quantity','ACS_Trigger','Scenario with ACS Triggered',
                    'Infeasible Scenarios','Mean ACS Usage', 'Mean ACS Util Rate',
                    'Max No of Days Requiring ACS','95% Days Requiring ACS',
                    'Max ACS Required', '95% ACS Required', 'Original Unmet Mean', 'Original Unmet Median', 'Original Unmet Std', 'Original Unmet 5%', 'Original Unmet 95%'])
trend_comp = True
# Summarize ACS usage for every pickled run and append one CSV row per case.
for instance_raw in fileList:
    if ".p" in instance_raw:
        try:
            instance_name = instance_raw[:-2]
            file_path = f'output/austin_acs_071321/{instance_name}.p'
            with open(file_path, 'rb') as outfile:
                read_output = pickle.load(outfile)
            instance, interventions, best_params, best_policy, vaccines, profiles, sim_output, expected_cost, config, seeds_info = read_output
            # Runs named "11_13" use the November 2020 history cutoff;
            # everything else uses July 2021.
            if "11_13" in instance_name:
                end_history = dt(2020,11,13)
            else:
                end_history = dt(2021,7,12)
            t_start = (end_history - start_date).days
            acs_results = getACS_util(instance.hosp_beds,profiles,601)
            print("====================================================")
            print(instance_name)
            case_name = str(instance.transmission_file)[:-4]
            print(case_name)
            print("ACS Trigger: ", best_policy.acs_thrs)
            print("ACS Quantity: ", best_policy.acs_Q)
            infeas_scen = np.sum(np.array(acs_results[4]) > 0)
            print("Infeasible Scenarios Testing: ", infeas_scen)
            print("Mean ACS Usage: ", acs_results[5])
            mean_util_rate = np.round(acs_results[6]*100,2)
            print("Mean ACS Utilization Rate: ", mean_util_rate)
            print("Number of paths hitting the trigger: ", acs_results[7])
            print("Maximum number of days requiring ACS", np.max(acs_results[2]))
            print("95 Percentile of days requiring ACS", np.percentile(acs_results[2],95))
            print("Maximum ACS required", np.max(acs_results[1]))
            print("95 Percentile of ACS required", np.percentile(acs_results[1],95))
            n_replicas = len(profiles)
            # Unmet demand above the 1500-bed base capacity.
            # NOTE(review): only the first 300 sample paths are used here even
            # though n_replicas may differ — confirm this is intentional.
            unmet_IHT = [np.sum(np.maximum(np.sum(profiles[i]['IHT'],axis = (1,2)) - 1500,0)) for i in range(300)]
            over_mean = np.mean(unmet_IHT)
            over_median = np.median(unmet_IHT)
            over_std = np.std(unmet_IHT)
            over_5P = np.percentile(unmet_IHT,5)
            over_95P = np.percentile(unmet_IHT,95)
            data = [case_name, best_policy.acs_Q, best_policy.acs_thrs, acs_results[7],
                    infeas_scen, acs_results[5], mean_util_rate,
                    np.max(acs_results[2]), np.percentile(acs_results[2],95),
                    np.max(acs_results[1]), np.percentile(acs_results[1],95),over_mean,over_median,over_std,over_5P,over_95P]
            csvWriter.writerow(data)
            dateList = getACS_reppath(profiles,1500)
            if not trend_comp:
                cpid = dateList[14][0]
            else:
                cpid = 0
            IHD_plot = plot_multi_tier_sims(instance_name,
                                            instance,
                                            best_policy,
                                            profiles, ['sim'] * len(profiles),
                                            real_hosp,
                                            plot_left_axis=['IHT'],
                                            plot_right_axis=[],
                                            T=601,
                                            interventions=interventions,
                                            show=False,
                                            align_axes=True,
                                            plot_triggers=False,
                                            plot_ACS_triggers=True,
                                            plot_trigger_annotations=False,
                                            plot_legend=False,
                                            y_lim=best_policy.acs_Q + 2000,
                                            policy_params=best_params,
                                            n_replicas=n_replicas,
                                            config=config,
                                            add_tiers=add_tiers,
                                            real_new_admission=real_hosp,
                                            real_hosp_or_icu=real_hosp,
                                            t_start = t_start,
                                            is_representative_path=False,
                                            central_path_id = cpid,
                                            cap_path_id = cpid,
                                            history_white = True,
                                            acs_fill = True,
                                            )
            IYIH_plot = plot_multi_tier_sims(instance_name,
                                            instance,
                                            best_policy,
                                            profiles, ['sim'] * len(profiles),
                                            real_hosp,
                                            plot_left_axis=['ToIHT'],
                                            plot_right_axis=[],
                                            T=601,
                                            interventions=interventions,
                                            show=False,
                                            align_axes=False,
                                            plot_triggers=False,
                                            plot_ACS_triggers=True,
                                            plot_trigger_annotations=False,
                                            plot_legend=False,
                                            y_lim=300,
                                            policy_params=best_params,
                                            n_replicas=n_replicas,
                                            config=config,
                                            hosp_beds_list=hosp_beds_list,
                                            real_new_admission=hosp_ad,
                                            add_tiers=add_tiers,
                                            t_start = t_start,
                                            central_path_id = cpid,
                                            cap_path_id = cpid,
                                            history_white = True,
                                            no_fill = True,
                                            )
        except Exception as e:
            # Best-effort: a broken/partial pickle should not abort the batch,
            # but do report it instead of failing silently (was `except: pass`).
            print(f"Skipping {instance_raw}: {e!r}")
fi.close()
# ICU expansion analysis
# Same pipeline as the IHT analysis above, but reporting ACS statistics for
# ICU capacity into a separate CSV file.
fileList = os.listdir("output")
# load Austin real hospitalization
file_path = "instances/austin/austin_real_hosp_updated.csv"
start_date = dt(2020,2,28)
real_hosp = read_hosp(file_path, start_date)
hosp_beds_list = None
file_path = "instances/austin/austin_hosp_ad_updated.csv"
hosp_ad = read_hosp(file_path, start_date, "admits")
file_path = "instances/austin/austin_real_icu_updated.csv"
real_icu = read_hosp(file_path, start_date)
hosp_beds_list = None
# NOTE(review): hard-coded absolute path — works only on the author's machine.
fi = open("/Users/nazlicanarslan/Desktop/github_clones/COVID19-vaccine/VaccineAllocation/output/v-acs_ICU.csv","w",newline="")
csvWriter = csv.writer(fi,dialect='excel')
csvWriter.writerow(['Case_Name','ACS_Quantity','ACS_Trigger','Scenario with ACS Triggered',
                    'Infeasible Scenarios','Mean ACS Usage', 'Mean ACS Util Rate',
                    'Max No of Days Requiring ACS','95% Days Requiring ACS',
                    'Max ACS Required', '95% ACS Required', 'Original Unmet Mean', 'Original Unmet Median', 'Original Unmet Std', 'Original Unmet 5%', 'Original Unmet 95%'])
trend_comp = True
# Summarize ICU ACS usage for every pickled run and write one CSV row per case.
for instance_raw in fileList:
    if ".p" in instance_raw:
        # try:
        instance_name = instance_raw[:-2]
        file_path = f'output/{instance_name}.p'
        with open(file_path, 'rb') as outfile:
            read_output = pickle.load(outfile)
        instance, interventions, best_params, best_policy, vaccines, profiles, sim_output, expected_cost, config, seeds_info = read_output
        # if "11_13" in instance_name:
        #     end_history = dt(2020,11,13)
        # else:
        #     end_history = dt(2020,7,12)
        # History cutoff for this batch of runs (fixed, unlike the IHT loop).
        end_history = dt(2021,11,24)
        t_start = (end_history - start_date).days
        acs_results = getACS_util_ICU(instance.icu,profiles,601,t_start)
        print("====================================================")
        print(instance_name)
        # Strip the "/instances/austin/" prefix and the ".csv" suffix.
        case_name = str(instance.transmission_file)[str(instance.transmission_file).find("/instances/austin")+len("/instances/austin")+1:-4]
        print(case_name)
        instance_name = "austin_{}_{}_{}_{}".format(case_name,best_policy.acs_Q,best_policy.acs_type, best_policy.acs_thrs)
        #os.rename(file_path, r"/Users/haoxiangyang/Desktop/Git/COVID19_CAOE/InterventionsMIP/output/ACS_Analysis_11_13/" + instance_name + ".p")
        print("ACS Trigger: ", best_policy.acs_thrs)
        print("ACS Quantity: ", best_policy.acs_Q)
        infeas_scen = np.sum(np.array(acs_results[4]) > 0)
        print("Infeasible Scenarios Testing: ", infeas_scen)
        print("Mean ACS Usage: ", acs_results[5])
        mean_util_rate = np.round(acs_results[6]*100,2)
        print("Mean ACS Utilization Rate: ", mean_util_rate)
        print("Number of paths hitting the trigger: ", acs_results[7])
        print("Maximum number of days requiring ACS", np.max(acs_results[2]))
        print("95 Percentile of days requiring ACS", np.percentile(acs_results[2],95))
        print("90 Percentile of days requiring ACS", np.percentile(acs_results[2],90))
        print("80 Percentile of days requiring ACS", np.percentile(acs_results[2],80))
        print("50 Percentile of days requiring ACS", np.percentile(acs_results[2],50))
        print("Maximum ACS required", np.max(acs_results[1]))
        print("95 Percentile of ACS required", np.percentile(acs_results[1],95))
        print("90 Percentile of ACS required", np.percentile(acs_results[1],90))
        print("80 Percentile of ACS required", np.percentile(acs_results[1],80))
        print("50 Percentile of ACS required", np.percentile(acs_results[1],50))
        n_replicas = len(profiles)
        # Unmet ICU demand above the 200-bed base capacity, counted only after
        # the history cutoff (t_start).
        unmet_ICU = [np.sum(np.maximum(np.sum(profiles[i]['ICU'],axis = (1,2))[t_start:] - 200,0)) for i in range(len(profiles))]
        over_mean = np.mean(unmet_ICU)
        over_median = np.median(unmet_ICU)
        over_std = np.std(unmet_ICU)
        over_5P = np.percentile(unmet_ICU,5)
        over_95P = np.percentile(unmet_ICU,95)
        data = [case_name, best_policy.acs_Q, best_policy.acs_thrs, acs_results[7],
                infeas_scen, acs_results[5], mean_util_rate,
                np.max(acs_results[2]), np.percentile(acs_results[2],95),
                np.max(acs_results[1]), np.percentile(acs_results[1],95),over_mean,over_median,over_std,over_5P,over_95P,
                np.percentile(acs_results[1],50),np.percentile(acs_results[1],75)]
        csvWriter.writerow(data)
        dateList = getACS_reppath(profiles,1500)
        if not trend_comp:
            cpid = dateList[14][0]
        else:
            cpid = 0
        # Plotting disabled for the ICU batch; kept for reference.
        # IHD_plot = plot_multi_tier_sims(instance_name,
        #                                 instance,
        #                                 best_policy,
        #                                 profiles, ['sim'] * len(profiles),
        #                                 real_hosp,
        #                                 plot_left_axis=['ICU'],
        #                                 plot_right_axis=[],
        #                                 T=601,
        #                                 interventions=interventions,
        #                                 show=True,
        #                                 align_axes=True,
        #                                 plot_triggers=False,
        #                                 plot_trigger_annotations=False,
        #                                 plot_legend=False,
        #                                 y_lim=best_policy.acs_Q + 500,
        #                                 policy_params=best_params,
        #                                 n_replicas=n_replicas,
        #                                 config=config,
        #                                 real_new_admission=real_hosp,
        #                                 real_hosp_or_icu=real_icu,
        #                                 t_start = t_start,
        #                                 is_representative_path=False,
        #                                 central_path_id = cpid,
        #                                 cap_path_id = cpid,
        #                                 history_white = True,
        #                                 acs_type = 'ICU'
        #                                 )
        # IYIH_plot = plot_multi_tier_sims(instance_name,
        #                                 instance,
        #                                 best_policy,
        #                                 profiles, ['sim'] * len(profiles),
        #                                 real_hosp,
        #                                 plot_left_axis=['ToIHT'],
        #                                 plot_right_axis=[],
        #                                 T=601,
        #                                 interventions=interventions,
        #                                 show=False,
        #                                 align_axes=False,
        #                                 plot_triggers=False,
        #                                 plot_ACS_triggers=True,
        #                                 plot_trigger_annotations=False,
        #                                 plot_legend=False,
        #                                 y_lim=250,
        #                                 policy_params=best_params,
        #                                 n_replicas=n_replicas,
        #                                 config=config,
        #                                 hosp_beds_list=hosp_beds_list,
        #                                 real_new_admission=hosp_ad,
        #                                 t_start = t_start,
        #                                 central_path_id = cpid,
        #                                 cap_path_id = cpid,
        #                                 history_white = True
        #                                 )
        # except:
        #     pass
fi.close()
| [
"pipelinemultitier.read_hosp",
"numpy.sum",
"csv.writer",
"numpy.median",
"numpy.std",
"datetime.datetime",
"numpy.percentile",
"numpy.max",
"numpy.mean",
"numpy.array",
"pickle.load",
"numpy.where",
"numpy.round",
"os.listdir",
"numpy.unique"
] | [((5023, 5043), 'os.listdir', 'os.listdir', (['"""output"""'], {}), "('output')\n", (5033, 5043), False, 'import os\n'), ((5153, 5168), 'datetime.datetime', 'dt', (['(2020)', '(2)', '(28)'], {}), '(2020, 2, 28)\n', (5155, 5168), True, 'from datetime import datetime as dt\n'), ((5179, 5211), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date'], {}), '(file_path, start_date)\n', (5188, 5211), False, 'from pipelinemultitier import read_hosp, multi_tier_pipeline\n'), ((5302, 5344), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date', '"""admits"""'], {}), "(file_path, start_date, 'admits')\n", (5311, 5344), False, 'from pipelinemultitier import read_hosp, multi_tier_pipeline\n'), ((5415, 5447), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date'], {}), '(file_path, start_date)\n', (5424, 5447), False, 'from pipelinemultitier import read_hosp, multi_tier_pipeline\n'), ((5854, 5885), 'csv.writer', 'csv.writer', (['fi'], {'dialect': '"""excel"""'}), "(fi, dialect='excel')\n", (5864, 5885), False, 'import csv\n'), ((12457, 12477), 'os.listdir', 'os.listdir', (['"""output"""'], {}), "('output')\n", (12467, 12477), False, 'import os\n'), ((12587, 12602), 'datetime.datetime', 'dt', (['(2020)', '(2)', '(28)'], {}), '(2020, 2, 28)\n', (12589, 12602), True, 'from datetime import datetime as dt\n'), ((12613, 12645), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date'], {}), '(file_path, start_date)\n', (12622, 12645), False, 'from pipelinemultitier import read_hosp, multi_tier_pipeline\n'), ((12736, 12778), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date', '"""admits"""'], {}), "(file_path, start_date, 'admits')\n", (12745, 12778), False, 'from pipelinemultitier import read_hosp, multi_tier_pipeline\n'), ((12849, 12881), 'pipelinemultitier.read_hosp', 'read_hosp', (['file_path', 'start_date'], {}), '(file_path, start_date)\n', (12858, 12881), False, 'from pipelinemultitier 
import read_hosp, multi_tier_pipeline\n'), ((13045, 13076), 'csv.writer', 'csv.writer', (['fi'], {'dialect': '"""excel"""'}), "(fi, dialect='excel')\n", (13055, 13076), False, 'import csv\n'), ((1699, 1715), 'numpy.mean', 'np.mean', (['useList'], {}), '(useList)\n', (1706, 1715), True, 'import numpy as np\n'), ((3246, 3262), 'numpy.mean', 'np.mean', (['useList'], {}), '(useList)\n', (3253, 3262), True, 'import numpy as np\n'), ((3908, 3947), 'numpy.sum', 'np.sum', (["profiles[i]['IHT']"], {'axis': '(1, 2)'}), "(profiles[i]['IHT'], axis=(1, 2))\n", (3914, 3947), True, 'import numpy as np\n'), ((3967, 4000), 'numpy.array', 'np.array', (["profiles[i]['capacity']"], {}), "(profiles[i]['capacity'])\n", (3975, 4000), True, 'import numpy as np\n'), ((4514, 4553), 'numpy.sum', 'np.sum', (["profiles[i]['ICU']"], {'axis': '(1, 2)'}), "(profiles[i]['ICU'], axis=(1, 2))\n", (4520, 4553), True, 'import numpy as np\n'), ((4573, 4606), 'numpy.array', 'np.array', (["profiles[i]['capacity']"], {}), "(profiles[i]['capacity'])\n", (4581, 4606), True, 'import numpy as np\n'), ((14136, 14152), 'datetime.datetime', 'dt', (['(2021)', '(11)', '(24)'], {}), '(2021, 11, 24)\n', (14138, 14152), True, 'from datetime import datetime as dt\n'), ((15216, 15249), 'numpy.round', 'np.round', (['(acs_results[6] * 100)', '(2)'], {}), '(acs_results[6] * 100, 2)\n', (15224, 15249), True, 'import numpy as np\n'), ((16464, 16482), 'numpy.mean', 'np.mean', (['unmet_ICU'], {}), '(unmet_ICU)\n', (16471, 16482), True, 'import numpy as np\n'), ((16509, 16529), 'numpy.median', 'np.median', (['unmet_ICU'], {}), '(unmet_ICU)\n', (16518, 16529), True, 'import numpy as np\n'), ((16553, 16570), 'numpy.std', 'np.std', (['unmet_ICU'], {}), '(unmet_ICU)\n', (16559, 16570), True, 'import numpy as np\n'), ((16593, 16620), 'numpy.percentile', 'np.percentile', (['unmet_ICU', '(5)'], {}), '(unmet_ICU, 5)\n', (16606, 16620), True, 'import numpy as np\n'), ((16643, 16671), 'numpy.percentile', 'np.percentile', (['unmet_ICU', 
'(95)'], {}), '(unmet_ICU, 95)\n', (16656, 16671), True, 'import numpy as np\n'), ((998, 1021), 'numpy.array', 'np.array', (["p['capacity']"], {}), "(p['capacity'])\n", (1006, 1021), True, 'import numpy as np\n'), ((1252, 1275), 'numpy.max', 'np.max', (['overCap_reg[:T]'], {}), '(overCap_reg[:T])\n', (1258, 1275), True, 'import numpy as np\n'), ((1356, 1383), 'numpy.sum', 'np.sum', (['(overCap_reg[:T] > 0)'], {}), '(overCap_reg[:T] > 0)\n', (1362, 1383), True, 'import numpy as np\n'), ((1458, 1479), 'numpy.sum', 'np.sum', (['acs_usage[:T]'], {}), '(acs_usage[:T])\n', (1464, 1479), True, 'import numpy as np\n'), ((1556, 1575), 'numpy.sum', 'np.sum', (['acs_cap[:T]'], {}), '(acs_cap[:T])\n', (1562, 1575), True, 'import numpy as np\n'), ((1660, 1683), 'numpy.sum', 'np.sum', (['overCap_ACS[:T]'], {}), '(overCap_ACS[:T])\n', (1666, 1683), True, 'import numpy as np\n'), ((1742, 1759), 'numpy.array', 'np.array', (['useList'], {}), '(useList)\n', (1750, 1759), True, 'import numpy as np\n'), ((1760, 1777), 'numpy.array', 'np.array', (['capList'], {}), '(capList)\n', (1768, 1777), True, 'import numpy as np\n'), ((2545, 2568), 'numpy.array', 'np.array', (["p['capacity']"], {}), "(p['capacity'])\n", (2553, 2568), True, 'import numpy as np\n'), ((2799, 2822), 'numpy.max', 'np.max', (['overCap_reg[:T]'], {}), '(overCap_reg[:T])\n', (2805, 2822), True, 'import numpy as np\n'), ((2903, 2930), 'numpy.sum', 'np.sum', (['(overCap_reg[:T] > 0)'], {}), '(overCap_reg[:T] > 0)\n', (2909, 2930), True, 'import numpy as np\n'), ((3005, 3026), 'numpy.sum', 'np.sum', (['acs_usage[:T]'], {}), '(acs_usage[:T])\n', (3011, 3026), True, 'import numpy as np\n'), ((3103, 3122), 'numpy.sum', 'np.sum', (['acs_cap[:T]'], {}), '(acs_cap[:T])\n', (3109, 3122), True, 'import numpy as np\n'), ((3207, 3230), 'numpy.sum', 'np.sum', (['overCap_ACS[:T]'], {}), '(overCap_ACS[:T])\n', (3213, 3230), True, 'import numpy as np\n'), ((3289, 3306), 'numpy.array', 'np.array', (['useList'], {}), '(useList)\n', (3297, 
3306), True, 'import numpy as np\n'), ((3307, 3324), 'numpy.array', 'np.array', (['capList'], {}), '(capList)\n', (3315, 3324), True, 'import numpy as np\n'), ((7891, 7924), 'numpy.round', 'np.round', (['(acs_results[6] * 100)', '(2)'], {}), '(acs_results[6] * 100, 2)\n', (7899, 7924), True, 'import numpy as np\n'), ((8590, 8608), 'numpy.mean', 'np.mean', (['unmet_IHT'], {}), '(unmet_IHT)\n', (8597, 8608), True, 'import numpy as np\n'), ((8635, 8655), 'numpy.median', 'np.median', (['unmet_IHT'], {}), '(unmet_IHT)\n', (8644, 8655), True, 'import numpy as np\n'), ((8679, 8696), 'numpy.std', 'np.std', (['unmet_IHT'], {}), '(unmet_IHT)\n', (8685, 8696), True, 'import numpy as np\n'), ((8719, 8746), 'numpy.percentile', 'np.percentile', (['unmet_IHT', '(5)'], {}), '(unmet_IHT, 5)\n', (8732, 8746), True, 'import numpy as np\n'), ((8769, 8797), 'numpy.percentile', 'np.percentile', (['unmet_IHT', '(95)'], {}), '(unmet_IHT, 95)\n', (8782, 8797), True, 'import numpy as np\n'), ((13775, 13795), 'pickle.load', 'pickle.load', (['outfile'], {}), '(outfile)\n', (13786, 13795), False, 'import pickle\n'), ((15458, 15480), 'numpy.max', 'np.max', (['acs_results[2]'], {}), '(acs_results[2])\n', (15464, 15480), True, 'import numpy as np\n'), ((15541, 15574), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(95)'], {}), '(acs_results[2], 95)\n', (15554, 15574), True, 'import numpy as np\n'), ((15632, 15665), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(90)'], {}), '(acs_results[2], 90)\n', (15645, 15665), True, 'import numpy as np\n'), ((15723, 15756), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(80)'], {}), '(acs_results[2], 80)\n', (15736, 15756), True, 'import numpy as np\n'), ((15814, 15847), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(50)'], {}), '(acs_results[2], 50)\n', (15827, 15847), True, 'import numpy as np\n'), ((15890, 15912), 'numpy.max', 'np.max', (['acs_results[1]'], {}), '(acs_results[1])\n', (15896, 15912), True, 
'import numpy as np\n'), ((15965, 15998), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(95)'], {}), '(acs_results[1], 95)\n', (15978, 15998), True, 'import numpy as np\n'), ((16050, 16083), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(90)'], {}), '(acs_results[1], 90)\n', (16063, 16083), True, 'import numpy as np\n'), ((16135, 16168), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(80)'], {}), '(acs_results[1], 80)\n', (16148, 16168), True, 'import numpy as np\n'), ((16220, 16253), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(50)'], {}), '(acs_results[1], 50)\n', (16233, 16253), True, 'import numpy as np\n'), ((16858, 16880), 'numpy.max', 'np.max', (['acs_results[2]'], {}), '(acs_results[2])\n', (16864, 16880), True, 'import numpy as np\n'), ((16882, 16915), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(95)'], {}), '(acs_results[2], 95)\n', (16895, 16915), True, 'import numpy as np\n'), ((16936, 16958), 'numpy.max', 'np.max', (['acs_results[1]'], {}), '(acs_results[1])\n', (16942, 16958), True, 'import numpy as np\n'), ((16960, 16993), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(95)'], {}), '(acs_results[1], 95)\n', (16973, 16993), True, 'import numpy as np\n'), ((17062, 17095), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(50)'], {}), '(acs_results[1], 50)\n', (17075, 17095), True, 'import numpy as np\n'), ((17095, 17128), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(75)'], {}), '(acs_results[1], 75)\n', (17108, 17128), True, 'import numpy as np\n'), ((689, 718), 'numpy.sum', 'np.sum', (["p['IHT']"], {'axis': '(1, 2)'}), "(p['IHT'], axis=(1, 2))\n", (695, 718), True, 'import numpy as np\n'), ((811, 840), 'numpy.sum', 'np.sum', (["p['IHT']"], {'axis': '(1, 2)'}), "(p['IHT'], axis=(1, 2))\n", (817, 840), True, 'import numpy as np\n'), ((6629, 6649), 'pickle.load', 'pickle.load', (['outfile'], {}), '(outfile)\n', (6640, 6649), False, 'import pickle\n'), ((6887, 
6903), 'datetime.datetime', 'dt', (['(2020)', '(11)', '(13)'], {}), '(2020, 11, 13)\n', (6889, 6903), True, 'from datetime import datetime as dt\n'), ((6950, 6965), 'datetime.datetime', 'dt', (['(2021)', '(7)', '(12)'], {}), '(2021, 7, 12)\n', (6952, 6965), True, 'from datetime import datetime as dt\n'), ((8133, 8155), 'numpy.max', 'np.max', (['acs_results[2]'], {}), '(acs_results[2])\n', (8139, 8155), True, 'import numpy as np\n'), ((8214, 8247), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(95)'], {}), '(acs_results[2], 95)\n', (8227, 8247), True, 'import numpy as np\n'), ((8290, 8312), 'numpy.max', 'np.max', (['acs_results[1]'], {}), '(acs_results[1])\n', (8296, 8312), True, 'import numpy as np\n'), ((8365, 8398), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(95)'], {}), '(acs_results[1], 95)\n', (8378, 8398), True, 'import numpy as np\n'), ((8984, 9006), 'numpy.max', 'np.max', (['acs_results[2]'], {}), '(acs_results[2])\n', (8990, 9006), True, 'import numpy as np\n'), ((9008, 9041), 'numpy.percentile', 'np.percentile', (['acs_results[2]', '(95)'], {}), '(acs_results[2], 95)\n', (9021, 9041), True, 'import numpy as np\n'), ((9062, 9084), 'numpy.max', 'np.max', (['acs_results[1]'], {}), '(acs_results[1])\n', (9068, 9084), True, 'import numpy as np\n'), ((9086, 9119), 'numpy.percentile', 'np.percentile', (['acs_results[1]', '(95)'], {}), '(acs_results[1], 95)\n', (9099, 9119), True, 'import numpy as np\n'), ((15025, 15049), 'numpy.array', 'np.array', (['acs_results[4]'], {}), '(acs_results[4])\n', (15033, 15049), True, 'import numpy as np\n'), ((1116, 1144), 'numpy.unique', 'np.unique', (["p['capacity'][:T]"], {}), "(p['capacity'][:T])\n", (1125, 1144), True, 'import numpy as np\n'), ((2206, 2235), 'numpy.sum', 'np.sum', (["p['ICU']"], {'axis': '(1, 2)'}), "(p['ICU'], axis=(1, 2))\n", (2212, 2235), True, 'import numpy as np\n'), ((2338, 2367), 'numpy.sum', 'np.sum', (["p['ICU']"], {'axis': '(1, 2)'}), "(p['ICU'], axis=(1, 2))\n", (2344, 
2367), True, 'import numpy as np\n'), ((2663, 2691), 'numpy.unique', 'np.unique', (["p['capacity'][:T]"], {}), "(p['capacity'][:T])\n", (2672, 2691), True, 'import numpy as np\n'), ((4039, 4066), 'numpy.where', 'np.where', (['(IHTList > reg_cap)'], {}), '(IHTList > reg_cap)\n', (4047, 4066), True, 'import numpy as np\n'), ((4203, 4230), 'numpy.where', 'np.where', (['(capList > reg_cap)'], {}), '(capList > reg_cap)\n', (4211, 4230), True, 'import numpy as np\n'), ((4645, 4672), 'numpy.where', 'np.where', (['(ICUList > reg_cap)'], {}), '(ICUList > reg_cap)\n', (4653, 4672), True, 'import numpy as np\n'), ((4809, 4836), 'numpy.where', 'np.where', (['(capList > reg_cap)'], {}), '(capList > reg_cap)\n', (4817, 4836), True, 'import numpy as np\n'), ((7700, 7724), 'numpy.array', 'np.array', (['acs_results[4]'], {}), '(acs_results[4])\n', (7708, 7724), True, 'import numpy as np\n'), ((4104, 4131), 'numpy.where', 'np.where', (['(IHTList > reg_cap)'], {}), '(IHTList > reg_cap)\n', (4112, 4131), True, 'import numpy as np\n'), ((4268, 4295), 'numpy.where', 'np.where', (['(capList > reg_cap)'], {}), '(capList > reg_cap)\n', (4276, 4295), True, 'import numpy as np\n'), ((4710, 4737), 'numpy.where', 'np.where', (['(ICUList > reg_cap)'], {}), '(ICUList > reg_cap)\n', (4718, 4737), True, 'import numpy as np\n'), ((4874, 4901), 'numpy.where', 'np.where', (['(capList > reg_cap)'], {}), '(capList > reg_cap)\n', (4882, 4901), True, 'import numpy as np\n'), ((3547, 3580), 'numpy.array', 'np.array', (["profiles[i]['capacity']"], {}), "(profiles[i]['capacity'])\n", (3555, 3580), True, 'import numpy as np\n'), ((8494, 8533), 'numpy.sum', 'np.sum', (["profiles[i]['IHT']"], {'axis': '(1, 2)'}), "(profiles[i]['IHT'], axis=(1, 2))\n", (8500, 8533), True, 'import numpy as np\n'), ((16349, 16388), 'numpy.sum', 'np.sum', (["profiles[i]['ICU']"], {'axis': '(1, 2)'}), "(profiles[i]['ICU'], axis=(1, 2))\n", (16355, 16388), True, 'import numpy as np\n'), ((3641, 3674), 'numpy.array', 'np.array', 
(["profiles[i]['capacity']"], {}), "(profiles[i]['capacity'])\n", (3649, 3674), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import pickle
import rospy
import sys
from sensor_stick.pcl_helper import *
from sensor_stick.training_helper import spawn_model
from sensor_stick.training_helper import delete_model
from sensor_stick.training_helper import initial_setup
from sensor_stick.training_helper import capture_sample
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from sensor_stick.srv import GetNormals
from geometry_msgs.msg import Pose
from sensor_msgs.msg import PointCloud2
def get_normals(cloud):
    """Ask the feature-extractor ROS service for the surface normals of *cloud*.

    Returns the ``cluster`` field of the service response.
    """
    proxy = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
    response = proxy(cloud)
    return response.cluster
if __name__ == '__main__':
    rospy.init_node('capture_node')
    # Command line: capture.py [world_num [samples_per_model]]
    # Defaults: world 1, 10 samples per model.
    world_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    samples_per_model = int(sys.argv[2]) if len(sys.argv) > 2 else 10
    # Model lists mirror the corresponding pick_list_*.yaml files.
    pick_lists = {
        1: ['biscuits', 'soap', 'soap2'],
        2: ['biscuits', 'soap', 'soap2', 'book', 'glue'],
        3: ['biscuits', 'soap', 'soap2', 'book', 'glue',
            'sticky_notes', 'snacks', 'eraser'],
    }
    try:
        models = pick_lists[world_num]
    except KeyError:
        # Previously an unknown world number left `models` unbound and caused
        # a confusing NameError later; fail fast with a clear message instead.
        raise ValueError('world_num must be 1, 2 or 3, got %d' % world_num)
    # Disable gravity and delete the ground plane
    initial_setup()
    labeled_features = []
    for model_name in models:
        spawn_model(model_name)
        for i in range(samples_per_model):
            # Make up to five attempts to get a valid point cloud, then give up
            # (features are still computed from the last capture in that case).
            sample_was_good = False
            try_count = 0
            while not sample_was_good and try_count < 5:
                sample_cloud = capture_sample()
                sample_cloud_arr = ros_to_pcl(sample_cloud).to_array()
                # Check for invalid clouds.
                if sample_cloud_arr.shape[0] == 0:
                    print('Invalid cloud detected')
                    try_count += 1
                else:
                    sample_was_good = True
            # Extract color (HSV) and surface-normal histogram features and
            # label them with the model name for classifier training.
            chists = compute_color_histograms(sample_cloud, using_hsv=True)
            normals = get_normals(sample_cloud)
            nhists = compute_normal_histograms(normals)
            feature = np.concatenate((chists, nhists))
            labeled_features.append([feature, model_name])
        delete_model()
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle returned by open()).
    with open('training_set.sav', 'wb') as f:
        pickle.dump(labeled_features, f)
| [
"sensor_stick.features.compute_normal_histograms",
"sensor_stick.training_helper.spawn_model",
"sensor_stick.features.compute_color_histograms",
"sensor_stick.training_helper.initial_setup",
"rospy.ServiceProxy",
"rospy.init_node",
"sensor_stick.training_helper.delete_model",
"sensor_stick.training_he... | [((619, 683), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/feature_extractor/get_normals"""', 'GetNormals'], {}), "('/feature_extractor/get_normals', GetNormals)\n", (637, 683), False, 'import rospy\n'), ((760, 791), 'rospy.init_node', 'rospy.init_node', (['"""capture_node"""'], {}), "('capture_node')\n", (775, 791), False, 'import rospy\n'), ((1670, 1685), 'sensor_stick.training_helper.initial_setup', 'initial_setup', ([], {}), '()\n', (1683, 1685), False, 'from sensor_stick.training_helper import initial_setup\n'), ((1751, 1774), 'sensor_stick.training_helper.spawn_model', 'spawn_model', (['model_name'], {}), '(model_name)\n', (1762, 1774), False, 'from sensor_stick.training_helper import spawn_model\n'), ((2726, 2740), 'sensor_stick.training_helper.delete_model', 'delete_model', ([], {}), '()\n', (2738, 2740), False, 'from sensor_stick.training_helper import delete_model\n'), ((2444, 2498), 'sensor_stick.features.compute_color_histograms', 'compute_color_histograms', (['sample_cloud'], {'using_hsv': '(True)'}), '(sample_cloud, using_hsv=True)\n', (2468, 2498), False, 'from sensor_stick.features import compute_color_histograms\n'), ((2568, 2602), 'sensor_stick.features.compute_normal_histograms', 'compute_normal_histograms', (['normals'], {}), '(normals)\n', (2593, 2602), False, 'from sensor_stick.features import compute_normal_histograms\n'), ((2625, 2657), 'numpy.concatenate', 'np.concatenate', (['(chists, nhists)'], {}), '((chists, nhists))\n', (2639, 2657), True, 'import numpy as np\n'), ((2045, 2061), 'sensor_stick.training_helper.capture_sample', 'capture_sample', ([], {}), '()\n', (2059, 2061), False, 'from sensor_stick.training_helper import capture_sample\n')] |
"""This submodule contains the DSLRImage class and its Monochrome subclass.
The DSLRImage class serves the purpose of containing all needed information
for a frame, as well as the methods for binning, extracting monochrome
channels, and writing the file to FITS format.
"""
import os
from enum import IntEnum
from fractions import Fraction
from datetime import datetime
from astropy.time import Time
from astropy.io import fits
from photutils import CircularAperture, CircularAnnulus
from photutils.centroids import fit_2dgaussian # , GaussianConst2D
from skimage.feature import register_translation
from skimage.measure import block_reduce
import exifread
import numpy as np
from rawkit.raw import Raw
import libraw
from matplotlib import pyplot as plt
__all__ = ["ImageType", "Color", "DSLRImage", "Monochrome", "Star"]
w = 5
# polusirina prozora
# jos uvek ne znamo koja vrednost bi morala da bude
CurrentFrame = None
class ImageType(IntEnum):
LIGHT = 0
BIAS = 1
DARK = 2
FLAT = 3
class Color(IntEnum):
RED = 0
GREEN = 1
BLUE = 2
def _get_current_frame():
return CurrentFrame
def _demosaic(im):
"""Demosaics the image,
i.e. turns a RGGB monochrome array into a RGB array.
"""
_im = np.resize(im, (len(im)-len(im) % 2, len(im[0])-len(im[0]) % 2))
_im = np.reshape(_im, (len(im)//2, 2, len(im[0])//2, 2))
im = np.empty((len(im)//2, len(im[0])//2, 3))
im[:, :, 0] = _im[:, 0, :, 0]
im[:, :, 1] = (_im[:, 0, :, 1] + _im[:, 1, :, 0])/2
im[:, :, 2] = _im[:, 1, :, 1]
return im
def isRaw(f):
try:
Raw(f)
return True
except Exception as e:
if type(e) is libraw.errors.FileUnsupported:
print("File unsupported:", f)
else:
print("Error:", e)
print("Ignoring this file.")
return False
class DSLRImage:
"""Loads an image from RAW format, stores the metadata and writes the image
as a NumPy array.
"""
fnum = np.zeros((4, 4), dtype=int)
# Declares a NumPy 2d array for filename serialization
def __init__(
self, impath, itype=ImageType.LIGHT, color=None
):
print('Initializing image class from file:', impath)
self.impath = impath
self.imtype = itype
self.imcolor = None
self.imdata, self.exptime, self.jdate = self.__parseData(impath)
self._genPath() # generates the serialized filename
self._binX = 1
self._binY = 1
print("Initialized image class: " + str(self))
def binImage(self, x, y=None):
"""Bins the data from the image. Requires the window width.
If window height is not specified, the window is assumed to be square.
"""
if y is None:
y = x
print(
"Binning image: " + str(self) + " (" + str(x) + "x" + str(y)
+ ")")
# h = len(self.imdata)
# w = len(self.imdata[0])
# hb = h - h%y
# wb = w - w%x
# # reduces the image size in case it isn't divisible by window size
# imdata_resized = np.resize(self.imdata, (hb, w, 3))
# imdata_resized1 = np.empty((hb, wb, 3))
# for r in range(len(imdata_resized)):
# imdata_resized1[r] = np.resize(imdata_resized[r], (wb, 3))
# imdata_resized1 = imdata_resized1.reshape((h//x, wb, y, 3), order='A')
# imdata_resized1 = imdata_resized1.reshape(
# (h//y, w//x, y, x, 3), order='F'
# )
# bindata=np.empty((h//y, w//x, 3))
# # reshapes the matrix into a set of arrays with length x*y
# for r in range(len(bindata)):
# for c in range(len(bindata[r])):
# # bins the arrays using the specified function
# if fn is 'mean':
# bindata[r][c] = np.mean(imdata_resized1[r][c])
# elif fn is 'median':
# bindata[r][c] = np.median(imdata_resized1[r][c])
# else:
# raise ValueError('Invalid argument for \'fn\' parameter')
# bindata = np.resize(bindata, (h//y, w//x, 1, 1, 3))
# bindata = bindata.reshape(h//y, w//x, 3)
# # reshapes the matrix back to its original form
bindata = block_reduce(
self.imdata, (x, y, 1), np.mean, np.mean(self.imdata)
)
self.imdata = bindata
self._binX *= x
self._binY *= y
def extractChannel(self, color):
"""Extracts the specified channel (R,G,B) from the RGB image."""
print("Extracting " + color.name + " channel from image " + str(self))
try:
imdata = self.imdata[:, :, color.value]
except AttributeError:
print("AttributeError for", str(self))
return Monochrome(imdata, self, color)
#def getData(self):
# loads the image data from the temporary folder
#return np.load(self.tmpPath + self.fname + '.npy')
#def _setData(self, idata):
# writes the image data to the temporary folder
#np.save(self.tmpPath + self.fname, idata)
def _genPath(self):
# generates a serialized file name in the format
# imagetype_ordinalnumber
#
# if the image is monochrome, the format is
# imagetype_ordinalnumber_color
cls = type(self)
itype = self.imtype
try:
color = self.imcolor.value
except(AttributeError):
color = 3
if(cls.fnum[itype][color] is None):
cls.fnum[itype][color] = 0
ftype = {0: "light", 1: "bias", 2: "dark", 3: "flat"}[itype]
try:
self.fname = (
ftype + "_" + str(cls.fnum[itype][color])
+ '_' + self.imcolor.name
)
except AttributeError:
self.fname = ftype + "_" + str(cls.fnum[itype][color])
cls.fnum[itype][color] += 1
self.tmpPath = os.path.dirname(self.impath) + '/temp/'
try:
os.makedirs(self.tmpPath)
except(OSError):
pass
def __parseData(self, impath):
# reads the metadata from the RAW file
print("Reading file: " + impath)
with Raw(impath) as img:
idata = _demosaic(img.raw_image())
with open(impath, 'rb') as f:
tags = exifread.process_file(f)
exptime = float(
Fraction(tags.get('EXIF ExposureTime').printable)
)
dt = tags.get('EXIF DateTimeOriginal').printable
(date, _, time) = dt.partition(' ')
dt = tuple([int(i) for i in date.split(':') + time.split(':')])
dt = datetime(*dt).isoformat()
#ofs = tags.get('EXIF TimeZoneOffset').printable
jdate = Time(dt, format='isot', scale='utc').jd
return idata, exptime, jdate
def __str__(self):
try:
return(
"DSLRImage(imtype=" + str(self.imtype)
+ ", color=" + str(self.imcolor)
+ ", fname=" + self.fname
+ ")"
)
except(AttributeError):
return(
"DSLRImage(imtype=" + str(self.imtype)
+ ", color=" + str(self.imcolor)
+ ")"
)
def __del__(self):
# deletes the temporary file/folder
print("Deleting image class: " + str(self))
# os.remove(self.tmpPath + self.fname + '.npy')
# try:
# os.rmdir(self.tmpPath)
# except OSError:
# pass
class Monochrome(DSLRImage):
    """A subtype of DSLRImage for single-color images.
    Is meant to be generated from the extractChannel method. Avoid using the
    class directly.
    """
    def __init__(
        self, imdata, origin, color=Color.GREEN,
        stacked=False, translated=False
    ):
        """Wrap one channel's pixel data, inheriting metadata from *origin*.

        Args:
            imdata: 2D pixel array for this channel.
            origin: the DSLRImage (or Monochrome) this data came from.
            color: channel identity; ignored when origin is already
                Monochrome (its own color is kept).
            stacked, translated: currently unused flags.
        """
        # Inherit acquisition metadata from the originating image.
        self.exptime = origin.exptime
        self.jdate = origin.jdate
        self.impath = origin.impath
        self.imtype = origin.imtype
        self._binX = origin._binX
        self._binY = origin._binY
        # Keep the source color when copying an existing Monochrome.
        if type(origin) == Monochrome:
            self.imcolor = origin.imcolor
        else:
            self.imcolor = color
        self._genPath()
        self.imdata = imdata
        # Star objects detected/registered on this frame.
        self.stars = []
    def saveFITS(self, path, fname=None):
        """Writes the data to a FITS file.

        Pixel data is cast to uint16; EXPTIME, DATE-OBS, IMAGETYP and the
        binning factors are written into the header.  If the target file
        already exists, a numeric suffix is appended instead.
        """
        impath = path + (self.fname if fname is None else fname)
        hdu = fits.PrimaryHDU(self.imdata.astype('uint16'))
        print("Writing image " + str(self) + " to file: " + impath + ".fits")
        hdu.header['EXPTIME'] = self.exptime
        d = Time(self.jdate, format='jd', scale='utc').isot
        hdu.header['DATE-OBS'] = d
        hdu.header['IMAGETYP'] = self.imtype.name
        hdu.header['XBINNING'] = self._binX
        hdu.header['YBINNING'] = self._binY
        n = 1
        try:
            hdu.writeto(impath + ".fits")
        except OSError as e:
            # Missing directory: create it and retry once.
            if(type(e) == FileNotFoundError):
                os.makedirs(path)
                hdu.writeto(impath + ".fits")
                return
            # File exists: probe impath_1, impath_2, ... until one is free.
            error = True
            while(error is True):
                try:
                    hdu.writeto(impath + "_" + str(n) + ".fits")
                    error = False
                except OSError:
                    n += 1
            print(
                "File of the same name already exists, file written to",
                impath + "_" + str(n) + ".fits"
            )
    def binImage(self, x, y=None, fn='mean'):
        """Same as the binImage method in the superclass, but optimized for
        monochrome arrays.

        NOTE(review): the ``fn`` parameter is currently ignored --
        block_reduce is always called with np.mean regardless of its value.
        """
        if y is None:
            y = x
        print(
            "Binning monochrome image: " + str(self)
            + " (" + str(x) + "x" + str(y) + ")"
        )
        # Reduce each (x, y) tile to its mean; partial edge tiles are
        # padded with the global image mean before reduction.
        bindata = block_reduce(
            self.imdata, (x, y), np.mean, np.mean(self.imdata)
        )
        self.imdata = bindata
        # Track cumulative binning factors for the FITS header.
        self._binX *= x
        self._binY *= y
    def add_star(self, y, x, mag=None, name=None):
        """Register a star at pixel (x, y) on this frame.

        A star with a known magnitude (``mag`` given) is treated as a
        fixed reference; otherwise it is variable.  After adding, the
        aperture/annulus radii of ALL stars on this frame are reset to the
        mean radii across the star list, so photometry uses a uniform
        aperture size.
        """
        print("Adding star ({},{})".format(x, y))
        self.make_current()
        if mag is not None:
            isVar = False
        else:
            isVar = True
        st = Star(self, x, y, isVar, mag=mag, name=name)
        self.stars.append(st)
        # Average radius, annulus gap and annulus width over all stars.
        r = np.mean([s.r for s in self.stars])
        d_d = np.mean([s.d_d for s in self.stars])
        d_a = np.mean([s.d_a for s in self.stars])
        print("Added star", st)
        for s in self.stars:
            s.apertures[self] = CircularAperture((s.x[self], s.y[self]), r)
            s.annuli[self] = CircularAnnulus(
                (s.x[self], s.y[self]), r + d_d, r + d_d + d_a
            )
            s.r = r
            s.d_d = d_d
            s.d_a = d_a
            print("\tUpdated aperture radii of star"
                "({},{}) to ({}, {}~{})".format(
                    np.around(s.x[self], decimals=2),
                    np.around(s.y[self], decimals=2),
                    np.around(r, decimals=2),
                    np.around(r + d_d, decimals=2),
                    np.around(r + d_d + d_a, decimals=2)
                )
            )
    def inherit_star(self, s, parent, shift=None, gauss=False, hh=20, hw=20):
        """Carry star *s* over from *parent* onto this frame.

        If ``shift`` is not given, the local translation between the two
        frames is estimated by cross-correlating a (2*hh x 2*hw) window
        around the star.  ``shift`` is (dy, dx); set ``gauss=True`` to
        refine the new position with a 2D Gaussian fit.
        """
        hasStar = False
        print("Inheriting star:", s)
        # Reuse an existing Star object if one with the same name is
        # already registered on this frame.
        for _s in self.stars:
            if s.name == _s.name:
                s = _s
                hasStar = True
                break
        if not hasStar:
            self.stars.append(s)
        if shift is None:
            x = int(s.get_x())
            y = int(s.get_y())
            w1 = parent.imdata[y-hh:y+hh, x-hw:x+hw]
            w2 = self.imdata[y-hh:y+hh, x-hw:x+hw]
            # register_translation returns the (row, col) shift of w2
            # relative to w1; negate to map parent coords -> this frame.
            shift = -register_translation(w1, w2)[0]
        print("Local offset for star", s, ":", shift)
        print(
            "(looking around({}, {}))".format(
                int(s.get_x() + shift[1]),
                int(s.get_y() + shift[0])
            )
        )
        s.updateCoords(
            self, s.get_x() + shift[1], s.get_y() + shift[0], gauss=gauss
        )
        print("Inherited star:", s)
    def make_current(self):
        """Mark this frame as the module-wide 'current' frame, used by
        Star.get_x()/get_y() lookups."""
        global CurrentFrame
        CurrentFrame = self
    def show(self):
        """Display the frame (log-scaled, grayscale) with all star
        apertures drawn on top."""
        self.make_current()
        plt.figure(figsize=(20, 15))
        ax = plt.axes()
        # Log scaling compresses the dynamic range so faint stars show up.
        ax.imshow(np.log(self.imdata), cmap='gray')
        for s in self.stars:
            s.drawAperture(self, ax)
        plt.show()
class Star:
    """A star tracked across one or more frames for aperture photometry.

    Per-frame state (position, aperture, annulus) is kept in dicts keyed
    by the frame object; scalar radii (r, d_d, d_a) are shared.

    NOTE(review): the Gaussian-fit methods use a module-level half-window
    ``w`` and the helper ``_get_current_frame()`` defined elsewhere in
    this file -- confirm their definitions when editing.
    """
    def __init__(
        self, parent, x, y, isVar, mag=None, r=None, d_d=None,
        d_a=None, name=None
    ):
        """Create a star on *parent* at (x, y).

        Args:
            parent: the frame the star is first detected on.
            x, y: approximate pixel coordinates.
            isVar: True for a variable (unknown-magnitude) star.
            mag: reference magnitude for fixed stars.
            r: aperture radius; fitted from a 2D Gaussian (2*sigma*FWHM
                factor) when None.
            d_d: gap between aperture and annulus; defaults to r.
            d_a: annulus width; defaults to r.
            name: optional label; auto-generated as Star_<n> otherwise.
        """
        parent.make_current()
        cls = type(self)
        # Class-level instance counter used for auto-naming.
        if hasattr(cls, 'n'):
            cls.n += 1
        else:
            cls.n = 1
        if name is not None:
            self.name = name
        else:
            self.name = 'Star_' + str(cls.n)
        self.mag = mag
        self.isVar = isVar
        if(self.isVar):
            # Per-frame measured magnitudes for variable stars.
            self.varMag = dict()
        self.apertures = dict()
        self.annuli = dict()
        self.x = dict()
        self.y = dict()
        if r is None:
            # Refine the centroid and derive the radius from a 2D
            # Gaussian fit in a (2w x 2w) window around (x, y).
            gaussian = fit_2dgaussian(parent.imdata[y-w:y+w, x-w:x+w])
            x += gaussian.x_mean.value - w
            y += gaussian.y_mean.value - w
            r = 2 * np.sqrt(
                gaussian.x_stddev + gaussian.y_stddev
            )*(
                np.sqrt(2*np.log(2))
            )
        if d_d is None:
            d_d = r
        if d_a is None:
            d_a = r
        R = r + d_d
        self.apertures[parent] = CircularAperture([x, y], r)
        self.annuli[parent] = CircularAnnulus([x, y], R, R+d_a)
        self.r = r
        self.d_d = d_d
        self.d_a = d_a
        self.x[parent] = x
        self.y[parent] = y
        print("Created star", self, "in frame", parent)
    def get_x(self):
        """x coordinate on the current frame (set via make_current)."""
        return self.x[_get_current_frame()]
    def get_y(self):
        """y coordinate on the current frame (set via make_current)."""
        return self.y[_get_current_frame()]
    def updateCoords(self, frame, x, y, gauss=False):
        """Set this star's position on *frame*, optionally refining it
        with a 2D Gaussian fit, and rebuild its aperture/annulus there."""
        x = int(x)
        y = int(y)
        if gauss:
            try:
                gaussian = fit_2dgaussian(frame.imdata[y-w:y+w, x-w:x+w])
                x += gaussian.x_mean.value - w
                y += gaussian.y_mean.value - w
            except ValueError:
                # Fit window fell (partly) outside the image; keep the
                # unrefined coordinates and log the offending slice.
                print(
                    '({}, {}: [{}:{}, {}:{}]'.format(
                        self.x[frame], self.y[frame], y-w, y+w, x-w, x+w
                    )
                )
        self.apertures[frame] = CircularAperture([x, y], self.r)
        self.annuli[frame] = CircularAnnulus(
            [x, y], self.r+self.d_d, self.r+self.d_d+self.d_a
        )
        self.x[frame] = x
        self.y[frame] = y
    def defMag(self, frame, mag):
        """Record the measured magnitude of a variable star on *frame*."""
        if not self.isVar:
            raise AttributeError('Variable magnitude can only be defined on'
                'variable stars')
        self.varMag[frame] = mag
    def drawAperture(self, frame, ax):
        """Draw the aperture (green) and annulus (red) on *ax*."""
        self.apertures[frame].plot(ax, fc='g', ec='g')
        self.annuli[frame].plot(ax, fc='r', ec='r')
    def FWHM(self, frame):
        """Return the star's FWHM-based radius from a fresh Gaussian fit
        on *frame* (same 2*sigma*sqrt(2*ln 2) formula as __init__)."""
        x = int(self.x[frame])
        y = int(self.y[frame])
        gaussian = fit_2dgaussian(frame.imdata[y-w:y+w, x-w:x+w])
        r = 2 * np.sqrt(gaussian.x_stddev + gaussian.y_stddev
            )*(
                np.sqrt(2*np.log(2))
            )
        return r
    def centroid(self, frame):
        """Return the Gaussian-fitted (x, y) centroid on *frame* without
        mutating the stored coordinates."""
        x = int(self.x[frame])
        y = int(self.y[frame])
        gaussian = fit_2dgaussian(frame.imdata[y-w:y+w, x-w:x+w])
        x += gaussian.x_mean.value - w
        y += gaussian.y_mean.value - w
        return x, y
    def __str__(self):
        """Multi-line description: kind, name, centroid and radii."""
        if self.isVar:
            s = "Variable star: "
        else:
            s = "Fixed-magnitude star: "
        if hasattr(self, 'name'):
            s += self.name + '\n'
        else:
            s += "(no name)\n"
        try:
            s += "Centroid: ({}, {})\n".format(
                int(np.around(self.get_x())),
                int(np.around(self.get_y()))
            )
        except KeyError:
            # Star has no coordinates on the current frame yet.
            s += "Centroid unknown\n"
        s += "Aperture radius: " + str(np.around(self.r, decimals=2)) + "\n"
        s += "Annulus radii: {}~{}\n".format(
            np.around(self.r+self.d_d, decimals=2),
            np.around(self.r+self.d_d+self.d_a, decimals=2)
        )
        return s
class _debugImage(Monochrome):
    """Lightweight Monochrome stand-in for debugging.

    Accepts either a bare numpy array (no metadata) or a FITS HDU-like
    object (with .data and .header); skips the normal DSLRImage pipeline.
    """
    def __init__(self, im):
        if type(im) is np.ndarray:
            # Raw array: pixel data only, no metadata available.
            self.imdata = im
            self.exptime = None
            self.jdate = None
            self._binX = None
            self._binY = None
        else:
            # HDU-like object: pull metadata out of the FITS header.
            hdr = im.header
            self.imdata = im.data
            self.exptime = hdr['EXPTIME']
            self.jdate = Time(
                hdr['DATE-OBS'], format='isot', scale='utc'
            ).jd
            try:
                self._binX = hdr['XBINNING']
                self._binY = hdr['YBINNING']
            except KeyError:
                self._binX = None
                self._binY = None
        self.impath = None
        self.imtype = ImageType.LIGHT
        self.imcolor = Color.GREEN
        self.stars = []
| [
"matplotlib.pyplot.show",
"os.makedirs",
"numpy.log",
"matplotlib.pyplot.axes",
"photutils.CircularAperture",
"os.path.dirname",
"photutils.centroids.fit_2dgaussian",
"numpy.zeros",
"astropy.time.Time",
"datetime.datetime",
"photutils.CircularAnnulus",
"rawkit.raw.Raw",
"matplotlib.pyplot.fi... | [((1983, 2010), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {'dtype': 'int'}), '((4, 4), dtype=int)\n', (1991, 2010), True, 'import numpy as np\n'), ((1591, 1597), 'rawkit.raw.Raw', 'Raw', (['f'], {}), '(f)\n', (1594, 1597), False, 'from rawkit.raw import Raw\n'), ((11521, 11555), 'numpy.mean', 'np.mean', (['[s.r for s in self.stars]'], {}), '([s.r for s in self.stars])\n', (11528, 11555), True, 'import numpy as np\n'), ((11570, 11606), 'numpy.mean', 'np.mean', (['[s.d_d for s in self.stars]'], {}), '([s.d_d for s in self.stars])\n', (11577, 11606), True, 'import numpy as np\n'), ((11621, 11657), 'numpy.mean', 'np.mean', (['[s.d_a for s in self.stars]'], {}), '([s.d_a for s in self.stars])\n', (11628, 11657), True, 'import numpy as np\n'), ((13588, 13616), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (13598, 13616), True, 'from matplotlib import pyplot as plt\n'), ((13630, 13640), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (13638, 13640), True, 'from matplotlib import pyplot as plt\n'), ((13767, 13777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13775, 13777), True, 'from matplotlib import pyplot as plt\n'), ((14927, 14954), 'photutils.CircularAperture', 'CircularAperture', (['[x, y]', 'r'], {}), '([x, y], r)\n', (14943, 14954), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((14985, 15020), 'photutils.CircularAnnulus', 'CircularAnnulus', (['[x, y]', 'R', '(R + d_a)'], {}), '([x, y], R, R + d_a)\n', (15000, 15020), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((16085, 16117), 'photutils.CircularAperture', 'CircularAperture', (['[x, y]', 'self.r'], {}), '([x, y], self.r)\n', (16101, 16117), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((16147, 16219), 'photutils.CircularAnnulus', 'CircularAnnulus', (['[x, y]', '(self.r + self.d_d)', '(self.r + self.d_d + self.d_a)'], {}), '([x, y], self.r + 
self.d_d, self.r + self.d_d + self.d_a)\n', (16162, 16219), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((16779, 16833), 'photutils.centroids.fit_2dgaussian', 'fit_2dgaussian', (['frame.imdata[y - w:y + w, x - w:x + w]'], {}), '(frame.imdata[y - w:y + w, x - w:x + w])\n', (16793, 16833), False, 'from photutils.centroids import fit_2dgaussian\n'), ((17125, 17179), 'photutils.centroids.fit_2dgaussian', 'fit_2dgaussian', (['frame.imdata[y - w:y + w, x - w:x + w]'], {}), '(frame.imdata[y - w:y + w, x - w:x + w])\n', (17139, 17179), False, 'from photutils.centroids import fit_2dgaussian\n'), ((4328, 4348), 'numpy.mean', 'np.mean', (['self.imdata'], {}), '(self.imdata)\n', (4335, 4348), True, 'import numpy as np\n'), ((5968, 5996), 'os.path.dirname', 'os.path.dirname', (['self.impath'], {}), '(self.impath)\n', (5983, 5996), False, 'import os\n'), ((6033, 6058), 'os.makedirs', 'os.makedirs', (['self.tmpPath'], {}), '(self.tmpPath)\n', (6044, 6058), False, 'import os\n'), ((6238, 6249), 'rawkit.raw.Raw', 'Raw', (['impath'], {}), '(impath)\n', (6241, 6249), False, 'from rawkit.raw import Raw\n'), ((6362, 6386), 'exifread.process_file', 'exifread.process_file', (['f'], {}), '(f)\n', (6383, 6386), False, 'import exifread\n'), ((8724, 8766), 'astropy.time.Time', 'Time', (['self.jdate'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(self.jdate, format='jd', scale='utc')\n", (8728, 8766), False, 'from astropy.time import Time\n'), ((11082, 11102), 'numpy.mean', 'np.mean', (['self.imdata'], {}), '(self.imdata)\n', (11089, 11102), True, 'import numpy as np\n'), ((11751, 11794), 'photutils.CircularAperture', 'CircularAperture', (['(s.x[self], s.y[self])', 'r'], {}), '((s.x[self], s.y[self]), r)\n', (11767, 11794), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((11824, 11887), 'photutils.CircularAnnulus', 'CircularAnnulus', (['(s.x[self], s.y[self])', '(r + d_d)', '(r + d_d + d_a)'], {}), '((s.x[self], s.y[self]), r + d_d, r + d_d + 
d_a)\n', (11839, 11887), False, 'from photutils import CircularAperture, CircularAnnulus\n'), ((13659, 13678), 'numpy.log', 'np.log', (['self.imdata'], {}), '(self.imdata)\n', (13665, 13678), True, 'import numpy as np\n'), ((14446, 14501), 'photutils.centroids.fit_2dgaussian', 'fit_2dgaussian', (['parent.imdata[y - w:y + w, x - w:x + w]'], {}), '(parent.imdata[y - w:y + w, x - w:x + w])\n', (14460, 14501), False, 'from photutils.centroids import fit_2dgaussian\n'), ((17903, 17943), 'numpy.around', 'np.around', (['(self.r + self.d_d)'], {'decimals': '(2)'}), '(self.r + self.d_d, decimals=2)\n', (17912, 17943), True, 'import numpy as np\n'), ((17959, 18010), 'numpy.around', 'np.around', (['(self.r + self.d_d + self.d_a)'], {'decimals': '(2)'}), '(self.r + self.d_d + self.d_a, decimals=2)\n', (17968, 18010), True, 'import numpy as np\n'), ((6817, 6853), 'astropy.time.Time', 'Time', (['dt'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "(dt, format='isot', scale='utc')\n", (6821, 6853), False, 'from astropy.time import Time\n'), ((15673, 15727), 'photutils.centroids.fit_2dgaussian', 'fit_2dgaussian', (['frame.imdata[y - w:y + w, x - w:x + w]'], {}), '(frame.imdata[y - w:y + w, x - w:x + w])\n', (15687, 15727), False, 'from photutils.centroids import fit_2dgaussian\n'), ((16842, 16888), 'numpy.sqrt', 'np.sqrt', (['(gaussian.x_stddev + gaussian.y_stddev)'], {}), '(gaussian.x_stddev + gaussian.y_stddev)\n', (16849, 16888), True, 'import numpy as np\n'), ((18449, 18485), 'astropy.time.Time', 'Time', (['dt'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "(dt, format='isot', scale='utc')\n", (18453, 18485), False, 'from astropy.time import Time\n'), ((6710, 6723), 'datetime.datetime', 'datetime', (['*dt'], {}), '(*dt)\n', (6718, 6723), False, 'from datetime import datetime\n'), ((9105, 9122), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (9116, 9122), False, 'import os\n'), ((12128, 12160), 'numpy.around', 'np.around', (['s.x[self]'], {'decimals': 
'(2)'}), '(s.x[self], decimals=2)\n', (12137, 12160), True, 'import numpy as np\n'), ((12188, 12220), 'numpy.around', 'np.around', (['s.y[self]'], {'decimals': '(2)'}), '(s.y[self], decimals=2)\n', (12197, 12220), True, 'import numpy as np\n'), ((12248, 12272), 'numpy.around', 'np.around', (['r'], {'decimals': '(2)'}), '(r, decimals=2)\n', (12257, 12272), True, 'import numpy as np\n'), ((12300, 12330), 'numpy.around', 'np.around', (['(r + d_d)'], {'decimals': '(2)'}), '(r + d_d, decimals=2)\n', (12309, 12330), True, 'import numpy as np\n'), ((12358, 12394), 'numpy.around', 'np.around', (['(r + d_d + d_a)'], {'decimals': '(2)'}), '(r + d_d + d_a, decimals=2)\n', (12367, 12394), True, 'import numpy as np\n'), ((12993, 13021), 'skimage.feature.register_translation', 'register_translation', (['w1', 'w2'], {}), '(w1, w2)\n', (13013, 13021), False, 'from skimage.feature import register_translation\n'), ((14600, 14646), 'numpy.sqrt', 'np.sqrt', (['(gaussian.x_stddev + gaussian.y_stddev)'], {}), '(gaussian.x_stddev + gaussian.y_stddev)\n', (14607, 14646), True, 'import numpy as np\n'), ((16958, 16967), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (16964, 16967), True, 'import numpy as np\n'), ((17803, 17832), 'numpy.around', 'np.around', (['self.r'], {'decimals': '(2)'}), '(self.r, decimals=2)\n', (17812, 17832), True, 'import numpy as np\n'), ((14745, 14754), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (14751, 14754), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import random
import torch
import imgaug as ia
import imgaug.augmenters as iaa
import copy
# points = [
# [(10.5, 20.5)], # points on first image
# [(50.5, 50.5), (60.5, 60.5), (70.5, 70.5)] # points on second image
# ]
# image = cv2.imread('000000472375.jpg')
# inp_bbox = [np.array([124.71,196.18,124.71+372.85,196.18+356.81])]
'''
points = np.array([[ 80.90703725, 126.08039874, 0. ],
[ 72.72988313, 127.2840341, 0. ],
[ 86.29191076, 160.56158147, 0. ],
[ 80.87585772, 159.50228059, 0. ],
[ 81.09376061, 190.41214379, 0. ],
[ 77.63778624, 192.15852308, 0. ],
[ 84.55893103, 190.83034651, 0. ],
[ 88.24699688, 192.76283703, 0. ],
[ 70.1611101, 235.95892525, 0. ],
[106.62995965, 239.87347792, 0. ],
[ 66.48005009, 286.62669707, 0. ],
[128.05848894, 280.34743948, 0. ]])
image = cv2.imread('demo.jpg')
def show(image,points):
for i in points:
cv2.circle(image,(int(i[0]), int(i[1])), 5, (0,255,0), -1)
return image
'''
# def arguementation(image, dataset_dict, p=0.5):
# Cached augmentation pipeline.  The augmenters are stateless samplers, so
# one instance can safely be reused across calls; the original code rebuilt
# the whole iaa.Sequential on every invocation.
_AUGMENTER = None


def _build_augmenter():
    """Construct the photometric/perspective augmentation pipeline."""
    # Sometimes(0.5, aug) applies the given augmenter in 50% of all cases.
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)
    return iaa.Sequential(
        [
            # Execute 0 to 5 of the following augmenters per image,
            # in random order.
            iaa.SomeOf(
                (0, 5),
                [
                    # Occasionally replace regions by their superpixels.
                    sometimes(iaa.Superpixels(p_replace=(0, 0.1),
                                              n_segments=(200, 300))),
                    # One of three blur flavours.
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)),
                        iaa.AverageBlur(k=(2, 4)),
                        iaa.MedianBlur(k=(1, 5)),
                    ]),
                    iaa.Sharpen(alpha=(0, 0.75), lightness=(0.1, 1.9)),
                    iaa.Emboss(alpha=(0, 0.75), strength=(0, 1.0)),
                    # Brightness shift, sometimes per channel.
                    iaa.Add((-20, 20), per_channel=0.5),
                    iaa.OneOf([
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                    ]),
                    iaa.LinearContrast((0.75, 1.5), per_channel=0.5),
                    iaa.Grayscale(alpha=(0.0, 0.3)),
                    sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1))),
                ],
                random_order=True
            )
        ],
        random_order=True
    )


def arguementation(image, p=0.5):
    """Randomly augment an image with photometric/perspective transforms.

    Args:
        image: HxWxC image array (converted to uint8 before augmentation).
        p: skip threshold -- when ``random.random() > p`` the image is
           returned unchanged, so with p=0.5 roughly half of all calls
           are pass-throughs.

    Returns:
        The original image (pass-through case) or the augmented image as
        a uint8 numpy array.
    """
    global _AUGMENTER
    if random.random() > p:
        return image
    # Build the stateless pipeline once and reuse it on later calls.
    if _AUGMENTER is None:
        _AUGMENTER = _build_augmenter()
    images = np.array([image]).astype(np.uint8)
    images_aug = _AUGMENTER(images=images)
    return images_aug[0]
# rst_image,bbox = arguementation(image,inp_bbox)
# cv2.rectangle(rst_image,(int(bbox[0][0]),int(bbox[0][1])),(int(bbox[0][2]),int(bbox[0][3])),(0,255,0),2)
# cv2.imwrite('demo.jpg',rst_image)
# print(image.shape,rst_image.shape)
| [
"imgaug.augmenters.AverageBlur",
"imgaug.augmenters.MedianBlur",
"imgaug.augmenters.LinearContrast",
"imgaug.augmenters.Sometimes",
"imgaug.augmenters.Superpixels",
"random.random",
"imgaug.augmenters.PerspectiveTransform",
"imgaug.augmenters.Grayscale",
"numpy.array",
"imgaug.augmenters.Add",
"... | [((1194, 1209), 'random.random', 'random.random', ([], {}), '()\n', (1207, 1209), False, 'import random\n'), ((1888, 1911), 'imgaug.augmenters.Sometimes', 'iaa.Sometimes', (['(0.5)', 'aug'], {}), '(0.5, aug)\n', (1901, 1911), True, 'import imgaug.augmenters as iaa\n'), ((1558, 1575), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (1566, 1575), True, 'import numpy as np\n'), ((4172, 4222), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0, 0.75)', 'lightness': '(0.1, 1.9)'}), '(alpha=(0, 0.75), lightness=(0.1, 1.9))\n', (4183, 4222), True, 'import imgaug.augmenters as iaa\n'), ((4269, 4315), 'imgaug.augmenters.Emboss', 'iaa.Emboss', ([], {'alpha': '(0, 0.75)', 'strength': '(0, 1.0)'}), '(alpha=(0, 0.75), strength=(0, 1.0))\n', (4279, 4315), True, 'import imgaug.augmenters as iaa\n'), ((5313, 5348), 'imgaug.augmenters.Add', 'iaa.Add', (['(-20, 20)'], {'per_channel': '(0.5)'}), '((-20, 20), per_channel=0.5)\n', (5320, 5348), True, 'import imgaug.augmenters as iaa\n'), ((6180, 6228), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.75, 1.5)'], {'per_channel': '(0.5)'}), '((0.75, 1.5), per_channel=0.5)\n', (6198, 6228), True, 'import imgaug.augmenters as iaa\n'), ((6291, 6322), 'imgaug.augmenters.Grayscale', 'iaa.Grayscale', ([], {'alpha': '(0.0, 0.3)'}), '(alpha=(0.0, 0.3))\n', (6304, 6322), True, 'import imgaug.augmenters as iaa\n'), ((3519, 3577), 'imgaug.augmenters.Superpixels', 'iaa.Superpixels', ([], {'p_replace': '(0, 0.1)', 'n_segments': '(200, 300)'}), '(p_replace=(0, 0.1), n_segments=(200, 300))\n', (3534, 3577), True, 'import imgaug.augmenters as iaa\n'), ((6635, 6678), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.01, 0.1)'}), '(scale=(0.01, 0.1))\n', (6659, 6678), True, 'import imgaug.augmenters as iaa\n'), ((3731, 3757), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0, 3.0)'], {}), '((0, 3.0))\n', (3747, 3757), True, 'import imgaug.augmenters as iaa\n'), 
((3836, 3861), 'imgaug.augmenters.AverageBlur', 'iaa.AverageBlur', ([], {'k': '(2, 4)'}), '(k=(2, 4))\n', (3851, 3861), True, 'import imgaug.augmenters as iaa\n'), ((3990, 4014), 'imgaug.augmenters.MedianBlur', 'iaa.MedianBlur', ([], {'k': '(1, 5)'}), '(k=(1, 5))\n', (4004, 4014), True, 'import imgaug.augmenters as iaa\n'), ((5772, 5813), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.5, 1.5)'], {'per_channel': '(0.5)'}), '((0.5, 1.5), per_channel=0.5)\n', (5784, 5813), True, 'import imgaug.augmenters as iaa\n')] |
"""
%%
%% function ubezier(TRI,X,Y,Z,XP,YP,ZP,pchar,color)
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Tracé d'une surface de Bézier et de ses points de contrôle
%%
%% Données : TRI liste des facettes triangulaires de la surface
%% Données : X, Y, Z coordonnées des points de l'échantillonage
%% Données : XP, YP, ZP coordonnées des points de contrôle
%% T ensemble des valeurs du paramètre
%% pchar caractère associé aux points de contrôle
%% pcolor couleur de la courbe de contrôle
%%
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
import matplotlib.cm as cm
def _annotate_control_points(ax, XP, YP, ZP, pchar):
    """Label every control point XP[i][j] on the 3D axes as '<pchar><i><j>'."""
    shape = np.shape(XP)
    for i in range(shape[0]):
        for j in range(shape[1]):
            ax.text(XP[i][j], YP[i][j], ZP[i][j], pchar + str(i) + str(j))


def ubezier_raccord(X1, Y1, Z1, XP1, YP1, ZP1, pchar1,
                    X2, Y2, Z2, XP2, YP2, ZP2, pchar2, title_raccord):
    """Plot two Bezier surface patches on common 3D axes together with
    their labelled control nets, to visualize the junction ("raccord")
    between the two patches.

    Args:
        X1, Y1, Z1 / X2, Y2, Z2: sampled surface coordinates (2D arrays).
        XP1, YP1, ZP1 / XP2, YP2, ZP2: control point coordinates (2D arrays).
        pchar1, pchar2: label prefixes for the two control nets (e.g. 'P', 'Q').
        title_raccord: figure title.
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    # Draw both patches as triangulated, semi-transparent surfaces.
    ax.plot_trisurf(X1.ravel(), Y1.ravel(), Z1.ravel(), lw=0.1,
                    edgecolor="black", cmap=cm.get_cmap("Spectral"), alpha=0.5)
    ax.plot_trisurf(X2.ravel(), Y2.ravel(), Z2.ravel(), lw=0.1,
                    edgecolor="black", cmap=cm.get_cmap("Spectral"), alpha=0.5)
    # Label the control points of each patch (the original duplicated
    # this loop verbatim for both nets).
    _annotate_control_points(ax, XP1, YP1, ZP1, pchar1)
    _annotate_control_points(ax, XP2, YP2, ZP2, pchar2)
    plt.title(title_raccord)
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.cm.get_cmap",
"numpy.shape",
"matplotlib.pyplot.figure"
] | [((1133, 1145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1143, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1439, 1452), 'numpy.shape', 'np.shape', (['XP1'], {}), '(XP1)\n', (1447, 1452), True, 'import numpy as np\n'), ((1483, 1496), 'numpy.shape', 'np.shape', (['XP2'], {}), '(XP2)\n', (1491, 1496), True, 'import numpy as np\n'), ((2852, 2876), 'matplotlib.pyplot.title', 'plt.title', (['title_raccord'], {}), '(title_raccord)\n', (2861, 2876), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2896, 2898), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1284), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (1272, 1284), True, 'import matplotlib.cm as cm\n'), ((1362, 1385), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (1373, 1385), True, 'import matplotlib.cm as cm\n')] |
import os
import cv2
import shutil
import numpy as np
import subprocess as sp
from imageio import imread
from typing import Any, Dict, List, Optional, Tuple
class CameraIntrinsicsHelper():
def __init__(self):
self.blurry_thresh = 100.0
self.sfm_workspace_dir = 'data/debug_sfm/'
self.sfm_images_dir = 'data/debug_sfm/images'
def is_blurry(self, image: np.ndarray) -> bool:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
var = cv2.Laplacian(gray, cv2.CV_64F).var()
if var > self.blurry_thresh:
# not blurry
return False, var
else:
return True, var
def select_good_frames_indices(self, images_files: List, images_dir: str) -> Tuple:
blurry_indicator = []
all_laplacian_variances = []
cpt_consecutive_non_blurry = 0
for image_file in images_files:
image = imread(os.path.join(images_dir, image_file))
blurry_ind_img, tmp_var = self.is_blurry(image)
blurry_indicator.append(blurry_ind_img)
all_laplacian_variances.append(tmp_var)
if not blurry_ind_img:
cpt_consecutive_non_blurry+=1
else:
cpt_consecutive_non_blurry=0
if cpt_consecutive_non_blurry>10:
break
blurry_indicator = np.array(blurry_indicator)
blurry_indicator = blurry_indicator.astype(np.int)
diff = np.diff(blurry_indicator)
nonzero_indices = np.nonzero(diff!=0)[0]
if len(nonzero_indices) > 0:
splits = np.split(blurry_indicator, nonzero_indices+1)
idx = 0
max_idx_start = -1
max_idx_end = -1
max_length = -1
for s in splits:
if s.size==0: continue
if s[0] == 0:
assert np.all(s==0)
if len(s) > max_length:
max_length = len(s)
max_idx_start = idx
max_idx_end = idx + len(s) - 1
idx += len(s)
if max_idx_start > -1:
start_idx = max_idx_start
end_idx = max_idx_end
# check the number of frame selected.
# if not enough frames are selected.
# get the set of 10 frames with the
# highest cumulative laplacian variance
if (end_idx - start_idx + 1) < 5:
print('Not enough frames selected -> Getting the set of 10 frames with max cumsum Laplacian Variance')
cumsum_10frames = [np.sum(all_laplacian_variances[i:i+10]) for i\
in range(0,len(all_laplacian_variances)-10)]
idx = np.argmax(cumsum_10frames)
start_idx = idx
end_idx = idx+9
return (start_idx, end_idx)
else:
return None
else:
if blurry_indicator[0] == 0:
#all frames are not blurry
max_idx = 0
max_length = len(blurry_indicator)
start_idx = int(len(blurry_indicator) / 2.0)
end_idx = int(len(blurry_indicator) / 2.0) + 9
return (start_idx, end_idx)
else:
# all frames are blurry
# then select the most non-blurry ones
print('ALL frames are blurry -> Getting the set of 10 frames with max cumsum Laplacian Variance')
all_laplacian_variances = np.array(all_laplacian_variances)
cumsum_10frames = [np.sum(all_laplacian_variances[i:i+10]) for i\
in range(0,len(all_laplacian_variances)-10)]
idx = np.argmax(cumsum_10frames)
start_idx = idx
end_idx = idx+9
return (start_idx, end_idx)
def select_good_frames(self, images_dir:str) -> None:
r"""
Selects good frames for intrinsics parameter estimation using COLMAP.
A good frame is non-blury - it has a variance of Laplacian greater than 100.
We aim at selecting 20 consecutive of such good frames for a better
COLMAP performance.
"""
images_files = os.listdir(images_dir)
images_files = sorted(images_files)
indices = self.select_good_frames_indices(images_files, images_dir)
if indices is not None:
cpt = 0
for i in range(indices[0], indices[1]+1, 1):
shutil.copyfile(os.path.join(images_dir, images_files[i]),
os.path.join(self.sfm_images_dir, images_files[i]))
cpt+=1
if cpt > 10: break
else:
print('NO GOOD FRAMES FOUND')
def run_colmap(self) -> List:
# run sfm
o = sp.check_output(['colmap',
"automatic_reconstructor",
"--workspace_path",
self.sfm_workspace_dir,
"--image_path",
self.sfm_images_dir,
"--camera_model",
"RADIAL_FISHEYE",
"--single_camera",
"1",
])
# check if successfull
if len(os.listdir(os.path.join(self.sfm_workspace_dir,'sparse'))) == 0:
return None
o = sp.check_output(['colmap',
"model_converter",
"--input_path",
os.path.join(self.sfm_workspace_dir, 'sparse', '0/'),
"--output_path",
os.path.join(self.sfm_workspace_dir, 'sparse', '0/'),
"--output_type",
"TXT",
])
return self.parse_colmap_intrinsics(os.path.join(self.sfm_workspace_dir, 'sparse/0/cameras.txt'))
def parse_colmap_intrinsics(self, camera_txt_filename: str)-> Dict:
# example output
#1 RADIAL_FISHEYE 1440 1080 660.294 720 540 0.0352702 0.0046637\n
with open(camera_txt_filename, 'r') as f:
for _ in range(4):
l = f.readline()
e = l.split(' ')
outputs = {
'num_cameras': e[0],
'type':e[1],
'width':e[2],
'height':e[3],
'f':e[4],
'cx':e[5],
'cy':e[6],
'k1':e[7],
'k2':e[8][:-1],
}
return outputs
    def get_camera_intrinsics(self, images_dir: str) -> Tuple:
        """Full pipeline: copy good frames from *images_dir*, then run COLMAP.

        Returns whatever ``run_colmap`` returns (a dict of intrinsics, or
        None on failure).  NOTE(review): the ``Tuple`` annotation does not
        match that — confirm the intended return type.
        """
        self.select_good_frames(images_dir)
        return self.run_colmap()
| [
"numpy.sum",
"numpy.argmax",
"cv2.cvtColor",
"subprocess.check_output",
"numpy.split",
"numpy.nonzero",
"numpy.diff",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.all",
"cv2.Laplacian"
] | [((424, 463), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (436, 463), False, 'import cv2\n'), ((1363, 1389), 'numpy.array', 'np.array', (['blurry_indicator'], {}), '(blurry_indicator)\n', (1371, 1389), True, 'import numpy as np\n'), ((1465, 1490), 'numpy.diff', 'np.diff', (['blurry_indicator'], {}), '(blurry_indicator)\n', (1472, 1490), True, 'import numpy as np\n'), ((4356, 4378), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (4366, 4378), False, 'import os\n'), ((4947, 5150), 'subprocess.check_output', 'sp.check_output', (["['colmap', 'automatic_reconstructor', '--workspace_path', self.\n sfm_workspace_dir, '--image_path', self.sfm_images_dir,\n '--camera_model', 'RADIAL_FISHEYE', '--single_camera', '1']"], {}), "(['colmap', 'automatic_reconstructor', '--workspace_path',\n self.sfm_workspace_dir, '--image_path', self.sfm_images_dir,\n '--camera_model', 'RADIAL_FISHEYE', '--single_camera', '1'])\n", (4962, 5150), True, 'import subprocess as sp\n'), ((1517, 1538), 'numpy.nonzero', 'np.nonzero', (['(diff != 0)'], {}), '(diff != 0)\n', (1527, 1538), True, 'import numpy as np\n'), ((1599, 1646), 'numpy.split', 'np.split', (['blurry_indicator', '(nonzero_indices + 1)'], {}), '(blurry_indicator, nonzero_indices + 1)\n', (1607, 1646), True, 'import numpy as np\n'), ((6081, 6141), 'os.path.join', 'os.path.join', (['self.sfm_workspace_dir', '"""sparse/0/cameras.txt"""'], {}), "(self.sfm_workspace_dir, 'sparse/0/cameras.txt')\n", (6093, 6141), False, 'import os\n'), ((478, 509), 'cv2.Laplacian', 'cv2.Laplacian', (['gray', 'cv2.CV_64F'], {}), '(gray, cv2.CV_64F)\n', (491, 509), False, 'import cv2\n'), ((913, 949), 'os.path.join', 'os.path.join', (['images_dir', 'image_file'], {}), '(images_dir, image_file)\n', (925, 949), False, 'import os\n'), ((3628, 3661), 'numpy.array', 'np.array', (['all_laplacian_variances'], {}), '(all_laplacian_variances)\n', (3636, 3661), True, 'import numpy as 
np\n'), ((3845, 3871), 'numpy.argmax', 'np.argmax', (['cumsum_10frames'], {}), '(cumsum_10frames)\n', (3854, 3871), True, 'import numpy as np\n'), ((5732, 5784), 'os.path.join', 'os.path.join', (['self.sfm_workspace_dir', '"""sparse"""', '"""0/"""'], {}), "(self.sfm_workspace_dir, 'sparse', '0/')\n", (5744, 5784), False, 'import os\n'), ((5861, 5913), 'os.path.join', 'os.path.join', (['self.sfm_workspace_dir', '"""sparse"""', '"""0/"""'], {}), "(self.sfm_workspace_dir, 'sparse', '0/')\n", (5873, 5913), False, 'import os\n'), ((1878, 1892), 'numpy.all', 'np.all', (['(s == 0)'], {}), '(s == 0)\n', (1884, 1892), True, 'import numpy as np\n'), ((2808, 2834), 'numpy.argmax', 'np.argmax', (['cumsum_10frames'], {}), '(cumsum_10frames)\n', (2817, 2834), True, 'import numpy as np\n'), ((3697, 3738), 'numpy.sum', 'np.sum', (['all_laplacian_variances[i:i + 10]'], {}), '(all_laplacian_variances[i:i + 10])\n', (3703, 3738), True, 'import numpy as np\n'), ((4640, 4681), 'os.path.join', 'os.path.join', (['images_dir', 'images_files[i]'], {}), '(images_dir, images_files[i])\n', (4652, 4681), False, 'import os\n'), ((4715, 4765), 'os.path.join', 'os.path.join', (['self.sfm_images_dir', 'images_files[i]'], {}), '(self.sfm_images_dir, images_files[i])\n', (4727, 4765), False, 'import os\n'), ((5492, 5538), 'os.path.join', 'os.path.join', (['self.sfm_workspace_dir', '"""sparse"""'], {}), "(self.sfm_workspace_dir, 'sparse')\n", (5504, 5538), False, 'import os\n'), ((2652, 2693), 'numpy.sum', 'np.sum', (['all_laplacian_variances[i:i + 10]'], {}), '(all_laplacian_variances[i:i + 10])\n', (2658, 2693), True, 'import numpy as np\n')] |
import pytest
import numpy as np
import pyEMA
def test_complex_freq_to_freq_and_damp():
    """Round-trip: build a complex eigenvalue from a known natural
    frequency and damping ratio, then recover both with pyEMA."""
    freq = 13
    damp = 0.00324
    omega = 2 * np.pi * freq
    eigenvalue = -damp * omega + 1j * omega * np.sqrt(1 - damp ** 2)
    freq_out, damp_out = pyEMA.complex_freq_to_freq_and_damp(eigenvalue)
    np.testing.assert_almost_equal(freq, freq_out, 5)
    np.testing.assert_almost_equal(damp, damp_out, 5)
"pyEMA.complex_freq_to_freq_and_damp",
"numpy.sqrt",
"numpy.testing.assert_almost_equal"
] | [((187, 226), 'pyEMA.complex_freq_to_freq_and_damp', 'pyEMA.complex_freq_to_freq_and_damp', (['fc'], {}), '(fc)\n', (222, 226), False, 'import pyEMA\n'), ((232, 272), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['f', 'f_', '(5)'], {}), '(f, f_, 5)\n', (262, 272), True, 'import numpy as np\n'), ((277, 317), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x_', '(5)'], {}), '(x, x_, 5)\n', (307, 317), True, 'import numpy as np\n'), ((157, 176), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (164, 176), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 15:21:59 2020
@author: Reuben
"""
import unittest
import numpy as np
import npsolve.soft_functions as soft
class Test_lim_scalar(unittest.TestCase):
    """soft.lim with scalars: side=1 clamps values below the limit up to
    it; side=-1 clamps values above the limit down to it."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000_side1(self):
        self.assertEqual(
            soft.lim(self.vals[0], self.limit, side=1, scale=0.001), self.limit)

    def test_25_side1(self):
        self.assertEqual(
            soft.lim(self.vals[1], self.limit, side=1, scale=0.001), self.limit)

    def test_35_side1(self):
        self.assertEqual(
            soft.lim(self.vals[2], self.limit, side=1, scale=0.001), self.vals[2])

    def test_1000_side1(self):
        self.assertEqual(
            soft.lim(self.vals[3], self.limit, side=1, scale=0.001), self.vals[3])

    def test_m1000_sidem1(self):
        self.assertEqual(
            soft.lim(self.vals[0], self.limit, side=-1, scale=0.001), self.vals[0])

    def test_25_sidem1(self):
        self.assertEqual(
            soft.lim(self.vals[1], self.limit, side=-1, scale=0.001), self.vals[1])

    def test_35_sidem1(self):
        self.assertEqual(
            soft.lim(self.vals[2], self.limit, side=-1, scale=0.001), self.limit)

    def test_1000_sidem1(self):
        self.assertEqual(
            soft.lim(self.vals[3], self.limit, side=-1, scale=0.001), self.limit)
class Test_lim_numpy(Test_lim_scalar):
    """Re-run the scalar lim tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_floor_scalar(unittest.TestCase):
    """soft.floor with scalars: values below the limit are raised to it;
    values above pass through unchanged."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.floor(self.vals[0], self.limit, scale=0.001), self.limit)

    def test_25(self):
        self.assertEqual(
            soft.floor(self.vals[1], self.limit, scale=0.001), self.limit)

    def test_35(self):
        self.assertEqual(
            soft.floor(self.vals[2], self.limit, scale=0.001), self.vals[2])

    def test_1000(self):
        self.assertEqual(
            soft.floor(self.vals[3], self.limit, scale=0.001), self.vals[3])
class Test_floor_numpy(Test_floor_scalar):
    """Re-run the scalar floor tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_ceil_scalar(unittest.TestCase):
    """soft.ceil with scalars: values below the limit pass through;
    values above are lowered to the limit."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.ceil(self.vals[0], self.limit, scale=0.001), self.vals[0])

    def test_25(self):
        self.assertEqual(
            soft.ceil(self.vals[1], self.limit, scale=0.001), self.vals[1])

    def test_35(self):
        self.assertEqual(
            soft.ceil(self.vals[2], self.limit, scale=0.001), self.limit)

    def test_1000(self):
        self.assertEqual(
            soft.ceil(self.vals[3], self.limit, scale=0.001), self.limit)
class Test_ceil_numpy(Test_ceil_scalar):
    """Re-run the scalar ceil tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_clip_scalar(unittest.TestCase):
    """soft.clip with scalars: values are constrained to [lower, upper];
    in-range values pass through unchanged."""

    def setUp(self):
        self.vals = [-1000, -1.0, 2.5, 3.5, 1000]
        self.lower = 1.5
        self.upper = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.clip(self.vals[0], self.lower, self.upper, scale=0.001),
            self.lower)

    def test_m1(self):
        self.assertEqual(
            soft.clip(self.vals[1], self.lower, self.upper, scale=0.001),
            self.lower)

    def test_25(self):
        self.assertEqual(
            soft.clip(self.vals[2], self.lower, self.upper, scale=0.001),
            self.vals[2])

    def test_35(self):
        self.assertEqual(
            soft.clip(self.vals[3], self.lower, self.upper, scale=0.001),
            self.upper)

    def test_1000(self):
        self.assertEqual(
            soft.clip(self.vals[4], self.lower, self.upper, scale=0.001),
            self.upper)
class Test_clip_numpy(Test_clip_scalar):
    """Re-run the scalar clip tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.vals = np.array(self.vals)
class Test_posdiff_scalar(unittest.TestCase):
    """soft.posdiff with scalars: 0 at or below the limit, the excess
    (value - limit) above it."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.posdiff(self.vals[0], self.limit, scale=0.001), 0)

    def test_25(self):
        self.assertEqual(
            soft.posdiff(self.vals[1], self.limit, scale=0.001), 0)

    def test_35(self):
        self.assertEqual(
            soft.posdiff(self.vals[2], self.limit, scale=0.001),
            self.vals[2] - self.limit)

    def test_1000(self):
        self.assertEqual(
            soft.posdiff(self.vals[3], self.limit, scale=0.001),
            self.vals[3] - self.limit)
class Test_posdiff_numpy(Test_posdiff_scalar):
    """Re-run the scalar posdiff tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_negdiff_scalar(unittest.TestCase):
    """soft.negdiff with scalars: the shortfall (value - limit) below the
    limit, 0 at or above it."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.negdiff(self.vals[0], self.limit, scale=0.001),
            self.vals[0] - self.limit)

    def test_25(self):
        self.assertEqual(
            soft.negdiff(self.vals[1], self.limit, scale=0.001),
            self.vals[1] - self.limit)

    def test_35(self):
        self.assertEqual(
            soft.negdiff(self.vals[2], self.limit, scale=0.001), 0)

    def test_1000(self):
        self.assertEqual(
            soft.negdiff(self.vals[3], self.limit, scale=0.001), 0)
class Test_negdiff_numpy(Test_negdiff_scalar):
    """Re-run the scalar negdiff tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_step_scalar(unittest.TestCase):
    """soft.step with scalars: ~0 on one side of the limit and 1 on the
    other, with ``side`` selecting which side maps to 1."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000_side1(self):
        self.assertAlmostEqual(
            soft.step(self.vals[0], self.limit, side=1, scale=0.001), 0)

    def test_25_side1(self):
        self.assertAlmostEqual(
            soft.step(self.vals[1], self.limit, side=1, scale=0.001), 0)

    def test_35_side1(self):
        self.assertEqual(
            soft.step(self.vals[2], self.limit, side=1, scale=0.001), 1)

    def test_1000_side1(self):
        self.assertEqual(
            soft.step(self.vals[3], self.limit, side=1, scale=0.001), 1)

    def test_m1000_sidem1(self):
        self.assertEqual(
            soft.step(self.vals[0], self.limit, side=-1, scale=0.001), 1)

    def test_25_sidem1(self):
        self.assertEqual(
            soft.step(self.vals[1], self.limit, side=-1, scale=0.001), 1)

    def test_35_sidem1(self):
        self.assertAlmostEqual(
            soft.step(self.vals[2], self.limit, side=-1, scale=0.001), 0)

    def test_1000_sidem1(self):
        self.assertAlmostEqual(
            soft.step(self.vals[3], self.limit, side=-1, scale=0.001), 0)
class Test_step_numpy(Test_step_scalar):
    """Re-run the scalar step tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_above_scalar(unittest.TestCase):
    """'Above' behaviour: ~0 below the limit, 1 above it.

    NOTE(review): these tests call ``soft.step`` (default side) rather
    than a dedicated ``soft.above`` — confirm that is the intended API.
    """

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertAlmostEqual(
            soft.step(self.vals[0], self.limit, scale=0.001), 0)

    def test_25(self):
        self.assertAlmostEqual(
            soft.step(self.vals[1], self.limit, scale=0.001), 0)

    def test_35(self):
        self.assertEqual(
            soft.step(self.vals[2], self.limit, scale=0.001), 1)

    def test_1000(self):
        self.assertEqual(
            soft.step(self.vals[3], self.limit, scale=0.001), 1)
class Test_above_numpy(Test_above_scalar):
    """Re-run the scalar 'above' tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_below_scalar(unittest.TestCase):
    """soft.below with scalars: 1 below the limit, ~0 above it."""

    def setUp(self):
        self.vals = [-1000, 2.5, 3.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertEqual(
            soft.below(self.vals[0], self.limit, scale=0.001), 1)

    def test_25(self):
        self.assertEqual(
            soft.below(self.vals[1], self.limit, scale=0.001), 1)

    def test_35(self):
        self.assertAlmostEqual(
            soft.below(self.vals[2], self.limit, scale=0.001), 0)

    def test_1000(self):
        self.assertAlmostEqual(
            soft.below(self.vals[3], self.limit, scale=0.001), 0)
class Test_below_numpy(Test_below_scalar):
    """Re-run the scalar below tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.limit = 3.0
        self.vals = np.array(self.vals)
class Test_within_scalar(unittest.TestCase):
    """soft.within with scalars: 1 inside [lower, upper], ~0 outside."""

    def setUp(self):
        self.vals = [-1000, -1.0, 2.5, 3.5, 1000]
        self.lower = 1.5
        self.upper = 3.0

    def test_m1000(self):
        self.assertAlmostEqual(
            soft.within(self.vals[0], self.lower, self.upper, scale=0.001), 0)

    def test_m1(self):
        self.assertAlmostEqual(
            soft.within(self.vals[1], self.lower, self.upper, scale=0.001), 0)

    def test_25(self):
        self.assertEqual(
            soft.within(self.vals[2], self.lower, self.upper, scale=0.001), 1)

    def test_35(self):
        self.assertAlmostEqual(
            soft.within(self.vals[3], self.lower, self.upper, scale=0.001), 0)

    def test_1000(self):
        self.assertAlmostEqual(
            soft.within(self.vals[4], self.lower, self.upper, scale=0.001), 0)
class Test_within_numpy(Test_within_scalar):
    """Re-run the scalar within tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.vals = np.array(self.vals)
class Test_outside_scalar(unittest.TestCase):
    """soft.outside with scalars: ~1 outside [lower, upper], ~0 inside."""

    def setUp(self):
        self.vals = [-1000, -1.0, 2.5, 3.5, 1000]
        self.lower = 1.5
        self.upper = 3.0

    def test_m1000(self):
        self.assertAlmostEqual(
            soft.outside(self.vals[0], self.lower, self.upper, scale=0.001), 1)

    def test_m1(self):
        self.assertAlmostEqual(
            soft.outside(self.vals[1], self.lower, self.upper, scale=0.001), 1)

    def test_25(self):
        self.assertAlmostEqual(
            soft.outside(self.vals[2], self.lower, self.upper, scale=0.001), 0)

    def test_35(self):
        self.assertAlmostEqual(
            soft.outside(self.vals[3], self.lower, self.upper, scale=0.001), 1)

    def test_1000(self):
        self.assertAlmostEqual(
            soft.outside(self.vals[4], self.lower, self.upper, scale=0.001), 1)
class Test_outside_numpy(Test_outside_scalar):
    """Re-run the scalar outside tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.vals = np.array(self.vals)
class Test_sign_scalar(unittest.TestCase):
    """soft.sign with scalars: ~-1 for negative values, ~+1 for positive."""

    def setUp(self):
        self.vals = [-1000, -0.5, 0.5, 1000]
        self.limit = 3.0

    def test_m1000(self):
        self.assertAlmostEqual(soft.sign(self.vals[0], scale=0.001), -1)

    def test_25(self):
        self.assertAlmostEqual(soft.sign(self.vals[1], scale=0.001), -1)

    def test_35(self):
        self.assertAlmostEqual(soft.sign(self.vals[2], scale=0.001), 1)

    def test_1000(self):
        self.assertAlmostEqual(soft.sign(self.vals[3], scale=0.001), 1)
class Test_sign_numpy(Test_sign_scalar):
    """Re-run the scalar sign tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.vals = np.array(self.vals)
class Test_gaussian_scalar(unittest.TestCase):
    """soft.gaussian with scalars: 1 at the center, ~exp(-2) one unit away
    (with scale=0.5), and ~0 far from the center."""

    def setUp(self):
        self.vals = [-1000, 0.5, 1.5, 2.5, 1000]
        self.center = 1.5

    def test_m1000(self):
        self.assertAlmostEqual(
            soft.gaussian(self.vals[0], center=self.center, scale=0.5), 0)

    def test_p5(self):
        self.assertAlmostEqual(
            soft.gaussian(self.vals[1], center=self.center, scale=0.5),
            0.1353352832366127)

    def test_1p5(self):
        self.assertAlmostEqual(
            soft.gaussian(self.vals[2], center=self.center, scale=0.5), 1)

    def test_2p5(self):
        self.assertAlmostEqual(
            soft.gaussian(self.vals[3], center=self.center, scale=0.5),
            0.1353352832366127)

    def test_1000(self):
        self.assertAlmostEqual(
            soft.gaussian(self.vals[4], center=self.center, scale=0.5), 0)
class Test_gaussian_numpy(Test_gaussian_scalar):
    """Re-run the scalar gaussian tests with ``self.vals`` as a numpy array."""

    def setUp(self):
        super().setUp()
        self.vals = np.array(self.vals)
"npsolve.soft_functions.within",
"npsolve.soft_functions.clip",
"npsolve.soft_functions.floor",
"npsolve.soft_functions.lim",
"npsolve.soft_functions.negdiff",
"npsolve.soft_functions.posdiff",
"npsolve.soft_functions.outside",
"npsolve.soft_functions.gaussian",
"npsolve.soft_functions.step",
"num... | [((349, 404), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[0]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[0], self.limit, side=1, scale=0.001)\n', (357, 404), True, 'import npsolve.soft_functions as soft\n'), ((499, 554), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[1]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[1], self.limit, side=1, scale=0.001)\n', (507, 554), True, 'import npsolve.soft_functions as soft\n'), ((641, 696), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[2]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[2], self.limit, side=1, scale=0.001)\n', (649, 696), True, 'import npsolve.soft_functions as soft\n'), ((787, 842), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[3]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[3], self.limit, side=1, scale=0.001)\n', (795, 842), True, 'import npsolve.soft_functions as soft\n'), ((935, 991), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[0]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[0], self.limit, side=-1, scale=0.001)\n', (943, 991), True, 'import npsolve.soft_functions as soft\n'), ((1089, 1145), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[1]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[1], self.limit, side=-1, scale=0.001)\n', (1097, 1145), True, 'import npsolve.soft_functions as soft\n'), ((1235, 1291), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[2]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[2], self.limit, side=-1, scale=0.001)\n', (1243, 1291), True, 'import npsolve.soft_functions as soft\n'), ((1381, 1437), 'npsolve.soft_functions.lim', 'soft.lim', (['self.vals[3]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[3], self.limit, side=-1, scale=0.001)\n', (1389, 1437), True, 'import npsolve.soft_functions as soft\n'), ((1586, 1605), 'numpy.array', 
'np.array', (['self.vals'], {}), '(self.vals)\n', (1594, 1605), True, 'import numpy as np\n'), ((1823, 1872), 'npsolve.soft_functions.floor', 'soft.floor', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (1833, 1872), True, 'import npsolve.soft_functions as soft\n'), ((1961, 2010), 'npsolve.soft_functions.floor', 'soft.floor', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (1971, 2010), True, 'import npsolve.soft_functions as soft\n'), ((2091, 2140), 'npsolve.soft_functions.floor', 'soft.floor', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (2101, 2140), True, 'import npsolve.soft_functions as soft\n'), ((2225, 2274), 'npsolve.soft_functions.floor', 'soft.floor', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (2235, 2274), True, 'import npsolve.soft_functions as soft\n'), ((2428, 2447), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (2436, 2447), True, 'import numpy as np\n'), ((2664, 2712), 'npsolve.soft_functions.ceil', 'soft.ceil', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (2673, 2712), True, 'import npsolve.soft_functions as soft\n'), ((2803, 2851), 'npsolve.soft_functions.ceil', 'soft.ceil', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (2812, 2851), True, 'import npsolve.soft_functions as soft\n'), ((2934, 2982), 'npsolve.soft_functions.ceil', 'soft.ceil', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (2943, 2982), True, 'import npsolve.soft_functions as soft\n'), ((3065, 3113), 'npsolve.soft_functions.ceil', 'soft.ceil', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (3074, 3113), True, 'import 
npsolve.soft_functions as soft\n'), ((3264, 3283), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (3272, 3283), True, 'import numpy as np\n'), ((3539, 3599), 'npsolve.soft_functions.clip', 'soft.clip', (['self.vals[0]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[0], self.lower, self.upper, scale=0.001)\n', (3548, 3599), True, 'import npsolve.soft_functions as soft\n'), ((3680, 3740), 'npsolve.soft_functions.clip', 'soft.clip', (['self.vals[1]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[1], self.lower, self.upper, scale=0.001)\n', (3689, 3740), True, 'import npsolve.soft_functions as soft\n'), ((3821, 3881), 'npsolve.soft_functions.clip', 'soft.clip', (['self.vals[2]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[2], self.lower, self.upper, scale=0.001)\n', (3830, 3881), True, 'import npsolve.soft_functions as soft\n'), ((3964, 4024), 'npsolve.soft_functions.clip', 'soft.clip', (['self.vals[3]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[3], self.lower, self.upper, scale=0.001)\n', (3973, 4024), True, 'import npsolve.soft_functions as soft\n'), ((4107, 4167), 'npsolve.soft_functions.clip', 'soft.clip', (['self.vals[4]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[4], self.lower, self.upper, scale=0.001)\n', (4116, 4167), True, 'import npsolve.soft_functions as soft\n'), ((4318, 4337), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (4326, 4337), True, 'import numpy as np\n'), ((4524, 4575), 'npsolve.soft_functions.posdiff', 'soft.posdiff', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (4536, 4575), True, 'import npsolve.soft_functions as soft\n'), ((4655, 4706), 'npsolve.soft_functions.posdiff', 'soft.posdiff', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (4667, 4706), True, 'import npsolve.soft_functions as soft\n'), 
((4778, 4829), 'npsolve.soft_functions.posdiff', 'soft.posdiff', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (4790, 4829), True, 'import npsolve.soft_functions as soft\n'), ((4927, 4978), 'npsolve.soft_functions.posdiff', 'soft.posdiff', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (4939, 4978), True, 'import npsolve.soft_functions as soft\n'), ((5150, 5169), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (5158, 5169), True, 'import numpy as np\n'), ((5389, 5440), 'npsolve.soft_functions.negdiff', 'soft.negdiff', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (5401, 5440), True, 'import npsolve.soft_functions as soft\n'), ((5544, 5595), 'npsolve.soft_functions.negdiff', 'soft.negdiff', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (5556, 5595), True, 'import npsolve.soft_functions as soft\n'), ((5691, 5742), 'npsolve.soft_functions.negdiff', 'soft.negdiff', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (5703, 5742), True, 'import npsolve.soft_functions as soft\n'), ((5816, 5867), 'npsolve.soft_functions.negdiff', 'soft.negdiff', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (5828, 5867), True, 'import npsolve.soft_functions as soft\n'), ((6015, 6034), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (6023, 6034), True, 'import numpy as np\n'), ((6265, 6321), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[0]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[0], self.limit, side=1, scale=0.001)\n', (6274, 6321), True, 'import npsolve.soft_functions as soft\n'), ((6413, 6469), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[1]', 'self.limit'], {'side': '(1)', 'scale': 
'(0.001)'}), '(self.vals[1], self.limit, side=1, scale=0.001)\n', (6422, 6469), True, 'import npsolve.soft_functions as soft\n'), ((6553, 6609), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[2]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[2], self.limit, side=1, scale=0.001)\n', (6562, 6609), True, 'import npsolve.soft_functions as soft\n'), ((6689, 6745), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[3]', 'self.limit'], {'side': '(1)', 'scale': '(0.001)'}), '(self.vals[3], self.limit, side=1, scale=0.001)\n', (6698, 6745), True, 'import npsolve.soft_functions as soft\n'), ((6835, 6892), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[0]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[0], self.limit, side=-1, scale=0.001)\n', (6844, 6892), True, 'import npsolve.soft_functions as soft\n'), ((6979, 7036), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[1]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[1], self.limit, side=-1, scale=0.001)\n', (6988, 7036), True, 'import npsolve.soft_functions as soft\n'), ((7115, 7172), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[2]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[2], self.limit, side=-1, scale=0.001)\n', (7124, 7172), True, 'import npsolve.soft_functions as soft\n'), ((7259, 7316), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[3]', 'self.limit'], {'side': '(-1)', 'scale': '(0.001)'}), '(self.vals[3], self.limit, side=-1, scale=0.001)\n', (7268, 7316), True, 'import npsolve.soft_functions as soft\n'), ((7464, 7483), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (7472, 7483), True, 'import numpy as np\n'), ((7701, 7749), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (7710, 7749), True, 'import npsolve.soft_functions as soft\n'), ((7835, 7883), 
'npsolve.soft_functions.step', 'soft.step', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (7844, 7883), True, 'import npsolve.soft_functions as soft\n'), ((7961, 8009), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (7970, 8009), True, 'import npsolve.soft_functions as soft\n'), ((8083, 8131), 'npsolve.soft_functions.step', 'soft.step', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (8092, 8131), True, 'import npsolve.soft_functions as soft\n'), ((8283, 8302), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (8291, 8302), True, 'import numpy as np\n'), ((8528, 8577), 'npsolve.soft_functions.below', 'soft.below', (['self.vals[0]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[0], self.limit, scale=0.001)\n', (8538, 8577), True, 'import npsolve.soft_functions as soft\n'), ((8657, 8706), 'npsolve.soft_functions.below', 'soft.below', (['self.vals[1]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[1], self.limit, scale=0.001)\n', (8667, 8706), True, 'import npsolve.soft_functions as soft\n'), ((8778, 8827), 'npsolve.soft_functions.below', 'soft.below', (['self.vals[2]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[2], self.limit, scale=0.001)\n', (8788, 8827), True, 'import npsolve.soft_functions as soft\n'), ((8907, 8956), 'npsolve.soft_functions.below', 'soft.below', (['self.vals[3]', 'self.limit'], {'scale': '(0.001)'}), '(self.vals[3], self.limit, scale=0.001)\n', (8917, 8956), True, 'import npsolve.soft_functions as soft\n'), ((9114, 9133), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (9122, 9133), True, 'import numpy as np\n'), ((9391, 9453), 'npsolve.soft_functions.within', 'soft.within', (['self.vals[0]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[0], self.lower, self.upper, 
scale=0.001)\n', (9402, 9453), True, 'import npsolve.soft_functions as soft\n'), ((9531, 9593), 'npsolve.soft_functions.within', 'soft.within', (['self.vals[1]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[1], self.lower, self.upper, scale=0.001)\n', (9542, 9593), True, 'import npsolve.soft_functions as soft\n'), ((9671, 9733), 'npsolve.soft_functions.within', 'soft.within', (['self.vals[2]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[2], self.lower, self.upper, scale=0.001)\n', (9682, 9733), True, 'import npsolve.soft_functions as soft\n'), ((9805, 9867), 'npsolve.soft_functions.within', 'soft.within', (['self.vals[3]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[3], self.lower, self.upper, scale=0.001)\n', (9816, 9867), True, 'import npsolve.soft_functions as soft\n'), ((9947, 10009), 'npsolve.soft_functions.within', 'soft.within', (['self.vals[4]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[4], self.lower, self.upper, scale=0.001)\n', (9958, 10009), True, 'import npsolve.soft_functions as soft\n'), ((10161, 10180), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (10169, 10180), True, 'import numpy as np\n'), ((10414, 10477), 'npsolve.soft_functions.outside', 'soft.outside', (['self.vals[0]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[0], self.lower, self.upper, scale=0.001)\n', (10426, 10477), True, 'import npsolve.soft_functions as soft\n'), ((10555, 10618), 'npsolve.soft_functions.outside', 'soft.outside', (['self.vals[1]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[1], self.lower, self.upper, scale=0.001)\n', (10567, 10618), True, 'import npsolve.soft_functions as soft\n'), ((10696, 10759), 'npsolve.soft_functions.outside', 'soft.outside', (['self.vals[2]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[2], self.lower, self.upper, scale=0.001)\n', (10708, 10759), True, 'import npsolve.soft_functions as 
soft\n'), ((10837, 10900), 'npsolve.soft_functions.outside', 'soft.outside', (['self.vals[3]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[3], self.lower, self.upper, scale=0.001)\n', (10849, 10900), True, 'import npsolve.soft_functions as soft\n'), ((10980, 11043), 'npsolve.soft_functions.outside', 'soft.outside', (['self.vals[4]', 'self.lower', 'self.upper'], {'scale': '(0.001)'}), '(self.vals[4], self.lower, self.upper, scale=0.001)\n', (10992, 11043), True, 'import npsolve.soft_functions as soft\n'), ((11197, 11216), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (11205, 11216), True, 'import numpy as np\n'), ((11417, 11453), 'npsolve.soft_functions.sign', 'soft.sign', (['self.vals[0]'], {'scale': '(0.001)'}), '(self.vals[0], scale=0.001)\n', (11426, 11453), True, 'import npsolve.soft_functions as soft\n'), ((11532, 11568), 'npsolve.soft_functions.sign', 'soft.sign', (['self.vals[1]'], {'scale': '(0.001)'}), '(self.vals[1], scale=0.001)\n', (11541, 11568), True, 'import npsolve.soft_functions as soft\n'), ((11647, 11683), 'npsolve.soft_functions.sign', 'soft.sign', (['self.vals[2]'], {'scale': '(0.001)'}), '(self.vals[2], scale=0.001)\n', (11656, 11683), True, 'import npsolve.soft_functions as soft\n'), ((11763, 11799), 'npsolve.soft_functions.sign', 'soft.sign', (['self.vals[3]'], {'scale': '(0.001)'}), '(self.vals[3], scale=0.001)\n', (11772, 11799), True, 'import npsolve.soft_functions as soft\n'), ((11947, 11966), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (11955, 11966), True, 'import numpy as np\n'), ((12176, 12234), 'npsolve.soft_functions.gaussian', 'soft.gaussian', (['self.vals[0]'], {'center': 'self.center', 'scale': '(0.5)'}), '(self.vals[0], center=self.center, scale=0.5)\n', (12189, 12234), True, 'import npsolve.soft_functions as soft\n'), ((12312, 12370), 'npsolve.soft_functions.gaussian', 'soft.gaussian', (['self.vals[1]'], {'center': 'self.center', 'scale': '(0.5)'}), '(self.vals[1], 
center=self.center, scale=0.5)\n', (12325, 12370), True, 'import npsolve.soft_functions as soft\n'), ((12466, 12524), 'npsolve.soft_functions.gaussian', 'soft.gaussian', (['self.vals[2]'], {'center': 'self.center', 'scale': '(0.5)'}), '(self.vals[2], center=self.center, scale=0.5)\n', (12479, 12524), True, 'import npsolve.soft_functions as soft\n'), ((12603, 12661), 'npsolve.soft_functions.gaussian', 'soft.gaussian', (['self.vals[3]'], {'center': 'self.center', 'scale': '(0.5)'}), '(self.vals[3], center=self.center, scale=0.5)\n', (12616, 12661), True, 'import npsolve.soft_functions as soft\n'), ((12758, 12816), 'npsolve.soft_functions.gaussian', 'soft.gaussian', (['self.vals[4]'], {'center': 'self.center', 'scale': '(0.5)'}), '(self.vals[4], center=self.center, scale=0.5)\n', (12771, 12816), True, 'import npsolve.soft_functions as soft\n'), ((12972, 12991), 'numpy.array', 'np.array', (['self.vals'], {}), '(self.vals)\n', (12980, 12991), True, 'import numpy as np\n')] |
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import numpy as np
from PIL import Image
import img2vid as i2v
import glob
import yt
sys.path.append("/home/fionnlagh/forked_amrvac/amrvac/tools/python")
#from amrvac_pytools.datfiles.reading import amrvac_reader
#from amrvac_pytools.vtkfiles import read, amrplot
import amrvac_pytools as apt
path_2_shared_drive = '/run/user/1001/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm'
path_2_file = '/j/2D/P300/B50/A60/'
file_name = 'jet_P300_B50A_60_'
Full_path = path_2_shared_drive + path_2_file + file_name
ds = apt.load_datfile(Full_path+'0020.dat')
ad = ds.load_all_data()
unit_length = 1e9 # cm
unit_temperature = 1e6 # K
unit_numberdensity = 1e9 # cm^-3
unit_density = 2.3416704877999998E-015
unit_velocity = 11645084.295622544
unit_pressure = 0.31754922400000002
unit_magenticfield = 1.9976088799077159
unit_time = unit_length/unit_velocity
unit_mass = unit_density*unit_length**3
unit_specific_energy = (unit_length/unit_time)**2
var_rho = 'rho'
var_b1 = 'b1'
var_b2 = 'b2'
contour = False
var_name = 'mag_tension_x' #'density'#
cmap = 'seismic'# 'gist_heat' #
var_rho_data = ad[var_rho]*unit_density
var_b1_data = ad[var_b1]*unit_magenticfield
var_b2_data = ad[var_b2]*unit_magenticfield
arr_rho = np.zeros((var_rho_data.shape[0], var_rho_data.shape[1], 1))
arr_b1 = np.zeros((var_b1_data.shape[0], var_b1_data.shape[1], 1))
arr_b2 = np.zeros((var_b2_data.shape[0], var_b2_data.shape[1], 1))
arr_rho[:, :, 0] = var_rho_data
arr_b1[:, :, 0] = var_b1_data
arr_b2[:, :, 0] = var_b2_data
print(var_rho_data[0],var_b2_data[0])
data = dict(density=(arr_rho, "g/cm**3"), b1=(arr_b1, "gauss"),
b2=(arr_b2, "gauss"))
bbox = np.array([[-2.5e4, 2.5e4], [0, 3e4], [-1e4, 1e4]])
ds = yt.load_uniform_grid(data, arr_rho.shape, length_unit="km",
bbox=bbox, nprocs=128)
for i in sorted(ds.field_list):
print(i)
# trying to use yt for magnetic tension. doesnt work
b1_grad_fields = ds.add_gradient_fields(('stream', 'b1'))
b2_grad_fields = ds.add_gradient_fields(('stream', 'b2'))
# trying to calc magnetic tension
def _magnetic_tension_(field, data):
dxb1 = data['b1_gradient_x']
dyb1 = data['b1_gradient_y']
dxb2 = data['b2_gradient_x']
dyb2 = data['b2_gradient_y']
b1 = data['b1']
b2 = data['b2']
mag_tension_x = b1*dxb1+b2*dyb1
mag_tension_y = b1*dxb2+b2*dyb2
return mag_tension_x
# The gradient operator requires periodic boundaries. This dataset has
# open boundary conditions. We need to hack it for now (this will be fixed
# in future version of yt)
ds.periodicity = (True, True, True)
ds.add_field(('stream','mag_tension_x'), function=_magnetic_tension_, units="gauss**2/cm", take_log=False,
sampling_type="cell")
## 2Mm
#y_limit = 1.99e4
#slc = yt.SlicePlot(ds, "z", [var_name], center=[0.0, -y_limit, 0],
# origin=(0, -3.1e4, 'domain'),
# width=((1e9, 'cm'), (2e9, 'cm')))
## works very flixciably
#y_limit = 0.5e4
#slc = yt.SlicePlot(ds, "z", [var_name], center=[0.0, y_limit/2, 0],
# width=((5e8, 'cm'), (y_limit*1e5, 'cm')),
# origin=(0, 0, 'domain'))
# 2Mm
y_limit = 0.8e4
# mag tension is not okay at lower bc
# need to remove
shift_factor = 0.099e3
slc = yt.SlicePlot(ds, "z", [var_name], center=[0.0, y_limit/2+shift_factor, 0],
width=((8e8, 'cm'), (y_limit*1e5, 'cm')),
origin=(0, 0, 'domain'))
if contour is True:
slc.annotate_contour("mag_tension_x",label=True, plot_args={"colors": "blue",
"linewidths": 4})#, clim=(-200,200))
slc.set_cmap(var_name, cmap)
slc.set_log(var_name, False)
slc.set_axes_unit("m")
if var_name == 'density' or var_name == 'rho':
#plot.set_unit(var, 'g/cm**3')
slc.set_unit(var_name, 'kg/m**3')
#slc.set_zlim(var_name, -200, 200)
#slc.annotate_grids(cmap=None)
slc.save(file_name+var_name)
| [
"sys.path.append",
"amrvac_pytools.load_datfile",
"numpy.zeros",
"yt.SlicePlot",
"numpy.array",
"yt.load_uniform_grid"
] | [((172, 240), 'sys.path.append', 'sys.path.append', (['"""/home/fionnlagh/forked_amrvac/amrvac/tools/python"""'], {}), "('/home/fionnlagh/forked_amrvac/amrvac/tools/python')\n", (187, 240), False, 'import sys\n'), ((637, 677), 'amrvac_pytools.load_datfile', 'apt.load_datfile', (["(Full_path + '0020.dat')"], {}), "(Full_path + '0020.dat')\n", (653, 677), True, 'import amrvac_pytools as apt\n'), ((1344, 1403), 'numpy.zeros', 'np.zeros', (['(var_rho_data.shape[0], var_rho_data.shape[1], 1)'], {}), '((var_rho_data.shape[0], var_rho_data.shape[1], 1))\n', (1352, 1403), True, 'import numpy as np\n'), ((1413, 1470), 'numpy.zeros', 'np.zeros', (['(var_b1_data.shape[0], var_b1_data.shape[1], 1)'], {}), '((var_b1_data.shape[0], var_b1_data.shape[1], 1))\n', (1421, 1470), True, 'import numpy as np\n'), ((1480, 1537), 'numpy.zeros', 'np.zeros', (['(var_b2_data.shape[0], var_b2_data.shape[1], 1)'], {}), '((var_b2_data.shape[0], var_b2_data.shape[1], 1))\n', (1488, 1537), True, 'import numpy as np\n'), ((1775, 1841), 'numpy.array', 'np.array', (['[[-25000.0, 25000.0], [0, 30000.0], [-10000.0, 10000.0]]'], {}), '([[-25000.0, 25000.0], [0, 30000.0], [-10000.0, 10000.0]])\n', (1783, 1841), True, 'import numpy as np\n'), ((1831, 1917), 'yt.load_uniform_grid', 'yt.load_uniform_grid', (['data', 'arr_rho.shape'], {'length_unit': '"""km"""', 'bbox': 'bbox', 'nprocs': '(128)'}), "(data, arr_rho.shape, length_unit='km', bbox=bbox,\n nprocs=128)\n", (1851, 1917), False, 'import yt\n'), ((3394, 3563), 'yt.SlicePlot', 'yt.SlicePlot', (['ds', '"""z"""', '[var_name]'], {'center': '[0.0, y_limit / 2 + shift_factor, 0]', 'width': "((800000000.0, 'cm'), (y_limit * 100000.0, 'cm'))", 'origin': "(0, 0, 'domain')"}), "(ds, 'z', [var_name], center=[0.0, y_limit / 2 + shift_factor, \n 0], width=((800000000.0, 'cm'), (y_limit * 100000.0, 'cm')), origin=(0,\n 0, 'domain'))\n", (3406, 3563), False, 'import yt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from deepneuro.utilities.conversion import read_image_files
def create_mosaic(input_volume, output_filepath=None, label_volume=None, generate_outline=True, mask_value=0, step=1, dim=2, cols=8, label_buffer=5, rotate_90=3, flip=True):
"""This creates a mosaic of 2D images from a 3D Volume.
Parameters
----------
input_volume : TYPE
Any neuroimaging file with a filetype supported by qtim_tools, or existing numpy array.
output_filepath : None, optional
Where to save your output, in a filetype supported by matplotlib (e.g. .png). If
label_volume : None, optional
Whether to create your mosaic with an attached label filepath / numpy array. Will not perform volume transforms from header (yet)
generate_outline : bool, optional
If True, will generate outlines for label_volumes, instead of filled-in areas. Default is True.
mask_value : int, optional
Background value for label volumes. Default is 0.
step : int, optional
Will generate an image for every [step] slice. Default is 1.
dim : int, optional
Mosaic images will be sliced along this dimension. Default is 2, which often corresponds to axial.
cols : int, optional
How many columns in your output mosaic. Rows will be determined automatically. Default is 8.
label_buffer : int, optional
Images more than [label_buffer] slices away from a slice containing a label pixel will note be included. Default is 5.
rotate_90 : int, optional
If the output mosaic is incorrectly rotated, you may rotate clockwise [rotate_90] times. Default is 3.
flip : bool, optional
If the output is incorrectly flipped, you may set to True to flip the data. Default is True.
No Longer Returned
------------------
Returns
-------
output_array: N+1 or N-dimensional array
The generated mosaic array.
"""
image_numpy = read_image_files(input_volume)
if step is None:
step = 1
if label_volume is not None:
label_numpy = read_image_files(label_volume)
if generate_outline:
label_numpy = generate_label_outlines(label_numpy, dim, mask_value)
# This is fun in a wacky way, but could probably be done more concisely and effeciently.
mosaic_selections = []
for i in range(label_numpy.shape[dim]):
label_slice = np.squeeze(label_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]])
if np.sum(label_slice) != 0:
mosaic_selections += list(range(i - label_buffer, i + label_buffer))
mosaic_selections = np.unique(mosaic_selections)
mosaic_selections = mosaic_selections[mosaic_selections >= 0]
mosaic_selections = mosaic_selections[mosaic_selections <= image_numpy.shape[dim]]
mosaic_selections = mosaic_selections[::step]
color_range_image = [np.min(image_numpy), np.max(image_numpy)]
color_range_label = [np.min(label_numpy), np.max(label_numpy)]
# One day, specify rotations by affine matrix.
# Is test slice necessary? Operate directly on shape if possible.
test_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(0, 1) for k in range(3)]]), rotate_90)
slice_width = test_slice.shape[1]
slice_height = test_slice.shape[0]
mosaic_image_numpy = np.zeros((int(slice_height * np.ceil(float(len(mosaic_selections)) / float(cols))), int(test_slice.shape[1] * cols)), dtype=float)
mosaic_label_numpy = np.zeros_like(mosaic_image_numpy)
row_index = 0
col_index = 0
for i in mosaic_selections:
image_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]]), rotate_90)
label_slice = np.rot90(np.squeeze(label_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]]), rotate_90)
# Again, specify from affine matrix if possible.
if flip:
image_slice = np.fliplr(image_slice)
label_slice = np.fliplr(label_slice)
if image_slice.size > 0:
mosaic_image_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = image_slice
mosaic_label_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = label_slice
if col_index == mosaic_image_numpy.shape[1] - slice_width:
col_index = 0
row_index += slice_height
else:
col_index += slice_width
mosaic_label_numpy = np.ma.masked_where(mosaic_label_numpy == 0, mosaic_label_numpy)
if output_filepath is not None:
plt.figure(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100), dpi=100, frameon=False)
plt.margins(0, 0)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=color_range_image[1], interpolation='none')
plt.imshow(mosaic_label_numpy, 'jet', vmin=color_range_label[0], vmax=color_range_label[1], interpolation='none')
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=1000)
plt.clf()
plt.close()
return mosaic_image_numpy
else:
color_range_image = [np.min(image_numpy), np.max(image_numpy)]
test_slice = np.rot90(np.squeeze(image_numpy[[slice(None) if k != dim else slice(0, 1) for k in range(3)]]), rotate_90)
slice_width = test_slice.shape[1]
slice_height = test_slice.shape[0]
mosaic_selections = np.arange(image_numpy.shape[dim])[::step]
mosaic_image_numpy = np.zeros((int(slice_height * np.ceil(float(len(mosaic_selections)) / float(cols))), int(test_slice.shape[1] * cols)), dtype=float)
row_index = 0
col_index = 0
for i in mosaic_selections:
image_slice = np.squeeze(image_numpy[[slice(None) if k != dim else slice(i, i + 1) for k in range(3)]])
image_slice = np.rot90(image_slice, rotate_90)
if flip:
image_slice = np.fliplr(image_slice)
mosaic_image_numpy[int(row_index):int(row_index + slice_height), int(col_index):int(col_index + slice_width)] = image_slice
if col_index == mosaic_image_numpy.shape[1] - slice_width:
col_index = 0
row_index += slice_height
else:
col_index += slice_width
if output_filepath is not None:
plt.figure(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100), dpi=100, frameon=False)
plt.margins(0, 0)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=color_range_image[1], interpolation='none')
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=500)
plt.clf()
plt.close()
return mosaic_image_numpy
def generate_label_outlines(label_numpy, dim=2, mask_value=0):
"""
Assumes labels are > 0 and integers.
Parameters
----------
input_volume: N-dimensional array
The volume to be queried.
mask_value: int or float
Islands composed of "mask_value" will be ignored.
return_split: bool
Whether to a return a stacked output of equal-size binary arrays for each island,
or to return one array with differently-labeled islands for each output.
truncate: bool
Whether or not to truncate the output. Irrelevant if return_split is False
truncate_padding: int
How many voxels of padding to leave when truncating.
output_filepath: str
If return_split is False, output will be saved to this file. If return_split
is True, output will be save to this file with the suffix "_[#]" for island
number
Returns
-------
output_array: N+1 or N-dimensional array
Output array(s) depending on return_split
"""
edges_kernel = np.zeros((3, 3, 3), dtype=float)
edges_kernel[1, 1, 1] = 4
if dim != 2:
edges_kernel[1, 1, 0] = -1
edges_kernel[1, 1, 2] = -1
if dim != 1:
edges_kernel[1, 0, 1] = -1
edges_kernel[1, 2, 1] = -1
if dim != 0:
edges_kernel[0, 1, 1] = -1
edges_kernel[2, 1, 1] = -1
outline_label_numpy = np.zeros_like(label_numpy, dtype=float)
for label_number in np.unique(label_numpy):
if label_number != mask_value:
sublabel_numpy = np.copy(label_numpy)
sublabel_numpy[sublabel_numpy != label_number] = 0
edge_image = signal.convolve(sublabel_numpy, edges_kernel, mode='same').astype(int)
edge_image[sublabel_numpy != label_number] = 0
edge_image[edge_image != 0] = label_number
outline_label_numpy += edge_image.astype(float)
return outline_label_numpy
if __name__ == '__main__':
pass | [
"numpy.sum",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.figure",
"numpy.rot90",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.unique",
"numpy.zeros_like",
"deepneuro.utilities.conversion.read_image_files",
"numpy.copy",
"matplotlib.pyplot.imshow",
"matplotlib.... | [((2032, 2062), 'deepneuro.utilities.conversion.read_image_files', 'read_image_files', (['input_volume'], {}), '(input_volume)\n', (2048, 2062), False, 'from deepneuro.utilities.conversion import read_image_files\n'), ((8689, 8721), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {'dtype': 'float'}), '((3, 3, 3), dtype=float)\n', (8697, 8721), True, 'import numpy as np\n'), ((9047, 9086), 'numpy.zeros_like', 'np.zeros_like', (['label_numpy'], {'dtype': 'float'}), '(label_numpy, dtype=float)\n', (9060, 9086), True, 'import numpy as np\n'), ((9112, 9134), 'numpy.unique', 'np.unique', (['label_numpy'], {}), '(label_numpy)\n', (9121, 9134), True, 'import numpy as np\n'), ((2158, 2188), 'deepneuro.utilities.conversion.read_image_files', 'read_image_files', (['label_volume'], {}), '(label_volume)\n', (2174, 2188), False, 'from deepneuro.utilities.conversion import read_image_files\n'), ((2746, 2774), 'numpy.unique', 'np.unique', (['mosaic_selections'], {}), '(mosaic_selections)\n', (2755, 2774), True, 'import numpy as np\n'), ((3666, 3699), 'numpy.zeros_like', 'np.zeros_like', (['mosaic_image_numpy'], {}), '(mosaic_image_numpy)\n', (3679, 3699), True, 'import numpy as np\n'), ((4805, 4868), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(mosaic_label_numpy == 0)', 'mosaic_label_numpy'], {}), '(mosaic_label_numpy == 0, mosaic_label_numpy)\n', (4823, 4868), True, 'import numpy as np\n'), ((3020, 3039), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (3026, 3039), True, 'import numpy as np\n'), ((3041, 3060), 'numpy.max', 'np.max', (['image_numpy'], {}), '(image_numpy)\n', (3047, 3060), True, 'import numpy as np\n'), ((3091, 3110), 'numpy.min', 'np.min', (['label_numpy'], {}), '(label_numpy)\n', (3097, 3110), True, 'import numpy as np\n'), ((3112, 3131), 'numpy.max', 'np.max', (['label_numpy'], {}), '(label_numpy)\n', (3118, 3131), True, 'import numpy as np\n'), ((4922, 5041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 
'(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100)', 'dpi': '(100)', 'frameon': '(False)'}), '(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.\n shape[1] / 100), dpi=100, frameon=False)\n', (4932, 5041), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5066), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (5060, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5246, 5365), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mosaic_image_numpy', '"""gray"""'], {'vmin': 'color_range_image[0]', 'vmax': 'color_range_image[1]', 'interpolation': '"""none"""'}), "(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=\n color_range_image[1], interpolation='none')\n", (5256, 5365), True, 'import matplotlib.pyplot as plt\n'), ((5373, 5491), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mosaic_label_numpy', '"""jet"""'], {'vmin': 'color_range_label[0]', 'vmax': 'color_range_label[1]', 'interpolation': '"""none"""'}), "(mosaic_label_numpy, 'jet', vmin=color_range_label[0], vmax=\n color_range_label[1], interpolation='none')\n", (5383, 5491), True, 'import matplotlib.pyplot as plt\n'), ((5512, 5587), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filepath'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.0)', 'dpi': '(1000)'}), "(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=1000)\n", (5523, 5587), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5609), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5607, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5622, 5633), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5631, 5633), True, 'import matplotlib.pyplot as plt\n'), ((5710, 5729), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (5716, 5729), True, 'import numpy as np\n'), ((5731, 5750), 'numpy.max', 'np.max', (['image_numpy'], {}), '(image_numpy)\n', (5737, 5750), True, 'import numpy as np\n'), ((5995, 6028), 'numpy.arange', 'np.arange', 
(['image_numpy.shape[dim]'], {}), '(image_numpy.shape[dim])\n', (6004, 6028), True, 'import numpy as np\n'), ((6422, 6454), 'numpy.rot90', 'np.rot90', (['image_slice', 'rotate_90'], {}), '(image_slice, rotate_90)\n', (6430, 6454), True, 'import numpy as np\n'), ((6936, 7055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.shape[1] / 100)', 'dpi': '(100)', 'frameon': '(False)'}), '(figsize=(mosaic_image_numpy.shape[0] / 100, mosaic_image_numpy.\n shape[1] / 100), dpi=100, frameon=False)\n', (6946, 7055), True, 'import matplotlib.pyplot as plt\n'), ((7063, 7080), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (7074, 7080), True, 'import matplotlib.pyplot as plt\n'), ((7260, 7379), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mosaic_image_numpy', '"""gray"""'], {'vmin': 'color_range_image[0]', 'vmax': 'color_range_image[1]', 'interpolation': '"""none"""'}), "(mosaic_image_numpy, 'gray', vmin=color_range_image[0], vmax=\n color_range_image[1], interpolation='none')\n", (7270, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7462), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filepath'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.0)', 'dpi': '(500)'}), "(output_filepath, bbox_inches='tight', pad_inches=0.0, dpi=500)\n", (7399, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7476, 7485), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7483, 7485), True, 'import matplotlib.pyplot as plt\n'), ((7498, 7509), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7507, 7509), True, 'import matplotlib.pyplot as plt\n'), ((9204, 9224), 'numpy.copy', 'np.copy', (['label_numpy'], {}), '(label_numpy)\n', (9211, 9224), True, 'import numpy as np\n'), ((2607, 2626), 'numpy.sum', 'np.sum', (['label_slice'], {}), '(label_slice)\n', (2613, 2626), True, 'import numpy as np\n'), ((4177, 4199), 'numpy.fliplr', 'np.fliplr', (['image_slice'], {}), 
'(image_slice)\n', (4186, 4199), True, 'import numpy as np\n'), ((4230, 4252), 'numpy.fliplr', 'np.fliplr', (['label_slice'], {}), '(label_slice)\n', (4239, 4252), True, 'import numpy as np\n'), ((5150, 5167), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (5165, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5215, 5232), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (5230, 5232), True, 'import matplotlib.pyplot as plt\n'), ((6519, 6541), 'numpy.fliplr', 'np.fliplr', (['image_slice'], {}), '(image_slice)\n', (6528, 6541), True, 'import numpy as np\n'), ((7164, 7181), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (7179, 7181), True, 'import matplotlib.pyplot as plt\n'), ((7229, 7246), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (7244, 7246), True, 'import matplotlib.pyplot as plt\n'), ((5079, 5088), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5086, 5088), True, 'import matplotlib.pyplot as plt\n'), ((7093, 7102), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7100, 7102), True, 'import matplotlib.pyplot as plt\n'), ((9313, 9371), 'scipy.signal.convolve', 'signal.convolve', (['sublabel_numpy', 'edges_kernel'], {'mode': '"""same"""'}), "(sublabel_numpy, edges_kernel, mode='same')\n", (9328, 9371), False, 'from scipy import signal\n'), ((5116, 5125), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5123, 5125), True, 'import matplotlib.pyplot as plt\n'), ((5181, 5190), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5188, 5190), True, 'import matplotlib.pyplot as plt\n'), ((7130, 7139), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7137, 7139), True, 'import matplotlib.pyplot as plt\n'), ((7195, 7204), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7202, 7204), True, 'import matplotlib.pyplot as plt\n')] |
import pickle, sys, time
import numpy as np
from scipy.special import logsumexp
def add_block(b,envelope):
'''
Add single block to row-based envelope
'''
(sx,sy,ex,ey) = b
for i in range(sx,ex):
this_min = sy
this_max = ey
if i < len(envelope):
if this_min < envelope[i,0] or envelope[i,0] < 0:
envelope[i,0] = this_min
if this_max > envelope[i,1] or envelope[i,1] < 0:
envelope[i,1] = this_max
def check_envelope(envelope,U,V):
check_greater = all(envelope[:,1]>envelope[:,0])
check_overlap = all(envelope[:-1,1]-envelope[1:,0])
check_length = len(envelope) == U+2
check_range = all(envelope[:,1]<=V)
return(check_greater and check_overlap and check_length and check_range)
def get_alignment_columns(alignment):
# extract column type and read sequence indices
x_index=-1
y_index=-1
alignment_col = []
for (x,y) in alignment.T:
if (x != '-'):
x_index += 1
if (y != '-'):
y_index += 1
if (x == '-'):
label = 'i'
elif (y == '-'):
label = 'd'
else:
label = 'm'
alignment_col.append((label, x_index, y_index))
return(alignment_col)
def build_envelope(y1, y2, alignment_col, sequence_to_signal1, sequence_to_signal2, padding=150):
U = len(y1)
V = len(y2)
# get [sequence] to [signal range] mapping
sequence_to_signal_range1 = []
for i,v in enumerate(sequence_to_signal1[:-1]):
sequence_to_signal_range1.append([sequence_to_signal1[i],sequence_to_signal1[i+1]])
sequence_to_signal_range1.append([sequence_to_signal1[-1],U])
sequence_to_signal_range2 = []
for i,v in enumerate(sequence_to_signal2[:-1]):
sequence_to_signal_range2.append([sequence_to_signal2[i],sequence_to_signal2[i+1]])
sequence_to_signal_range2.append([sequence_to_signal2[-1],V])
# build alignment envelope
alignment_envelope = np.zeros(shape=(U,2),dtype=int)-1
for (i,tup) in enumerate(alignment_col):
(label, seq1, seq2) = tup
block = (int(sequence_to_signal_range1[max(seq1, 0)][0]), int(sequence_to_signal_range2[max(seq2, 0)][0]),
int(sequence_to_signal_range1[max(seq1, 0)][1]), int(sequence_to_signal_range2[max(seq2, 0)][1])
)
add_block(block, alignment_envelope)
# add a little padding to ensure some overlap
for i in range(len(alignment_envelope)):
alignment_envelope[i,0] = max(0,alignment_envelope[i,0]-padding)
alignment_envelope[i,1] = min(V,alignment_envelope[i,1]+padding)
# try and fix any problems
prev_end = 0
for i in range(len(alignment_envelope)):
if alignment_envelope[i,0] > alignment_envelope[i,1]:
alignment_envelope[i,0] = 0
# ensure some overlap
if alignment_envelope[i,0] > prev_end:
alignment_envelope[i,0] = prev_end
prev_end = alignment_envelope[i,1]
return(alignment_envelope)
def offset_envelope(full_envelope, subset):
(u1,u2,v1,v2) = subset
subset_envelope = np.copy(full_envelope[u1:u2])
subset_envelope[:,0] = subset_envelope[:,0] - v1
subset_envelope[:,1] = subset_envelope[:,1] - v1
return(subset_envelope)
def pad_envelope(envelope, U, V):
new_envelope = np.concatenate((envelope, [envelope[-1], envelope[-1]]))
for i,_ in enumerate(new_envelope):
if new_envelope[i,1] == V-1:
new_envelope[i,1] = V
new_envelope[U] = new_envelope[U-1]
new_envelope[U+1] = new_envelope[U-1]
return(new_envelope)
| [
"numpy.zeros",
"numpy.concatenate",
"numpy.copy"
] | [((3152, 3181), 'numpy.copy', 'np.copy', (['full_envelope[u1:u2]'], {}), '(full_envelope[u1:u2])\n', (3159, 3181), True, 'import numpy as np\n'), ((3370, 3426), 'numpy.concatenate', 'np.concatenate', (['(envelope, [envelope[-1], envelope[-1]])'], {}), '((envelope, [envelope[-1], envelope[-1]]))\n', (3384, 3426), True, 'import numpy as np\n'), ((2011, 2044), 'numpy.zeros', 'np.zeros', ([], {'shape': '(U, 2)', 'dtype': 'int'}), '(shape=(U, 2), dtype=int)\n', (2019, 2044), True, 'import numpy as np\n')] |
from amuse.test.amusetest import TestWithMPI
import os
import sys
import numpy
import math
from amuse.community.phantom.interface import PhantomInterface, Phantom
from amuse.datamodel import Particles
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.ic import plummer
from amuse.ic.plummer import new_plummer_model
from amuse.test.suite.codes_tests.gd_tests import (
_TestGravitationalDynamicsInterface,
)
try:
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
class TestPhantomInterface(TestWithMPI):
def gravity_code_interface(self):
return PhantomInterface
def reference_includes(self):
return "Price"
def starting_particle_index(self):
return 1
def test_initialise(self):
interface = self.gravity_code_interface()
instance = self.new_instance_of_an_optional_code(interface)
instance.initialize_code()
instance.stop()
def test_literature_reference(self):
interface = self.gravity_code_interface()
instance = self.new_instance_of_an_optional_code(interface)
instance.initialize_code()
reference_string = self.reference_includes()
self.assertTrue(
reference_string in instance.all_literature_references_string()
)
instance.stop()
def test_add_and_retrieve_particles(self):
interface = self.gravity_code_interface()
instance = self.new_instance_of_an_optional_code(interface)
instance.initialize_code()
# Phantom won't work with fewer than 7 particles!
n = 7
values = [1.0 * i for i in range(1, n)]
instance.new_particle(
values,
values,
values,
values,
values,
values,
values,
)
error = instance.commit_particles()
self.assertEqual(error, 0)
retrieved_state = instance.get_state(self.starting_particle_index())
self.assertEqual(1.0, retrieved_state['mass'])
retrieved_state = instance.get_state(
self.starting_particle_index()+n-2
)
instance.cleanup_code()
# For any particle other than a sink, Phantom has one fixed mass!
self.assertEqual(1.0, retrieved_state['mass'])
instance.stop()
def test_add_and_retrieve_sph_particles(self):
interface = self.gravity_code_interface()
instance = self.new_instance_of_an_optional_code(interface)
instance.initialize_code()
# Phantom won't work with fewer than 7 particles!
n = 100
values = [1.0 * i for i in range(1, n)]
instance.new_sph_particle(
values,
values,
values,
values,
values,
values,
values,
values,
)
error = instance.commit_particles()
self.assertEqual(error, 0)
retrieved_state = instance.get_state(self.starting_particle_index())
self.assertEqual(1.0, retrieved_state['mass'])
retrieved_state = instance.get_state(
self.starting_particle_index()+n-2
)
instance.cleanup_code()
# For any particle other than a sink, Phantom has one fixed mass!
self.assertEqual(1.0, retrieved_state['mass'])
instance.stop()
def test_parameters(self):
interface = self.gravity_code_interface()
# instance = self.new_instance_of_an_optional_code(interface)
instance = interface(redirection="none")
instance.initialize_code()
gamma, error = instance.get_gamma()
self.assertEqual(0, error)
self.assertEqual(1., gamma)
ieos, error = instance.get_ieos()
self.assertEqual(0, error)
self.assertEqual(1, ieos)
class TestPhantom(TestWithMPI):
def test_initialise(self):
instance = Phantom()
instance.stop()
def test_add_gasparticles(self):
n_particles = 10
instance = Phantom()
gas = Particles(n_particles)
gas.mass = 1 | nbody_system.mass
gas.x = numpy.arange(n_particles) | nbody_system.length
gas.y = numpy.arange(n_particles) | nbody_system.length
gas.z = 0 | nbody_system.length
gas.velocity = [0, 0, 0] | nbody_system.speed
gas.u = 0 | nbody_system.speed**2
instance.gas_particles.add_particles(gas)
self.assertEqual(10, len(instance.gas_particles))
instance.stop()
| [
"amuse.community.phantom.interface.Phantom",
"numpy.arange",
"amuse.datamodel.Particles"
] | [((3984, 3993), 'amuse.community.phantom.interface.Phantom', 'Phantom', ([], {}), '()\n', (3991, 3993), False, 'from amuse.community.phantom.interface import PhantomInterface, Phantom\n'), ((4100, 4109), 'amuse.community.phantom.interface.Phantom', 'Phantom', ([], {}), '()\n', (4107, 4109), False, 'from amuse.community.phantom.interface import PhantomInterface, Phantom\n'), ((4124, 4146), 'amuse.datamodel.Particles', 'Particles', (['n_particles'], {}), '(n_particles)\n', (4133, 4146), False, 'from amuse.datamodel import Particles\n'), ((4204, 4229), 'numpy.arange', 'numpy.arange', (['n_particles'], {}), '(n_particles)\n', (4216, 4229), False, 'import numpy\n'), ((4268, 4293), 'numpy.arange', 'numpy.arange', (['n_particles'], {}), '(n_particles)\n', (4280, 4293), False, 'import numpy\n')] |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMStateTuple
from memory import Memory
import utility
import os
import numpy as np
class Dual_DNC:
def __init__(self, controller_class, input_size1, input_size2, output_size,
memory_words_num = 256, memory_word_size = 64, memory_read_heads = 4,
batch_size = 1, hidden_controller_dim=128,
use_mem=True, decoder_mode=False, emb_size=64,
write_protect=False, dual_emb=True, share_mem=False,
use_teacher=False, attend_dim=0, persist_mode=False):
"""
constructs a complete DNC architecture as described in the DNC paper
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Parameters:
-----------
controller_class: BaseController
a concrete implementation of the BaseController class
input_size: int
the size of the input vector
output_size: int
the size of the output vector
max_sequence_length: int
the maximum length of an input sequence
memory_words_num: int
the number of words that can be stored in memory
memory_word_size: int
the size of an individual word in memory
memory_read_heads: int
the number of read heads in the memory
batch_size: int
the size of the data batch
"""
saved_args = locals()
print("saved_args is", saved_args)
self.input_size1 = input_size1
self.input_size2 = input_size2
self.output_size = output_size
self.words_num = memory_words_num
self.word_size = memory_word_size
self.read_heads = memory_read_heads
self.batch_size = batch_size
self.unpacked_input_data1 = None
self.unpacked_input_data2 = None
self.packed_output = None
self.packed_memory_view = None
self.decoder_mode = decoder_mode
self.decoder_point = tf.placeholder(tf.int32, name='decoder_point')#
self.encode1_point = tf.placeholder(tf.int32, name='encode1_point')#
self.encode2_point = tf.placeholder(tf.int32, name='encode2_point')
self.emb_size = emb_size
self.use_mem=use_mem
self.share_mem=share_mem
self.use_teacher = use_teacher
self.attend_dim = attend_dim
self.hidden_controller_dim = hidden_controller_dim
self.teacher_force = tf.placeholder(tf.bool,[None], name='teacher')
self.persist_mode = persist_mode
self.clear_mem = tf.placeholder(tf.bool, None, name='clear_mem')
if self.attend_dim>0:
self.W_a1 = tf.get_variable('W_a1', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.U_a1 = tf.get_variable('U_a1', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.v_a1 = tf.get_variable('v_a1', [self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.W_a2 = tf.get_variable('W_a2', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.U_a2 = tf.get_variable('U_a2', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.v_a2 = tf.get_variable('v_a2', [self.attend_dim],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
# DNC (or NTM) should be structurized into 2 main modules:
# all the graph is setup inside these twos:
self.W_emb1_encoder = tf.get_variable('embe1_w', [self.input_size1, self.emb_size],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.W_emb2_encoder = tf.get_variable('embe2_w', [self.input_size2, self.emb_size],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.W_emb_decoder = tf.get_variable('embd_w', [self.output_size, self.emb_size],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
with tf.variable_scope('input1_scope'):
self.memory1 = Memory(self.words_num, self.word_size, self.read_heads, self.batch_size)
self.controller1 = controller_class(self.emb_size, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, hidden_dim=hidden_controller_dim)
with tf.variable_scope('input2_scope'):
if not share_mem:
self.memory2 = Memory(self.words_num, self.word_size, self.read_heads, self.batch_size)
else:
self.memory2=self.memory1
self.controller2 = controller_class(self.emb_size, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, hidden_dim=hidden_controller_dim)
with tf.variable_scope('output_scope'):
if self.attend_dim==0:
self.controller3 = controller_class(self.emb_size, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, is_two_mem=2,
hidden_dim=hidden_controller_dim*2)
else:
self.controller3 = controller_class(self.emb_size+hidden_controller_dim * 2, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, is_two_mem=2,
hidden_dim=hidden_controller_dim * 2)
self.write_protect = write_protect
# input data placeholders
self.input_data1 = tf.placeholder(tf.float32, [batch_size, None, input_size1], name='input')
self.input_data2 = tf.placeholder(tf.float32, [batch_size, None, input_size2], name='input')
self.target_output = tf.placeholder(tf.float32, [batch_size, None, output_size], name='targets')
self.mask = tf.placeholder(tf.bool, [batch_size, None], name='mask')
self.sequence_length = tf.placeholder(tf.int32, name='sequence_length')# variant length?
self.dual_emb = dual_emb
if persist_mode:
self.cur_c = []
self.assign_op_cur_c = []
self.cur_h = []
self.assign_op_cur_h = []
self.cur_mem_content = []
self.assign_op_cur_mem = []
self.cur_u = []
self.assign_op_cur_u = []
self.cur_p = []
self.assign_op_cur_p = []
self.cur_L = []
self.assign_op_cur_L = []
self.cur_ww = []
self.assign_op_cur_ww = []
self.cur_rw = []
self.assign_op_cur_rw = []
self.cur_rv = []
self.assign_op_cur_rv = []
for i in range(2):
self.cur_c += [tf.get_variable('cur_c{}'.format(i), [self.batch_size, hidden_controller_dim],
trainable=False)]
self.assign_op_cur_c += [self.cur_c[i].assign(np.ones([self.batch_size, hidden_controller_dim]) * 1e-6)]
self.cur_h += [tf.get_variable('cur_h{}'.format(i), [self.batch_size, hidden_controller_dim],
trainable=False)]
self.assign_op_cur_h += [self.cur_h[i].assign(np.ones([self.batch_size, hidden_controller_dim]) * 1e-6)]
self.cur_mem_content+=[tf.get_variable('cur_mc{}'.format(i), [self.batch_size, self.words_num, self.word_size],
trainable=False)]
self.assign_op_cur_mem+=[self.cur_mem_content[i].assign(
np.ones([self.batch_size, self.words_num, self.word_size]) * 1e-6)]
self.cur_u += [tf.get_variable('cur_u{}'.format(i), [self.batch_size, self.words_num],
trainable=False)] # initial usage vector u
self.assign_op_cur_u += [self.cur_u[i].assign(np.zeros([self.batch_size, self.words_num]))]
self.cur_p += [tf.get_variable('cur_p{}'.format(i), [self.batch_size, self.words_num],
trainable=False)] # initial precedence vector p
self.assign_op_cur_p += [self.cur_p[i].assign(np.zeros([self.batch_size, self.words_num]))]
self.cur_L += [tf.get_variable('cur_L{}'.format(i), [self.batch_size, self.words_num, self.words_num],
trainable=False)] # initial link matrix L
self.assign_op_cur_L += [self.cur_L[i].assign(np.ones([self.batch_size, self.words_num, self.words_num]) * 1e-6)]
self.cur_ww += [tf.get_variable('cur_ww{}'.format(i), [self.batch_size, self.words_num],
trainable=False)] # initial write weighting
self.assign_op_cur_ww += [self.cur_ww[i].assign(np.ones([self.batch_size, self.words_num]) * 1e-6)]
self.cur_rw += [tf.get_variable('cur_rw{}'.format(i), [self.batch_size, self.words_num, self.read_heads],
trainable=False)] # initial read weightings
self.assign_op_cur_rw += [self.cur_rw[i].assign(np.ones([self.batch_size, self.words_num, self.read_heads]) * 1e-6)]
self.cur_rv += [tf.get_variable('cur_rv{}'.format(i), [self.batch_size, self.word_size, self.read_heads],
trainable=False)] # initial read vectors
self.assign_op_cur_rv += [self.cur_rv[i].assign(np.ones([self.batch_size, self.word_size, self.read_heads]) * 1e-6)]
self.build_graph()
# The nature of DNC is to process data by step and remmeber data at each time step when necessary
# If input has sequence format --> suitable with RNN core controller --> each time step in RNN equals 1 time step in DNC
# or just feed input to MLP --> each feed is 1 time step
def _step_op(self, time, step1, step2, memory_state, controller_state=None, controller_hiddens=None):
"""
performs a step operation on the input step data
Parameters:
----------
step: Tensor (batch_size, input_size)
memory_state: Tuple
a tuple of current memory parameters
controller_state: Tuple
the state of the controller if it's recurrent
Returns: Tuple
output: Tensor (batch_size, output_size)
memory_view: dict
"""
memory_state1 = memory_state[0]
memory_state2 = memory_state[1]
last_read_vectors1 = memory_state1[6] # read values from memory
last_read_vectors2 = memory_state2[6] # read values from memory
controller_state1 = controller_state[0]
controller_state2 = controller_state[1]
# controller state is the rnn cell state pass through each time step
def c1():
def c11():
return self.controller1.process_zero()
def c12():
return self.controller1.process_input(step1, last_read_vectors1, controller_state1)
pre_output1, interface1, nn_state1 = tf.cond(time<self.encode1_point, c11, c12)
def c13():
return self.controller2.process_zero()
def c14():
return self.controller2.process_input(step2, last_read_vectors2, controller_state2)
pre_output2, interface2, nn_state2 = tf.cond(time<self.encode2_point, c13, c14)
pre_output12 = pre_output1 + pre_output2
interface12 = (interface1, interface2)
nn_state12 = (nn_state1, nn_state2)
return pre_output12, interface12, nn_state12
def c2():
con_c1=controller_state1[0]
con_h1=controller_state1[1]
con_c2 = controller_state2[0]
con_h2 = controller_state2[1]
ncontroller_state = LSTMStateTuple(tf.concat([con_c1,con_c2],axis=-1), tf.concat([con_h1,con_h2],axis=-1))
nread_vec = tf.concat([last_read_vectors1, last_read_vectors2],axis=1)
step = step1
if controller_hiddens:
from_steps=[self.encode1_point, self.encode2_point]
v_a=[self.v_a1, self.v_a2]
U_a=[self.U_a1, self.U_a2]
W_a=[self.W_a1, self.W_a2]
for cci, controller_hiddens_ in enumerate(controller_hiddens):
values = controller_hiddens_.gather(tf.range(from_steps[cci], self.decoder_point))
encoder_outputs = \
tf.reshape(values, [self.batch_size, -1, self.hidden_controller_dim]) # bs x Lin x h
v = tf.tanh(
tf.reshape(tf.matmul(tf.reshape(encoder_outputs, [-1, self.hidden_controller_dim]), U_a[cci]),
[self.batch_size, -1, self.attend_dim])
+ tf.reshape(
tf.matmul(tf.reshape(controller_state[cci][0], [-1, self.hidden_controller_dim]), W_a[cci]),
[self.batch_size, 1, self.attend_dim])) # bs.Lin x h_att
v = tf.reshape(v, [-1, self.attend_dim])
eijs = tf.matmul(v, tf.expand_dims(v_a[cci], 1)) # bs.Lin x 1
eijs = tf.reshape(eijs, [self.batch_size, -1]) # bs x Lin
exps = tf.exp(eijs)
alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1]) # bs x Lin
att = tf.reduce_sum(encoder_outputs * tf.expand_dims(alphas, 2), 1) # bs x h x 1
att = tf.reshape(att, [self.batch_size, self.hidden_controller_dim]) # bs x h
step = tf.concat([step, att], axis=-1) # bs x (decoder_is + h)
pre_output, interface, nn_state = \
self.controller3.process_input(step,
nread_vec,
ncontroller_state)
#trick split than group
c_l, c_r = tf.split(nn_state[0],num_or_size_splits=2, axis=-1)
h_l, h_r = tf.split(nn_state[1], num_or_size_splits=2, axis=-1)
return pre_output, interface, (LSTMStateTuple(c_l,h_l), LSTMStateTuple(c_r, h_r))
pre_output, interface, nn_state = tf.cond(time>=self.decoder_point, c2, c1)
interface1 = interface[0]
interface2 = interface[1]
# memory_matrix isthe copy of memory for reading process later
# do the write first
def fn1():
def fn11():
return memory_state1[1], memory_state1[4], memory_state1[0], memory_state1[3], memory_state1[2]
def fn12():
return self.memory1.write(
memory_state1[0], memory_state1[1], memory_state1[5],
memory_state1[4], memory_state1[2], memory_state1[3],
interface1['write_key'],
interface1['write_strength'],
interface1['free_gates'],
interface1['allocation_gate'],
interface1['write_gate'],
interface1['write_vector'],
interface1['erase_vector']
)
def fn13():
return memory_state2[1], memory_state2[4], memory_state2[0], memory_state2[3], memory_state2[2]
def fn14():
return self.memory2.write(
memory_state2[0], memory_state2[1], memory_state2[5],
memory_state2[4], memory_state2[2], memory_state2[3],
interface2['write_key'],
interface2['write_strength'],
interface2['free_gates'],
interface2['allocation_gate'],
interface2['write_gate'],
interface2['write_vector'],
interface2['erase_vector'])
usage_vector1, write_weighting1, memory_matrix1, link_matrix1, precedence_vector1 = \
tf.cond(time<self.encode1_point, fn11, fn12)
usage_vector2, write_weighting2, memory_matrix2, link_matrix2, precedence_vector2 = \
tf.cond(time<self.encode2_point, fn13, fn14)
usage_vector12 = (usage_vector1, usage_vector2)
write_weighting12 = (write_weighting1, write_weighting2)
memory_matrix12 = (memory_matrix1, memory_matrix2)
link_matrix12 = (link_matrix1, link_matrix2)
precedence_vector12 = (precedence_vector1, precedence_vector2)
return usage_vector12, write_weighting12, memory_matrix12, link_matrix12, precedence_vector12
def fn2():
return (memory_state1[1],memory_state2[1]), \
(memory_state1[4], memory_state2[4]), \
(memory_state1[0], memory_state2[0]), \
(memory_state1[3], memory_state2[3]), \
(memory_state1[2], memory_state2[2])
if self.write_protect:
usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector\
= tf.cond(time>=self.decoder_point, fn2, fn1)
else:
usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector = fn1()
# then do the read, read after write because the write weight is needed to produce temporal linklage to guide the reading
def r11():
return self.memory1.read_zero()
def r12():
return self.memory1.read(
memory_matrix[0],
memory_state1[5],
interface1['read_keys'],
interface1['read_strengths'],
link_matrix[0],
interface1['read_modes'],
)
def r13():
return self.memory2.read_zero()
def r14():
return self.memory2.read(
memory_matrix[1],
memory_state2[5],
interface2['read_keys'],
interface2['read_strengths'],
link_matrix[1],
interface2['read_modes'],
)
read_weightings1, read_vectors1 = tf.cond(time<self.encode1_point, r11, r12)
read_weightings2, read_vectors2 = tf.cond(time<self.encode2_point, r13, r14)
return [
# report new memory state to be updated outside the condition branch
memory_matrix, #0
# neccesary for next step to compute memory stuffs
usage_vector, #1
precedence_vector, #2
link_matrix, #3
write_weighting, #4
(read_weightings1, read_weightings2), #5
(read_vectors1, read_vectors2), #6
# the final output of dnc
self.controller3.final_output(pre_output, tf.concat([read_vectors1, read_vectors2], axis=1)), #7
# the values public info to outside
(interface1['free_gates'], interface2['free_gates']), #8
(interface1['allocation_gate'], interface2['allocation_gate']), #9
(interface1['write_gate'],interface2['write_gate']), #10
# report new state of RNN if exists, neccesary for next step to compute inner controller stuff
nn_state[0][0] if nn_state[0] is not None else tf.zeros(1), #11
nn_state[0][1] if nn_state[0] is not None else tf.zeros(1), #12
nn_state[1][0] if nn_state[1] is not None else tf.zeros(1) , # 13
nn_state[1][1] if nn_state[1] is not None else tf.zeros(1) # 14
]
'''
THIS WRAPPER FOR ONE STEP OF COMPUTATION --> INTERFACE FOR SCAN/WHILE LOOP
'''
def _loop_body(self, time, memory_state, outputs, free_gates, allocation_gates, write_gates,
read_weightings, write_weightings, usage_vectors, controller_state,
outputs_cache, controller_hiddens):
"""
the body of the DNC sequence processing loop
Parameters:
----------
time: Tensor
outputs: TensorArray
memory_state: Tuple
free_gates: TensorArray
allocation_gates: TensorArray
write_gates: TensorArray
read_weightings: TensorArray,
write_weightings: TensorArray,
usage_vectors: TensorArray,
controller_state: Tuple
Returns: Tuple containing all updated arguments
"""
# dynamic tensor array input
def fn1():
return tf.matmul(self.unpacked_input_data1.read(time), self.W_emb1_encoder)
def fn2():
def fn2_1():
return self.target_output[:,time-1,:]
def fn2_2():
return tf.one_hot(tf.argmax(outputs_cache.read(time - 1), axis=-1), depth=self.output_size)
if self.use_teacher:
feed_value=tf.cond(self.teacher_force[time-1],fn2_1,fn2_2)
else:
feed_value=fn2_2()
if self.dual_emb:
return tf.matmul(feed_value, self.W_emb_decoder)
else:
return tf.matmul(feed_value, self.W_emb1_encoder)
def fn12():
return tf.matmul(self.unpacked_input_data2.read(time), self.W_emb2_encoder)
def fn22():
return tf.zeros([self.batch_size, self.emb_size]) #here for format consistent, not used
if self.decoder_mode:
step_input1 = tf.cond(time>=self.decoder_point, fn2, fn1)
step_input2 = tf.cond(time >= self.decoder_point, fn22, fn12)
else:
step_input1 = fn1()
step_input2 = fn12()
# compute one step of controller
if self.attend_dim>0:
output_list = self._step_op(time, step_input1, step_input2, memory_state, controller_state, controller_hiddens)
else:
output_list = self._step_op(time, step_input1, step_input2, memory_state, controller_state)
# update memory parameters
new_memory_state1=[]
new_memory_state2=[]
for obj in output_list[:7]:
new_memory_state1.append(obj[0])
new_memory_state2.append(obj[1])
new_memory_state = [tuple(new_memory_state1), tuple(new_memory_state2)]
new_controller_state = [LSTMStateTuple(output_list[11], output_list[12]),
LSTMStateTuple(output_list[13], output_list[14])] # hidden and state values
controller_hiddens = [controller_hiddens[0].write(time, output_list[11]),
controller_hiddens[1].write(time, output_list[13])]
outputs = outputs.write(time, output_list[7])# new output is updated
outputs_cache = outputs_cache.write(time, output_list[7])# new output is updated
# collecting memory view for the current step
free_gates2 = [free_gates[0].write(time, output_list[8][0]),free_gates[1].write(time, output_list[8][1])]
allocation_gates2 = [allocation_gates[0].write(time, output_list[9][0]),allocation_gates[1].write(time, output_list[9][1])]
write_gates2 = [write_gates[0].write(time, output_list[10][0]),write_gates[1].write(time, output_list[10][1])]
read_weightings2 = [read_weightings[0].write(time, output_list[5][0]),read_weightings[1].write(time, output_list[5][1])]
write_weightings2 =[write_weightings[0].write(time, output_list[4][0]),write_weightings[1].write(time, output_list[4][1])]
usage_vectors2 = [usage_vectors[0].write(time, output_list[1][0]),usage_vectors[1].write(time, output_list[1][1])]
# all variables have been updated should be return for next step reference
return (
time + 1, #0
new_memory_state, #1
outputs, #2
free_gates2,allocation_gates2, write_gates2, #3 4 5
read_weightings2, write_weightings2, usage_vectors2, #6 7 8
new_controller_state, #9
outputs_cache, #10
controller_hiddens #11
)
def build_graph(self):
"""
builds the computational graph that performs a step-by-step evaluation
of the input data batches
"""
# make dynamic time step length tensor
self.unpacked_input_data1 = utility.unpack_into_tensorarray(self.input_data1, 1, self.sequence_length)
self.unpacked_input_data2 = utility.unpack_into_tensorarray(self.input_data2, 1, self.sequence_length)
# want to store all time step values of these variables
outputs = tf.TensorArray(tf.float32, self.sequence_length)
outputs_cache = tf.TensorArray(tf.float32, self.sequence_length)
free_gates = [tf.TensorArray(tf.float32, self.sequence_length),tf.TensorArray(tf.float32, self.sequence_length)]
allocation_gates = [tf.TensorArray(tf.float32, self.sequence_length), tf.TensorArray(tf.float32, self.sequence_length)]
write_gates = [tf.TensorArray(tf.float32, self.sequence_length),tf.TensorArray(tf.float32, self.sequence_length)]
read_weightings = [tf.TensorArray(tf.float32, self.sequence_length),tf.TensorArray(tf.float32, self.sequence_length)]
write_weightings = [tf.TensorArray(tf.float32, self.sequence_length),tf.TensorArray(tf.float32, self.sequence_length)]
usage_vectors = [tf.TensorArray(tf.float32, self.sequence_length),tf.TensorArray(tf.float32, self.sequence_length)]
controller_hiddens = [tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False),
tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)]
# inital state for RNN controller
controller_state1 = self.controller1.get_state() if self.controller1.has_recurrent_nn else (tf.zeros(1), tf.zeros(1))
controller_state2 = self.controller2.get_state() if self.controller2.has_recurrent_nn else (tf.zeros(1), tf.zeros(1))
memory_state = [self.memory1.init_memory(), self.memory2.init_memory()]
if self.persist_mode:
def p1():
return memory_state, controller_state1, controller_state2
def p2():
tmp=[(self.cur_mem_content[0], self.cur_u[0], self.cur_p[0],
self.cur_L[0], self.cur_ww[0], self.cur_rw[0], self.cur_rv[0]),
(self.cur_mem_content[1], self.cur_u[1], self.cur_p[1],
self.cur_L[1], self.cur_ww[1], self.cur_rw[1], self.cur_rv[1])
]
if len(memory_state[0])>len(tmp[0]):
print('cache mode')
tmp[0] = (self.cur_mem_content[0], self.cur_u[0], self.cur_p[0],
self.cur_L[0], self.cur_ww[0], self.cur_rw[0], self.cur_rv[0],
memory_state[0][-2],memory_state[0][-1])
tmp[1] = (self.cur_mem_content[1], self.cur_u[1], self.cur_p[1],
self.cur_L[1], self.cur_ww[1], self.cur_rw[1], self.cur_rv[1],
memory_state[1][-2], memory_state[1][-1])
return tmp, \
LSTMStateTuple(self.cur_c[0], self.cur_h[0]),LSTMStateTuple(self.cur_c[1], self.cur_h[1])
memory_state, controller_state1, controller_state2=tf.cond(self.clear_mem, p1, p2)
if not isinstance(controller_state1, LSTMStateTuple):
controller_state1 = LSTMStateTuple(controller_state1[0], controller_state1[1])
if not isinstance(controller_state2, LSTMStateTuple):
controller_state2 = LSTMStateTuple(controller_state2[0], controller_state2[1])
controller_state=[controller_state1, controller_state2]
# final_results = None
with tf.variable_scope("sequence_loop"):
time = tf.constant(0, dtype=tf.int32)
# use while instead of scan --> suitable with dynamic time step
final_results = tf.while_loop(
cond=lambda time, *_: time < self.sequence_length,
body=self._loop_body,
loop_vars=(
time, memory_state, outputs,
free_gates, allocation_gates, write_gates,
read_weightings, write_weightings,
usage_vectors, controller_state, outputs_cache,controller_hiddens
), # do not need to provide intial values, the initial value lies in the variables themselves
parallel_iterations=1,
swap_memory=True,
)
dependencies = []
if self.controller1.has_recurrent_nn:
# tensor array of pair of hidden and state values of rnn
dependencies.append(self.controller1.update_state(final_results[9][0]))
if self.controller2.has_recurrent_nn:
# tensor array of pair of hidden and state values of rnn
dependencies.append(self.controller2.update_state(final_results[9][1]))
with tf.control_dependencies(dependencies):
# convert output tensor array to normal tensor
self.packed_output = utility.pack_into_tensor(final_results[2], axis=1)
self.packed_memory_view = {
'free_gates1': utility.pack_into_tensor(final_results[3][0], axis=1),
'free_gates2': utility.pack_into_tensor(final_results[3][1], axis=1),
'allocation_gates1': utility.pack_into_tensor(final_results[4][0], axis=1),
'allocation_gates2': utility.pack_into_tensor(final_results[4][1], axis=1),
'write_gates1': utility.pack_into_tensor(final_results[5][0], axis=1),
'write_gates2': utility.pack_into_tensor(final_results[5][1], axis=1),
'read_weightings1': utility.pack_into_tensor(final_results[6][0], axis=1),
'read_weightings2': utility.pack_into_tensor(final_results[6][1], axis=1),
'write_weightings1': utility.pack_into_tensor(final_results[7][0], axis=1),
'write_weightings2': utility.pack_into_tensor(final_results[7][1], axis=1),
'usage_vectors1': utility.pack_into_tensor(final_results[8][0], axis=1),
'usage_vectors2': utility.pack_into_tensor(final_results[8][1], axis=1),
}
def get_outputs(self):
"""
returns the graph nodes for the output and memory view
Returns: Tuple
outputs: Tensor (batch_size, time_steps, output_size)
memory_view: dict
"""
return self.packed_output, self.packed_memory_view
def assign_pretrain_emb1_encoder(self, sess, lookup_mat):
assign_op_W_emb_encoder = self.W_emb1_encoder.assign(lookup_mat)
sess.run([assign_op_W_emb_encoder])
def assign_pretrain_emb2_encoder(self, sess, lookup_mat):
assign_op_W_emb_encoder = self.W_emb2_encoder.assign(lookup_mat)
sess.run([assign_op_W_emb_encoder])
def assign_pretrain_emb_decoder(self, sess, lookup_mat):
assign_op_W_emb_decoder = self.W_emb_decoder.assign(lookup_mat)
sess.run([assign_op_W_emb_decoder])
def build_loss_function(self, optimizer=None,clip_s=10):
print('build loss....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.softmax(output, dim=-1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.slice(self.target_output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]),
logits=tf.slice(output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]), dim=-1)
)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def build_loss_function_multi_label(self, optimizer=None, clip_s=10):
print('build loss....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.sigmoid(output)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.slice(self.target_output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]),
logits=tf.slice(output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]))
)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def build_loss_function_mask(self, optimizer=None, clip_s=10):
print('build loss mask....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.softmax(output, dim=-1)
score=tf.nn.softmax_cross_entropy_with_logits(
labels=self.target_output,
logits=output, dim=-1)
score_flatten=tf.reshape(score,[-1])
mask_flatten=tf.reshape(self.mask,[-1])
mask_score=tf.boolean_mask(score_flatten, mask_flatten)
loss = tf.reduce_mean(mask_score)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def print_config(self):
return 'din_sout{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(self.use_mem,
self.decoder_mode,
self.write_protect,
self.words_num,
self.word_size,
self.share_mem,
self.use_teacher,
self.persist_mode,
self.attend_dim)
@staticmethod
def save(session, ckpts_dir, name):
"""
saves the current values of the model's parameters to a checkpoint
Parameters:
----------
session: tf.Session
the tensorflow session to save
ckpts_dir: string
the path to the checkpoints directories
name: string
the name of the checkpoint subdirectory
"""
checkpoint_dir = os.path.join(ckpts_dir, name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
tf.train.Saver(tf.trainable_variables()).save(session, os.path.join(checkpoint_dir, 'model.ckpt'))
@staticmethod
def restore(session, ckpts_dir, name):
"""
session: tf.Session
the tensorflow session to restore into
ckpts_dir: string
the path to the checkpoints directories
name: string
the name of the checkpoint subdirectory
"""
tf.train.Saver(tf.trainable_variables()).restore(session, os.path.join(ckpts_dir, name, 'model.ckpt'))
def clear_current_mem(self,sess):
if self.persist_mode:
for i in range(2):
sess.run([self.assign_op_cur_mem[i], self.assign_op_cur_u[i], self.assign_op_cur_p[i],
self.assign_op_cur_L[i], self.assign_op_cur_ww[i], self.assign_op_cur_rw[i],
self.assign_op_cur_rv[i]])
sess.run([self.assign_op_cur_c[i], self.assign_op_cur_h[i]])
@staticmethod
def get_bool_rand_incremental(size_seq, prob_true_min=0, prob_true_max=0.25):
ret = []
for i in range(size_seq):
prob_true = (prob_true_max - prob_true_min) / size_seq * i
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret)
@staticmethod
def get_bool_rand(size_seq, prob_true=0.1):
ret = []
for i in range(size_seq):
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret)
@staticmethod
def get_bool_rand_curriculum(size_seq, epoch, k=0.99, type='exp'):
if type == 'exp':
prob_true = k ** epoch
elif type == 'sig':
prob_true = k / (k + np.exp(epoch / k))
ret = []
for i in range(size_seq):
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret) | [
"tensorflow.cond",
"tensorflow.slice",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"numpy.exp",
"tensorflow.split",
"os.path.join",
"memory.Memory",
"utilit... | [((2047, 2093), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""decoder_point"""'}), "(tf.int32, name='decoder_point')\n", (2061, 2093), True, 'import tensorflow as tf\n'), ((2124, 2170), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""encode1_point"""'}), "(tf.int32, name='encode1_point')\n", (2138, 2170), True, 'import tensorflow as tf\n'), ((2201, 2247), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""encode2_point"""'}), "(tf.int32, name='encode2_point')\n", (2215, 2247), True, 'import tensorflow as tf\n'), ((2507, 2554), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[None]'], {'name': '"""teacher"""'}), "(tf.bool, [None], name='teacher')\n", (2521, 2554), True, 'import tensorflow as tf\n'), ((2620, 2667), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', 'None'], {'name': '"""clear_mem"""'}), "(tf.bool, None, name='clear_mem')\n", (2634, 2667), True, 'import tensorflow as tf\n'), ((6186, 6259), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, None, input_size1]'], {'name': '"""input"""'}), "(tf.float32, [batch_size, None, input_size1], name='input')\n", (6200, 6259), True, 'import tensorflow as tf\n'), ((6287, 6360), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, None, input_size2]'], {'name': '"""input"""'}), "(tf.float32, [batch_size, None, input_size2], name='input')\n", (6301, 6360), True, 'import tensorflow as tf\n'), ((6390, 6465), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, None, output_size]'], {'name': '"""targets"""'}), "(tf.float32, [batch_size, None, output_size], name='targets')\n", (6404, 6465), True, 'import tensorflow as tf\n'), ((6486, 6542), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[batch_size, None]'], {'name': '"""mask"""'}), "(tf.bool, [batch_size, None], name='mask')\n", (6500, 6542), True, 'import tensorflow as tf\n'), ((6574, 6622), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""sequence_length"""'}), "(tf.int32, name='sequence_length')\n", (6588, 6622), True, 'import tensorflow as tf\n'), ((14968, 15011), 'tensorflow.cond', 'tf.cond', (['(time >= self.decoder_point)', 'c2', 'c1'], {}), '(time >= self.decoder_point, c2, c1)\n', (14975, 15011), True, 'import tensorflow as tf\n'), ((18766, 18810), 'tensorflow.cond', 'tf.cond', (['(time < self.encode1_point)', 'r11', 'r12'], {}), '(time < self.encode1_point, r11, r12)\n', (18773, 18810), True, 'import tensorflow as tf\n'), ((18852, 18896), 'tensorflow.cond', 'tf.cond', (['(time < self.encode2_point)', 'r13', 'r14'], {}), '(time < self.encode2_point, r13, r14)\n', (18859, 18896), True, 'import tensorflow as tf\n'), ((24808, 24882), 'utility.unpack_into_tensorarray', 'utility.unpack_into_tensorarray', (['self.input_data1', '(1)', 'self.sequence_length'], {}), '(self.input_data1, 1, self.sequence_length)\n', (24839, 24882), False, 'import utility\n'), ((24919, 24993), 'utility.unpack_into_tensorarray', 'utility.unpack_into_tensorarray', (['self.input_data2', '(1)', 'self.sequence_length'], {}), '(self.input_data2, 1, self.sequence_length)\n', (24950, 24993), False, 'import utility\n'), ((25077, 25125), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25091, 25125), True, 'import tensorflow as tf\n'), ((25150, 25198), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25164, 25198), True, 'import tensorflow as tf\n'), ((31859, 31888), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (31872, 31888), True, 'import tensorflow as tf\n'), ((32912, 32933), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['output'], {}), '(output)\n', (32925, 32933), True, 'import tensorflow as tf\n'), ((33946, 33975), 'tensorflow.nn.softmax', 
'tf.nn.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (33959, 33975), True, 'import tensorflow as tf\n'), ((33991, 34085), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'self.target_output', 'logits': 'output', 'dim': '(-1)'}), '(labels=self.target_output, logits=\n output, dim=-1)\n', (34030, 34085), True, 'import tensorflow as tf\n'), ((34128, 34151), 'tensorflow.reshape', 'tf.reshape', (['score', '[-1]'], {}), '(score, [-1])\n', (34138, 34151), True, 'import tensorflow as tf\n'), ((34172, 34199), 'tensorflow.reshape', 'tf.reshape', (['self.mask', '[-1]'], {}), '(self.mask, [-1])\n', (34182, 34199), True, 'import tensorflow as tf\n'), ((34218, 34262), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['score_flatten', 'mask_flatten'], {}), '(score_flatten, mask_flatten)\n', (34233, 34262), True, 'import tensorflow as tf\n'), ((34280, 34306), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['mask_score'], {}), '(mask_score)\n', (34294, 34306), True, 'import tensorflow as tf\n'), ((35644, 35673), 'os.path.join', 'os.path.join', (['ckpts_dir', 'name'], {}), '(ckpts_dir, name)\n', (35656, 35673), False, 'import os\n'), ((37104, 37119), 'numpy.asarray', 'np.asarray', (['ret'], {}), '(ret)\n', (37114, 37119), True, 'import numpy as np\n'), ((37383, 37398), 'numpy.asarray', 'np.asarray', (['ret'], {}), '(ret)\n', (37393, 37398), True, 'import numpy as np\n'), ((37826, 37841), 'numpy.asarray', 'np.asarray', (['ret'], {}), '(ret)\n', (37836, 37841), True, 'import numpy as np\n'), ((4538, 4571), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input1_scope"""'], {}), "('input1_scope')\n", (4555, 4571), True, 'import tensorflow as tf\n'), ((4600, 4672), 'memory.Memory', 'Memory', (['self.words_num', 'self.word_size', 'self.read_heads', 'self.batch_size'], {}), '(self.words_num, self.word_size, self.read_heads, self.batch_size)\n', (4606, 4672), False, 'from memory import Memory\n'), ((4905, 
4938), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input2_scope"""'], {}), "('input2_scope')\n", (4922, 4938), True, 'import tensorflow as tf\n'), ((5369, 5402), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output_scope"""'], {}), "('output_scope')\n", (5386, 5402), True, 'import tensorflow as tf\n'), ((11775, 11819), 'tensorflow.cond', 'tf.cond', (['(time < self.encode1_point)', 'c11', 'c12'], {}), '(time < self.encode1_point, c11, c12)\n', (11782, 11819), True, 'import tensorflow as tf\n'), ((12073, 12117), 'tensorflow.cond', 'tf.cond', (['(time < self.encode2_point)', 'c13', 'c14'], {}), '(time < self.encode2_point, c13, c14)\n', (12080, 12117), True, 'import tensorflow as tf\n'), ((12654, 12713), 'tensorflow.concat', 'tf.concat', (['[last_read_vectors1, last_read_vectors2]'], {'axis': '(1)'}), '([last_read_vectors1, last_read_vectors2], axis=1)\n', (12663, 12713), True, 'import tensorflow as tf\n'), ((14702, 14754), 'tensorflow.split', 'tf.split', (['nn_state[0]'], {'num_or_size_splits': '(2)', 'axis': '(-1)'}), '(nn_state[0], num_or_size_splits=2, axis=-1)\n', (14710, 14754), True, 'import tensorflow as tf\n'), ((14777, 14829), 'tensorflow.split', 'tf.split', (['nn_state[1]'], {'num_or_size_splits': '(2)', 'axis': '(-1)'}), '(nn_state[1], num_or_size_splits=2, axis=-1)\n', (14785, 14829), True, 'import tensorflow as tf\n'), ((16671, 16717), 'tensorflow.cond', 'tf.cond', (['(time < self.encode1_point)', 'fn11', 'fn12'], {}), '(time < self.encode1_point, fn11, fn12)\n', (16678, 16717), True, 'import tensorflow as tf\n'), ((16830, 16876), 'tensorflow.cond', 'tf.cond', (['(time < self.encode2_point)', 'fn13', 'fn14'], {}), '(time < self.encode2_point, fn13, fn14)\n', (16837, 16876), True, 'import tensorflow as tf\n'), ((17762, 17807), 'tensorflow.cond', 'tf.cond', (['(time >= self.decoder_point)', 'fn2', 'fn1'], {}), '(time >= self.decoder_point, fn2, fn1)\n', (17769, 17807), True, 'import tensorflow as tf\n'), ((21849, 21891), 
'tensorflow.zeros', 'tf.zeros', (['[self.batch_size, self.emb_size]'], {}), '([self.batch_size, self.emb_size])\n', (21857, 21891), True, 'import tensorflow as tf\n'), ((21988, 22033), 'tensorflow.cond', 'tf.cond', (['(time >= self.decoder_point)', 'fn2', 'fn1'], {}), '(time >= self.decoder_point, fn2, fn1)\n', (21995, 22033), True, 'import tensorflow as tf\n'), ((22058, 22105), 'tensorflow.cond', 'tf.cond', (['(time >= self.decoder_point)', 'fn22', 'fn12'], {}), '(time >= self.decoder_point, fn22, fn12)\n', (22065, 22105), True, 'import tensorflow as tf\n'), ((22833, 22881), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['output_list[11]', 'output_list[12]'], {}), '(output_list[11], output_list[12])\n', (22847, 22881), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((22915, 22963), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['output_list[13]', 'output_list[14]'], {}), '(output_list[13], output_list[14])\n', (22929, 22963), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((25221, 25269), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25235, 25269), True, 'import tensorflow as tf\n'), ((25270, 25318), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25284, 25318), True, 'import tensorflow as tf\n'), ((25348, 25396), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25362, 25396), True, 'import tensorflow as tf\n'), ((25398, 25446), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25412, 25446), True, 'import tensorflow as tf\n'), ((25471, 25519), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, 
self.sequence_length)\n', (25485, 25519), True, 'import tensorflow as tf\n'), ((25520, 25568), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25534, 25568), True, 'import tensorflow as tf\n'), ((25597, 25645), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25611, 25645), True, 'import tensorflow as tf\n'), ((25646, 25694), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25660, 25694), True, 'import tensorflow as tf\n'), ((25724, 25772), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25738, 25772), True, 'import tensorflow as tf\n'), ((25773, 25821), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25787, 25821), True, 'import tensorflow as tf\n'), ((25848, 25896), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25862, 25896), True, 'import tensorflow as tf\n'), ((25897, 25945), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {}), '(tf.float32, self.sequence_length)\n', (25911, 25945), True, 'import tensorflow as tf\n'), ((25977, 26049), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {'clear_after_read': '(False)'}), '(tf.float32, self.sequence_length, clear_after_read=False)\n', (25991, 26049), True, 'import tensorflow as tf\n'), ((26081, 26153), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32', 'self.sequence_length'], {'clear_after_read': '(False)'}), '(tf.float32, self.sequence_length, clear_after_read=False)\n', (26095, 26153), True, 'import tensorflow as tf\n'), ((27810, 27841), 
'tensorflow.cond', 'tf.cond', (['self.clear_mem', 'p1', 'p2'], {}), '(self.clear_mem, p1, p2)\n', (27817, 27841), True, 'import tensorflow as tf\n'), ((27937, 27995), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['controller_state1[0]', 'controller_state1[1]'], {}), '(controller_state1[0], controller_state1[1])\n', (27951, 27995), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((28090, 28148), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['controller_state2[0]', 'controller_state2[1]'], {}), '(controller_state2[0], controller_state2[1])\n', (28104, 28148), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((28262, 28296), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sequence_loop"""'], {}), "('sequence_loop')\n", (28279, 28296), True, 'import tensorflow as tf\n'), ((28317, 28347), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (28328, 28347), True, 'import tensorflow as tf\n'), ((28453, 28784), 'tensorflow.while_loop', 'tf.while_loop', ([], {'cond': '(lambda time, *_: time < self.sequence_length)', 'body': 'self._loop_body', 'loop_vars': '(time, memory_state, outputs, free_gates, allocation_gates, write_gates,\n read_weightings, write_weightings, usage_vectors, controller_state,\n outputs_cache, controller_hiddens)', 'parallel_iterations': '(1)', 'swap_memory': '(True)'}), '(cond=lambda time, *_: time < self.sequence_length, body=self.\n _loop_body, loop_vars=(time, memory_state, outputs, free_gates,\n allocation_gates, write_gates, read_weightings, write_weightings,\n usage_vectors, controller_state, outputs_cache, controller_hiddens),\n parallel_iterations=1, swap_memory=True)\n', (28466, 28784), True, 'import tensorflow as tf\n'), ((29490, 29527), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['dependencies'], {}), '(dependencies)\n', (29513, 29527), True, 'import tensorflow as tf\n'), ((29621, 
29671), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[2]'], {'axis': '(1)'}), '(final_results[2], axis=1)\n', (29645, 29671), False, 'import utility\n'), ((31780, 31804), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (31802, 31804), True, 'import tensorflow as tf\n'), ((32833, 32857), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (32855, 32857), True, 'import tensorflow as tf\n'), ((33867, 33891), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (33889, 33891), True, 'import tensorflow as tf\n'), ((35690, 35720), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (35704, 35720), False, 'import os\n'), ((35734, 35761), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (35745, 35761), False, 'import os\n'), ((35826, 35868), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model.ckpt"""'], {}), "(checkpoint_dir, 'model.ckpt')\n", (35838, 35868), False, 'import os\n'), ((36253, 36296), 'os.path.join', 'os.path.join', (['ckpts_dir', 'name', '"""model.ckpt"""'], {}), "(ckpts_dir, name, 'model.ckpt')\n", (36265, 36296), False, 'import os\n'), ((4071, 4121), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (4100, 4121), True, 'import tensorflow as tf\n'), ((4272, 4322), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (4301, 4322), True, 'import tensorflow as tf\n'), ((4471, 4521), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (4500, 4521), True, 'import tensorflow as tf\n'), ((5001, 5073), 'memory.Memory', 'Memory', (['self.words_num', 'self.word_size', 'self.read_heads', 'self.batch_size'], {}), 
'(self.words_num, self.word_size, self.read_heads, self.batch_size)\n', (5007, 5073), False, 'from memory import Memory\n'), ((12558, 12594), 'tensorflow.concat', 'tf.concat', (['[con_c1, con_c2]'], {'axis': '(-1)'}), '([con_c1, con_c2], axis=-1)\n', (12567, 12594), True, 'import tensorflow as tf\n'), ((12594, 12630), 'tensorflow.concat', 'tf.concat', (['[con_h1, con_h2]'], {'axis': '(-1)'}), '([con_h1, con_h2], axis=-1)\n', (12603, 12630), True, 'import tensorflow as tf\n'), ((19404, 19453), 'tensorflow.concat', 'tf.concat', (['[read_vectors1, read_vectors2]'], {'axis': '(1)'}), '([read_vectors1, read_vectors2], axis=1)\n', (19413, 19453), True, 'import tensorflow as tf\n'), ((19892, 19903), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (19900, 19903), True, 'import tensorflow as tf\n'), ((19968, 19979), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (19976, 19979), True, 'import tensorflow as tf\n'), ((20044, 20055), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (20052, 20055), True, 'import tensorflow as tf\n'), ((20122, 20133), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (20130, 20133), True, 'import tensorflow as tf\n'), ((21420, 21471), 'tensorflow.cond', 'tf.cond', (['self.teacher_force[time - 1]', 'fn2_1', 'fn2_2'], {}), '(self.teacher_force[time - 1], fn2_1, fn2_2)\n', (21427, 21471), True, 'import tensorflow as tf\n'), ((21575, 21616), 'tensorflow.matmul', 'tf.matmul', (['feed_value', 'self.W_emb_decoder'], {}), '(feed_value, self.W_emb_decoder)\n', (21584, 21616), True, 'import tensorflow as tf\n'), ((21658, 21700), 'tensorflow.matmul', 'tf.matmul', (['feed_value', 'self.W_emb1_encoder'], {}), '(feed_value, self.W_emb1_encoder)\n', (21667, 21700), True, 'import tensorflow as tf\n'), ((26299, 26310), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (26307, 26310), True, 'import tensorflow as tf\n'), ((26312, 26323), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (26320, 26323), True, 'import 
tensorflow as tf\n'), ((26425, 26436), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (26433, 26436), True, 'import tensorflow as tf\n'), ((26438, 26449), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (26446, 26449), True, 'import tensorflow as tf\n'), ((29743, 29796), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[3][0]'], {'axis': '(1)'}), '(final_results[3][0], axis=1)\n', (29767, 29796), False, 'import utility\n'), ((29829, 29882), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[3][1]'], {'axis': '(1)'}), '(final_results[3][1], axis=1)\n', (29853, 29882), False, 'import utility\n'), ((29921, 29974), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[4][0]'], {'axis': '(1)'}), '(final_results[4][0], axis=1)\n', (29945, 29974), False, 'import utility\n'), ((30013, 30066), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[4][1]'], {'axis': '(1)'}), '(final_results[4][1], axis=1)\n', (30037, 30066), False, 'import utility\n'), ((30100, 30153), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[5][0]'], {'axis': '(1)'}), '(final_results[5][0], axis=1)\n', (30124, 30153), False, 'import utility\n'), ((30187, 30240), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[5][1]'], {'axis': '(1)'}), '(final_results[5][1], axis=1)\n', (30211, 30240), False, 'import utility\n'), ((30278, 30331), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[6][0]'], {'axis': '(1)'}), '(final_results[6][0], axis=1)\n', (30302, 30331), False, 'import utility\n'), ((30369, 30422), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[6][1]'], {'axis': '(1)'}), '(final_results[6][1], axis=1)\n', (30393, 30422), False, 'import utility\n'), ((30461, 30514), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[7][0]'], {'axis': '(1)'}), '(final_results[7][0], axis=1)\n', (30485, 
30514), False, 'import utility\n'), ((30553, 30606), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[7][1]'], {'axis': '(1)'}), '(final_results[7][1], axis=1)\n', (30577, 30606), False, 'import utility\n'), ((30642, 30695), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[8][0]'], {'axis': '(1)'}), '(final_results[8][0], axis=1)\n', (30666, 30695), False, 'import utility\n'), ((30731, 30784), 'utility.pack_into_tensor', 'utility.pack_into_tensor', (['final_results[8][1]'], {'axis': '(1)'}), '(final_results[8][1], axis=1)\n', (30755, 30784), False, 'import utility\n'), ((36974, 36990), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (36988, 36990), True, 'import numpy as np\n'), ((37253, 37269), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (37267, 37269), True, 'import numpy as np\n'), ((37696, 37712), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (37710, 37712), True, 'import numpy as np\n'), ((2839, 2889), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (2868, 2889), True, 'import tensorflow as tf\n'), ((3027, 3077), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (3056, 3077), True, 'import tensorflow as tf\n'), ((3193, 3243), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (3222, 3243), True, 'import tensorflow as tf\n'), ((3388, 3438), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (3417, 3438), True, 'import tensorflow as tf\n'), ((3582, 3632), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (3611, 3632), 
True, 'import tensorflow as tf\n'), ((3750, 3800), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-1)', 'maxval': '(1)'}), '(minval=-1, maxval=1)\n', (3779, 3800), True, 'import tensorflow as tf\n'), ((13217, 13286), 'tensorflow.reshape', 'tf.reshape', (['values', '[self.batch_size, -1, self.hidden_controller_dim]'], {}), '(values, [self.batch_size, -1, self.hidden_controller_dim])\n', (13227, 13286), True, 'import tensorflow as tf\n'), ((13801, 13837), 'tensorflow.reshape', 'tf.reshape', (['v', '[-1, self.attend_dim]'], {}), '(v, [-1, self.attend_dim])\n', (13811, 13837), True, 'import tensorflow as tf\n'), ((13948, 13987), 'tensorflow.reshape', 'tf.reshape', (['eijs', '[self.batch_size, -1]'], {}), '(eijs, [self.batch_size, -1])\n', (13958, 13987), True, 'import tensorflow as tf\n'), ((14027, 14039), 'tensorflow.exp', 'tf.exp', (['eijs'], {}), '(eijs)\n', (14033, 14039), True, 'import tensorflow as tf\n'), ((14260, 14322), 'tensorflow.reshape', 'tf.reshape', (['att', '[self.batch_size, self.hidden_controller_dim]'], {}), '(att, [self.batch_size, self.hidden_controller_dim])\n', (14270, 14322), True, 'import tensorflow as tf\n'), ((14360, 14391), 'tensorflow.concat', 'tf.concat', (['[step, att]'], {'axis': '(-1)'}), '([step, att], axis=-1)\n', (14369, 14391), True, 'import tensorflow as tf\n'), ((14873, 14897), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['c_l', 'h_l'], {}), '(c_l, h_l)\n', (14887, 14897), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((14898, 14922), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['c_r', 'h_r'], {}), '(c_r, h_r)\n', (14912, 14922), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((27657, 27701), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['self.cur_c[0]', 'self.cur_h[0]'], {}), '(self.cur_c[0], self.cur_h[0])\n', (27671, 27701), False, 'from 
tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((27702, 27746), 'tensorflow.python.ops.rnn_cell.LSTMStateTuple', 'LSTMStateTuple', (['self.cur_c[1]', 'self.cur_h[1]'], {}), '(self.cur_c[1], self.cur_h[1])\n', (27716, 27746), False, 'from tensorflow.python.ops.rnn_cell import LSTMStateTuple\n'), ((31980, 32121), 'tensorflow.slice', 'tf.slice', (['self.target_output', '[0, self.decoder_point, 0]', '[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]'], {}), '(self.target_output, [0, self.decoder_point, 0], [self.batch_size, \n self.sequence_length - self.decoder_point, self.output_size])\n', (31988, 32121), True, 'import tensorflow as tf\n'), ((32165, 32294), 'tensorflow.slice', 'tf.slice', (['output', '[0, self.decoder_point, 0]', '[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]'], {}), '(output, [0, self.decoder_point, 0], [self.batch_size, self.\n sequence_length - self.decoder_point, self.output_size])\n', (32173, 32294), True, 'import tensorflow as tf\n'), ((32510, 32549), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-clip_s)', 'clip_s'], {}), '(grad, -clip_s, clip_s)\n', (32526, 32549), True, 'import tensorflow as tf\n'), ((33025, 33166), 'tensorflow.slice', 'tf.slice', (['self.target_output', '[0, self.decoder_point, 0]', '[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]'], {}), '(self.target_output, [0, self.decoder_point, 0], [self.batch_size, \n self.sequence_length - self.decoder_point, self.output_size])\n', (33033, 33166), True, 'import tensorflow as tf\n'), ((33210, 33339), 'tensorflow.slice', 'tf.slice', (['output', '[0, self.decoder_point, 0]', '[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]'], {}), '(output, [0, self.decoder_point, 0], [self.batch_size, self.\n sequence_length - self.decoder_point, self.output_size])\n', (33218, 33339), True, 'import tensorflow as tf\n'), ((33546, 33585), 
'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-clip_s)', 'clip_s'], {}), '(grad, -clip_s, clip_s)\n', (33562, 33585), True, 'import tensorflow as tf\n'), ((34480, 34519), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-clip_s)', 'clip_s'], {}), '(grad, -clip_s, clip_s)\n', (34496, 34519), True, 'import tensorflow as tf\n'), ((35786, 35810), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (35808, 35810), True, 'import tensorflow as tf\n'), ((36210, 36234), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (36232, 36234), True, 'import tensorflow as tf\n'), ((8550, 8593), 'numpy.zeros', 'np.zeros', (['[self.batch_size, self.words_num]'], {}), '([self.batch_size, self.words_num])\n', (8558, 8593), True, 'import numpy as np\n'), ((8854, 8897), 'numpy.zeros', 'np.zeros', (['[self.batch_size, self.words_num]'], {}), '([self.batch_size, self.words_num])\n', (8862, 8897), True, 'import numpy as np\n'), ((13106, 13151), 'tensorflow.range', 'tf.range', (['from_steps[cci]', 'self.decoder_point'], {}), '(from_steps[cci], self.decoder_point)\n', (13114, 13151), True, 'import tensorflow as tf\n'), ((13878, 13905), 'tensorflow.expand_dims', 'tf.expand_dims', (['v_a[cci]', '(1)'], {}), '(v_a[cci], 1)\n', (13892, 13905), True, 'import tensorflow as tf\n'), ((37611, 37628), 'numpy.exp', 'np.exp', (['(epoch / k)'], {}), '(epoch / k)\n', (37617, 37628), True, 'import numpy as np\n'), ((7580, 7629), 'numpy.ones', 'np.ones', (['[self.batch_size, hidden_controller_dim]'], {}), '([self.batch_size, hidden_controller_dim])\n', (7587, 7629), True, 'import numpy as np\n'), ((7874, 7923), 'numpy.ones', 'np.ones', (['[self.batch_size, hidden_controller_dim]'], {}), '([self.batch_size, hidden_controller_dim])\n', (7881, 7923), True, 'import numpy as np\n'), ((8228, 8286), 'numpy.ones', 'np.ones', (['[self.batch_size, self.words_num, self.word_size]'], {}), '([self.batch_size, self.words_num, self.word_size])\n', 
(8235, 8286), True, 'import numpy as np\n'), ((9169, 9227), 'numpy.ones', 'np.ones', (['[self.batch_size, self.words_num, self.words_num]'], {}), '([self.batch_size, self.words_num, self.words_num])\n', (9176, 9227), True, 'import numpy as np\n'), ((9497, 9539), 'numpy.ones', 'np.ones', (['[self.batch_size, self.words_num]'], {}), '([self.batch_size, self.words_num])\n', (9504, 9539), True, 'import numpy as np\n'), ((9826, 9885), 'numpy.ones', 'np.ones', (['[self.batch_size, self.words_num, self.read_heads]'], {}), '([self.batch_size, self.words_num, self.read_heads])\n', (9833, 9885), True, 'import numpy as np\n'), ((10169, 10228), 'numpy.ones', 'np.ones', (['[self.batch_size, self.word_size, self.read_heads]'], {}), '([self.batch_size, self.word_size, self.read_heads])\n', (10176, 10228), True, 'import numpy as np\n'), ((14087, 14109), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exps', '(1)'], {}), '(exps, 1)\n', (14100, 14109), True, 'import tensorflow as tf\n'), ((14190, 14215), 'tensorflow.expand_dims', 'tf.expand_dims', (['alphas', '(2)'], {}), '(alphas, 2)\n', (14204, 14215), True, 'import tensorflow as tf\n'), ((13383, 13444), 'tensorflow.reshape', 'tf.reshape', (['encoder_outputs', '[-1, self.hidden_controller_dim]'], {}), '(encoder_outputs, [-1, self.hidden_controller_dim])\n', (13393, 13444), True, 'import tensorflow as tf\n'), ((13608, 13678), 'tensorflow.reshape', 'tf.reshape', (['controller_state[cci][0]', '[-1, self.hidden_controller_dim]'], {}), '(controller_state[cci][0], [-1, self.hidden_controller_dim])\n', (13618, 13678), True, 'import tensorflow as tf\n')] |
import matplotlib as plt
plt.use('agg')
import sys
import numpy as np
import ngene as ng
import pylab as plt
import ccgpack as ccg
from glob import glob
import tensorflow as tf
from random import choice,shuffle
from matplotlib.colors import LogNorm
#print( ' *cnn* : cnn without any dropout , with kernel size = 5, filters =36, iters = 300 , Gu:[1e-9 , 1e-6]. applied on ffp10 simulations ' )
n_conv = int(sys.argv[1])
dofilt = sys.argv[2]
def get_slice(data,nx,ny):
    """Pick a random nx-by-ny window of a 2D array.

    Parameters
    ----------
    data : 2D ndarray whose shape defines the valid window positions
    nx, ny : requested window size per axis; 0 (or the full axis
        length) selects the entire axis.

    Returns
    -------
    (slice, slice)
        Row and column slices describing the window.
    """
    rows, cols = data.shape
    # Row window: either the whole axis or a random offset.
    if nx in (0, rows):
        row_slice = slice(0, rows)
    else:
        start = np.random.randint(0, rows - nx)
        row_slice = slice(start, start + nx)
    # Column window, chosen independently of the row window.
    if ny in (0, cols):
        col_slice = slice(0, cols)
    else:
        start = np.random.randint(0, cols - ny)
        col_slice = slice(start, start + ny)
    return row_slice, col_slice
class DataProvider(object):
    """Serves random training batches built from noise maps and string-signal maps.

    A small buffer of .npy maps is kept in memory and periodically
    reshuffled/reloaded from disk.
    """
    def __init__(self,n_files,s_files,gmus,
                 nx=0,ny=0,n_buffer=10,
                 reload_rate=100,filt=None):
        """
        n_files : list of paths to noise .npy maps
        s_files : list of paths to string-signal .npy maps
        gmus    : candidate string tensions (G*mu) to sample labels from
        nx, ny  : random-crop size per axis (0 = use the full axis)
        n_buffer: number of map pairs held in memory at once
        reload_rate : reload the buffer every this many get_data() calls
        filt    : optional callable applied to each mixed map (default: identity)
        """
        self.n_files = n_files
        self.s_files = s_files
        nmin = min(len(n_files),len(s_files))
        # If the buffer can hold every available file, disable reloading.
        if n_buffer>= nmin:
            n_buffer = nmin
            self.reload_rate = 0
        else:
            self.reload_rate = reload_rate
        self.nx,self.ny = nx,ny
        self.n_buffer = n_buffer
        self.gmus = gmus
        if filt is None:
            # Default filter is the identity.
            def filt(x):
                return x
        self.filt = filt
        self.counter = 0
        self.reload()
    def reload(self):
        """Refill the in-memory buffer with a fresh random selection of files."""
        print('Data provider is reloading...')
        self.n_set = []
        self.s_set = []
#        self.d_set = []
        ninds = np.arange(len(self.n_files))
        sinds = np.arange(len(self.s_files))
        shuffle(ninds)
        shuffle(sinds)
        for i in range(self.n_buffer):
            filen = self.n_files[ninds[i]]
            files = self.s_files[sinds[i]]
            self.n_set.append(np.load(filen))
            signal = np.load(files)
            self.s_set.append(signal)
#            if self.filt:
#                self.d_set.append(self.filt(signal))
#            else:
#                self.d_set.append(signal)
#
    def get_data(self):
        """Return one random (noise, signal) pair; reload the buffer periodically."""
        self.counter += 1
        if self.reload_rate:
            if self.counter%self.reload_rate==0:
                self.reload()
        # Noise and signal maps are drawn independently of each other.
        n = choice(self.n_set)
        sind = choice(np.arange(self.n_buffer))
        s = self.s_set[sind]
#        d = self.d_set[sind]
        return n,s#,d
    def pre_process(self, n, s, gmu):
        """Crop both maps, mix as n + gmu*s, apply the filter, add a channel axis."""
        nslice = get_slice(n,self.nx,self.ny)
        n = n[nslice]
        # Note: the signal crop position is chosen independently of the noise crop.
        sslice = get_slice(s,self.nx,self.ny)
        s = s[sslice]
        sn = n + gmu*s
        sn = self.filt(sn)
#        d = d[sslice]
        sn = np.expand_dims(sn,-1)
#        d = np.expand_dims(d,-1)
        return sn#,d
    def __call__(self, n, gmus=None):
        """Build a batch of n samples; labels are -log(gmu + 1e-30)."""
        if gmus is None:
            gmus = self.gmus
#        x,y = self.get_data()
        X = []
        Y = []
        for i in range(n):
            # NOTE(review): this rebinding shadows the batch-size argument `n`;
            # harmless because range(n) was already evaluated, but confusing.
            n,s = self.get_data()
            gmu = choice(gmus)
            sn = self.pre_process(n,s,gmu)
            # NOTE(review): sn-sn+gmu is a constant map equal to gmu, so the
            # network input carries only the label -- looks like leftover debug
            # code; presumably this should be X.append(sn). Confirm.
            X.append(sn-sn+gmu)
            Y.append(-np.log(gmu+1e-30))
        X = np.array(X)
        Y = np.array(Y)
        return X,Y#[:,None]
def arch_maker(x,n_conv):
    """Build a small CNN regressor: n_conv (conv -> avg-pool) stages,
    then flatten and two dense layers down to a single scalar output.

    Layer tensors are printed as the graph is built, for inspection.
    """
    net = x
    for _ in range(n_conv):
        net = tf.layers.conv2d(net, filters=16, kernel_size=5,
                               strides=(1, 1), padding='same',
                               activation=tf.nn.relu)
        net = tf.layers.average_pooling2d(net, pool_size=2, strides=2)
        print(net)
    net = tf.contrib.layers.flatten(net)
    print(net)
    net = tf.layers.dense(net, 10, activation=tf.nn.relu)
    print(net)
    out = tf.layers.dense(net, 1, activation=tf.nn.relu)
    print(net)
    return out
def arch(x):
    """Architecture callback handed to ng.Model: a CNN of the configured depth."""
    return arch_maker(x, n_conv=n_conv)
#def loss(y_true,x_out):
#    return 1e3*tf.reduce_mean(tf.pow(y_true-x_out,2))
# Training hyper-parameters.
training_epochs = 10
iterations=10
n_s = 50
learning_rate = 0.05
# Noise (healpix) maps and string-signal maps used to build training pairs.
g_files = sorted(glob('./data/training_set/healpix_p/*.npy'))
s_files = sorted(glob('./data/training_set/string_p/*.npy'))
if len(g_files)*len(s_files)==0:
    print('Somthing is wrong with initiation.')
    exit()
if dofilt[0]=='y':
    # Optional pre-filtering of each map (edge detector from ccgpack).
    def filt(x):
        return ccg.filters(x,edd_method='sch')
else:
    filt = None
#omin,omax = np.log(self.gmb[0]),np.log(self.gmb[1])
#gmus = np.exp(np.random.uniform(omin,omax,n))
# Candidate string tensions: 0 plus 10 log-spaced values in [5e-8, 5e-5].
gmus = [0]+list(5*10**np.linspace(-8 , -5 , 10))
dp = DataProvider(g_files,s_files,
                  gmus=gmus,
                  nx=50,ny=50,n_buffer=10,
                  reload_rate=10e5,filt=filt)
#x,y = dp(4,gmus=np.array(4*[1e-3]))
#print x.shape
#print y.shape
#fig,ax=plt.subplots(1,1,figsize=(5,5))
#ax.imshow(x[0,:,:,0],norm=LogNorm(),cmap=plt.get_cmap('jet'))
#plt.title('G + Gu*S')
#plt.savefig('x_lognorm ')
#fig,ax=plt.subplots(1,1,figsize=(5,5))
#ax.imshow(x[0,:,:,0])
#plt.title('G + Gu*S')
#plt.savefig('x')
#exit()
# Model directory encodes the depth and whether filtering is enabled.
model_add='./model/'+str(n_conv)+'_layers_'+dofilt+'/'
model = ng.Model(dp,restore=1,
                model_add=model_add,
                arch=arch)#,loss=loss)
# NOTE(review): dividing by 1.02**1000 looks like it resumes a previously
# decayed schedule after restoring the model -- confirm this is intentional.
learning_rate = learning_rate/(1.02)**1000
for ii in range(4000):
    model.train(data_provider=dp,training_epochs=training_epochs,
                iterations=iterations,n_s=n_s,
                learning_rate=learning_rate, verbose=1)
    # Exponential learning-rate decay between training rounds.
    learning_rate = learning_rate/1.02
#x,y = dp_total(1000)
#pred = sess.run(y_out, feed_dict={x_in: x})
#d=abs(pred-y)/y
#delta=np.mean(d)
#print('accuracy =' , 100*(1-delta))
#r_squared = 1 - ( np.sum((y-pred)**2)/ np.sum((y-np.mean(y))**2) )
#print('r_squared =' ,r_squared)
#measurable= (1-r_squared) * (1e-6 - 1e-9)
#print('min_measurable=' , measurable)
#"""
#plt.loglog(y , y , 'r--' , label='Gu_pred = Gu_fid')
#plt.axvline(x = measurable*1e9 , color='g' , label= 'min measurable')
#plt.loglog(y , pred,'b.')
#plt.xlabel('Gu_fid')
#plt.ylabel('Gu_pred')
#plt.title('ff10')
##plt.legend(bbox_to_anchor=(1.05, 1),loc =2 , borderaxespad=0.)
#plt.savefig( '1' , bbox_inches='tight')
#"""
#end_time = time.time()
#print('duration=' , timedelta(seconds=end_time - start_time))
#print('Done! :) ')
| [
"numpy.load",
"numpy.log",
"tensorflow.contrib.layers.flatten",
"random.shuffle",
"tensorflow.layers.dense",
"ccgpack.filters",
"random.choice",
"numpy.expand_dims",
"tensorflow.layers.average_pooling2d",
"numpy.random.randint",
"pylab.use",
"numpy.array",
"tensorflow.layers.conv2d",
"glob... | [((25, 39), 'pylab.use', 'plt.use', (['"""agg"""'], {}), "('agg')\n", (32, 39), True, 'import pylab as plt\n'), ((5376, 5431), 'ngene.Model', 'ng.Model', (['dp'], {'restore': '(1)', 'model_add': 'model_add', 'arch': 'arch'}), '(dp, restore=1, model_add=model_add, arch=arch)\n', (5384, 5431), True, 'import ngene as ng\n'), ((3944, 3972), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['x'], {}), '(x)\n', (3969, 3972), True, 'import tensorflow as tf\n'), ((3994, 4039), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(10)'], {'activation': 'tf.nn.relu'}), '(x, 10, activation=tf.nn.relu)\n', (4009, 4039), True, 'import tensorflow as tf\n'), ((4062, 4106), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(1)'], {'activation': 'tf.nn.relu'}), '(x, 1, activation=tf.nn.relu)\n', (4077, 4106), True, 'import tensorflow as tf\n'), ((4353, 4396), 'glob.glob', 'glob', (['"""./data/training_set/healpix_p/*.npy"""'], {}), "('./data/training_set/healpix_p/*.npy')\n", (4357, 4396), False, 'from glob import glob\n'), ((4415, 4457), 'glob.glob', 'glob', (['"""./data/training_set/string_p/*.npy"""'], {}), "('./data/training_set/string_p/*.npy')\n", (4419, 4457), False, 'from glob import glob\n'), ((632, 661), 'numpy.random.randint', 'np.random.randint', (['(0)', '(lx - nx)'], {}), '(0, lx - nx)\n', (649, 661), True, 'import numpy as np\n'), ((807, 836), 'numpy.random.randint', 'np.random.randint', (['(0)', '(ly - ny)'], {}), '(0, ly - ny)\n', (824, 836), True, 'import numpy as np\n'), ((1855, 1869), 'random.shuffle', 'shuffle', (['ninds'], {}), '(ninds)\n', (1862, 1869), False, 'from random import choice, shuffle\n'), ((1878, 1892), 'random.shuffle', 'shuffle', (['sinds'], {}), '(sinds)\n', (1885, 1892), False, 'from random import choice, shuffle\n'), ((2469, 2487), 'random.choice', 'choice', (['self.n_set'], {}), '(self.n_set)\n', (2475, 2487), False, 'from random import choice, shuffle\n'), ((2893, 2915), 'numpy.expand_dims', 
'np.expand_dims', (['sn', '(-1)'], {}), '(sn, -1)\n', (2907, 2915), True, 'import numpy as np\n'), ((3383, 3394), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3391, 3394), True, 'import numpy as np\n'), ((3407, 3418), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3415, 3418), True, 'import numpy as np\n'), ((3694, 3800), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': '(16)', 'kernel_size': '(5)', 'strides': '(1, 1)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(x, filters=16, kernel_size=5, strides=(1, 1), padding=\n 'same', activation=tf.nn.relu)\n", (3710, 3800), True, 'import tensorflow as tf\n'), ((3865, 3919), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', (['x'], {'pool_size': '(2)', 'strides': '(2)'}), '(x, pool_size=2, strides=2)\n', (3892, 3919), True, 'import tensorflow as tf\n'), ((4604, 4636), 'ccgpack.filters', 'ccg.filters', (['x'], {'edd_method': '"""sch"""'}), "(x, edd_method='sch')\n", (4615, 4636), True, 'import ccgpack as ccg\n'), ((2085, 2099), 'numpy.load', 'np.load', (['files'], {}), '(files)\n', (2092, 2099), True, 'import numpy as np\n'), ((2510, 2534), 'numpy.arange', 'np.arange', (['self.n_buffer'], {}), '(self.n_buffer)\n', (2519, 2534), True, 'import numpy as np\n'), ((3229, 3241), 'random.choice', 'choice', (['gmus'], {}), '(gmus)\n', (3235, 3241), False, 'from random import choice, shuffle\n'), ((2048, 2062), 'numpy.load', 'np.load', (['filen'], {}), '(filen)\n', (2055, 2062), True, 'import numpy as np\n'), ((4781, 4804), 'numpy.linspace', 'np.linspace', (['(-8)', '(-5)', '(10)'], {}), '(-8, -5, 10)\n', (4792, 4804), True, 'import numpy as np\n'), ((3339, 3358), 'numpy.log', 'np.log', (['(gmu + 1e-30)'], {}), '(gmu + 1e-30)\n', (3345, 3358), True, 'import numpy as np\n')] |
#import json
#import os
#import random
import numpy as np
from Snake import Snake
from Board import Board
class Direction():
    """Outcomes of travelling one step in a given (i, j) direction.

    Wraps collision checks (wall / own neck / rival snakes) and directional
    statistics (food and body density) used to score a candidate move.
    """

    def __init__(self, vectorI, vectorJ, boardData):
        # (i, j) is the candidate step: i moves along x, j along y
        # (positive j is "down" on the board).
        self.i = vectorI
        self.j = vectorJ
        self.s = Snake(boardData)
        self.b = Board(boardData)
        self.turn = boardData['turn']
        self.opponents = boardData['board']['snakes']

    def collideWall(self):
        """
        Returns
        -------
        bool
            whether direction results in collision with a wall.
        """
        x = self.s.headX + self.i
        y = self.s.headY + self.j
        return x == -1 or x == self.b.width or y == -1 or y == self.b.height

    def collideSelf(self):
        """
        Returns
        -------
        bool
            whether direction results in collision with our neck.
        """
        if self.turn >= 1:  # no neck on the first turn
            # 'and' replaces the original bitwise '&'; identical on bools,
            # clearer intent and short-circuiting.
            if (self.s.headX + self.i == self.s.neckX) and \
                    (self.s.headY + self.j == self.s.neckY):
                return True
        return False

    def collideOpponent(self):
        """
        Returns
        -------
        bool
            whether direction results in collision with a rival snake.
        """
        # TODO add cells surrounding opponent's head to forbidden list
        # TODO allow eating opponent's neck
        forbidden = set()  # set membership test is O(1) vs O(n) for a list
        for snake in self.opponents:
            for pos in snake['body']:
                forbidden.add((pos['x'], pos['y']))
        return (self.s.headX + self.i, self.s.headY + self.j) in forbidden

    def numOpponents(self):
        """Returns the number of opponent snakes in a given direction.

        BUGFIX: the method was declared without 'self', so any instance
        call previously raised TypeError.
        """
        # TODO actually count opponents ahead
        return 0

    def _direction_stats(self, points):
        """Directional statistics shared by numBody() and numFood().

        Parameters
        ----------
        points : iterable of dict
            Tiles with 'x'/'y' keys to test against the travel direction.

        Returns
        -------
        float
            Density: count of tiles ahead divided by the number of board
            cells on that side of the head (0.0 when that side has no
            cells, e.g. the head already touches the wall).
        list of int
            Manhattan distance from the head to each tile ahead.

        BUGFIX: previously 'space' was only assigned inside the loop over
        tiles, so an empty board raised NameError and a head at the board
        edge raised ZeroDivisionError.
        """
        hx, hy = self.s.headX, self.s.headY
        if self.i < 0:  # moving left
            ahead = lambda p: p['x'] < hx
            space = hx * self.b.height
        elif self.i > 0:  # moving right
            ahead = lambda p: p['x'] > hx
            space = (self.b.width - hx - 1) * self.b.height
        elif self.j > 0:  # moving down
            ahead = lambda p: p['y'] > hy
            space = (self.b.height - hy - 1) * self.b.width
        else:  # moving up (j < 0); a (0, 0) direction is treated as up
            ahead = lambda p: p['y'] < hy
            space = hy * self.b.width
        turns = [abs(hx - p['x']) + abs(hy - p['y'])
                 for p in points if ahead(p)]
        density = len(turns) / space if space else 0.0
        return density, turns

    def numBody(self):
        """
        Returns
        -------
        float
            Density of snake-body tiles in the given direction.
        int
            Minimum number of turns to any body tile (0 when none ahead).
        float
            Mean number of turns to body tiles (height+width when none
            ahead, mirroring the original fallback).
        """
        tiles = [tile for snake in self.b.snakes for tile in snake['body']]
        density, turns = self._direction_stats(tiles)
        if turns:
            return density, min(turns), np.mean(turns)
        return density, 0, self.b.height + self.b.width

    def numFood(self):
        """
        Returns
        -------
        float
            Density of food sources in the given direction.
        int
            Minimum number of turns to any food (height+width when none).
        float
            Mean number of turns to food (height+width when none).
        """
        density, turns = self._direction_stats(self.b.foodSources)
        if turns:
            return density, min(turns), np.mean(turns)
        far = self.b.height + self.b.width
        return density, far, far

    def getReward(self):
        """Returns the reward (benefit of travel) for this direction.

        A certain collision scores -999; otherwise food proximity is
        rewarded and body proximity penalised.

        BUGFIX: the original fell through to the logging block even on a
        collision, recomputing numFood()/numBody() there and crashing
        (ZeroDivisionError) when the head was against a wall; we now
        return early on a collision.
        """
        if self.collideWall() or self.collideSelf() or self.collideOpponent():
            return -999
        # Hoist the statistics: the original recomputed numFood()/numBody()
        # for every reward term and every log line (pure but wasteful).
        food_density, food_min, food_mean = self.numFood()
        body_density, body_min, _ = self.numBody()
        span = self.b.width + self.b.height
        reward = (0.2 * food_density +                # food density
                  0.4 * (1 - food_min / span) +       # min turns to food
                  0.2 * (1 - food_mean / span) +      # mean turns to food
                  0.1 * -body_density +               # body density
                  0.1 * -(1 - body_min / span))       # min turns to body
        # logging
        print('On turn {0}, rewards for direction ({1},{2}) were:'.format(self.turn, self.i, self.j))
        print('food density: ', 0.2 * food_density)
        print('food min t: ', 0.4 * (1 - food_min / span))
        print('food mean t:', 0.2 * (1 - food_mean / span))
        print('body density: ', 0.1 * -body_density)
        print('min turns to body: ', 0.1 * (body_min / span))
        return reward
| [
"numpy.mean",
"Snake.Snake",
"Board.Board"
] | [((345, 361), 'Snake.Snake', 'Snake', (['boardData'], {}), '(boardData)\n', (350, 361), False, 'from Snake import Snake\n'), ((378, 394), 'Board.Board', 'Board', (['boardData'], {}), '(boardData)\n', (383, 394), False, 'from Board import Board\n'), ((3735, 3750), 'numpy.mean', 'np.mean', (['nTurns'], {}), '(nTurns)\n', (3742, 3750), True, 'import numpy as np\n'), ((5520, 5535), 'numpy.mean', 'np.mean', (['nTurns'], {}), '(nTurns)\n', (5527, 5535), True, 'import numpy as np\n')] |
# to come
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def _plotResult(test_predictions, label_data):
    """Plot predictions vs. ground truth plus their per-sample residuals.

    Parameters
    ----------
    test_predictions : 1-D array of model outputs (arc minutes).
    label_data : 1-D array of ground-truth values, same length.
    """
    plt.figure(figsize=(20, 15), dpi=60)
    MEDIUM_SIZE = 10
    BIGGER_SIZE = 14
    plt.rc('font', size=MEDIUM_SIZE)
    plt.rc('axes', titlesize=BIGGER_SIZE)
    plt.rc('axes', labelsize=BIGGER_SIZE)
    plt.rc('xtick', labelsize=BIGGER_SIZE)
    plt.rc('ytick', labelsize=BIGGER_SIZE)
    plt.rc('legend', fontsize=BIGGER_SIZE)
    plt.rc('figure', titlesize=BIGGER_SIZE)

    # Top panel: prediction and ground truth overlaid.
    plt.subplot(2, 1, 1)
    plt.plot(test_predictions, 'k', linewidth=4)
    plt.plot(label_data, '0.2')
    plt.title('Prediction of Model and Ground Truth')
    plt.ylabel('Arc Minute')
    plt.xlabel('Sample number')
    # BUGFIX: the legend call carried a stray trailing comma (building a
    # throwaway tuple) and the predictions were plotted a second time,
    # drawing a duplicate, differently-coloured line over the panel.
    plt.legend(['Model Prediction', 'Ground Truth'], loc='lower right')
    plt.grid(True)
    plt.ylim(-6.1, 6.1)

    # Bottom panel: stem plot of the residuals with a +/-1 arcmin band.
    plt.subplot(2, 1, 2)
    plt.title('Differenz of Prediction and Ground Truth')
    plt.ylabel('Arc Minute')
    plt.xlabel('Sample number')
    # Generalized: use the actual sample count instead of the hard-coded
    # 101-point axis (np.linspace(0, 100, 101)).
    plt.stem(np.arange(len(label_data)), label_data - test_predictions, 'k',
             use_line_collection=True, markerfmt='ko', basefmt='k')
    plt.ylim(-3.1, 3.1)
    plt.grid(True)
    plt.plot([0, len(label_data)], [1, 1], '--k')
    plt.plot([0, len(label_data)], [-1, -1], '--k')
    plt.show()
def main():
    """Load the trained model and the held-out test set, predict, and plot.

    Reads 'SaveModel.h5', 'dataTest.npy' and 'labelTest.npy' from the
    working directory.
    """
    model = tf.keras.models.load_model('SaveModel.h5')
    print(model.summary())
    test_data = np.load('dataTest.npy')
    label_data = np.load('labelTest.npy')
    # BUGFIX: Keras reports 'channels_first' / 'channels_last' (plural);
    # the original compared against 'channel_first', so the first branch
    # could never be taken.
    if tf.keras.backend.image_data_format() == 'channels_first':
        test_data = test_data.reshape(test_data.shape[0], 1, test_data.shape[1], test_data.shape[2])
    else:
        test_data = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], 1)
    test_predictions = model.predict(test_data).flatten()
    _plotResult(test_predictions, label_data)
# Entry point: evaluate the saved model when run as a script.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.show",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotli... | [((137, 173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)', 'dpi': '(60)'}), '(figsize=(20, 15), dpi=60)\n', (147, 173), True, 'import matplotlib.pyplot as plt\n'), ((221, 253), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'MEDIUM_SIZE'}), "('font', size=MEDIUM_SIZE)\n", (227, 253), True, 'import matplotlib.pyplot as plt\n'), ((266, 303), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'BIGGER_SIZE'}), "('axes', titlesize=BIGGER_SIZE)\n", (272, 303), True, 'import matplotlib.pyplot as plt\n'), ((313, 350), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'BIGGER_SIZE'}), "('axes', labelsize=BIGGER_SIZE)\n", (319, 350), True, 'import matplotlib.pyplot as plt\n'), ((359, 397), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'BIGGER_SIZE'}), "('xtick', labelsize=BIGGER_SIZE)\n", (365, 397), True, 'import matplotlib.pyplot as plt\n'), ((406, 444), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'BIGGER_SIZE'}), "('ytick', labelsize=BIGGER_SIZE)\n", (412, 444), True, 'import matplotlib.pyplot as plt\n'), ((453, 491), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'BIGGER_SIZE'}), "('legend', fontsize=BIGGER_SIZE)\n", (459, 491), True, 'import matplotlib.pyplot as plt\n'), ((500, 539), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (506, 539), True, 'import matplotlib.pyplot as plt\n'), ((546, 566), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (557, 566), True, 'import matplotlib.pyplot as plt\n'), ((571, 615), 'matplotlib.pyplot.plot', 'plt.plot', (['test_predictions', '"""k"""'], {'linewidth': '(4)'}), "(test_predictions, 'k', linewidth=4)\n", (579, 615), True, 'import matplotlib.pyplot as plt\n'), ((620, 647), 'matplotlib.pyplot.plot', 'plt.plot', (['label_data', '"""0.2"""'], {}), "(label_data, '0.2')\n", 
(628, 647), True, 'import matplotlib.pyplot as plt\n'), ((652, 701), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction of Model and Ground Truth"""'], {}), "('Prediction of Model and Ground Truth')\n", (661, 701), True, 'import matplotlib.pyplot as plt\n'), ((706, 730), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Arc Minute"""'], {}), "('Arc Minute')\n", (716, 730), True, 'import matplotlib.pyplot as plt\n'), ((735, 762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample number"""'], {}), "('Sample number')\n", (745, 762), True, 'import matplotlib.pyplot as plt\n'), ((840, 854), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (848, 854), True, 'import matplotlib.pyplot as plt\n'), ((859, 885), 'matplotlib.pyplot.plot', 'plt.plot', (['test_predictions'], {}), '(test_predictions)\n', (867, 885), True, 'import matplotlib.pyplot as plt\n'), ((890, 909), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-6.1)', '(6.1)'], {}), '(-6.1, 6.1)\n', (898, 909), True, 'import matplotlib.pyplot as plt\n'), ((914, 934), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (925, 934), True, 'import matplotlib.pyplot as plt\n'), ((939, 992), 'matplotlib.pyplot.title', 'plt.title', (['"""Differenz of Prediction and Ground Truth"""'], {}), "('Differenz of Prediction and Ground Truth')\n", (948, 992), True, 'import matplotlib.pyplot as plt\n'), ((997, 1021), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Arc Minute"""'], {}), "('Arc Minute')\n", (1007, 1021), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample number"""'], {}), "('Sample number')\n", (1036, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1205), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3.1)', '(3.1)'], {}), '(-3.1, 3.1)\n', (1194, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1224), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1218, 1224), True, 
'import matplotlib.pyplot as plt\n'), ((1229, 1262), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 101]', '[1, 1]', '"""--k"""'], {}), "([0, 101], [1, 1], '--k')\n", (1237, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1302), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 101]', '[-1, -1]', '"""--k"""'], {}), "([0, 101], [-1, -1], '--k')\n", (1275, 1302), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1316), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1314, 1316), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1385), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""SaveModel.h5"""'], {}), "('SaveModel.h5')\n", (1369, 1385), True, 'import tensorflow as tf\n'), ((1431, 1454), 'numpy.load', 'np.load', (['"""dataTest.npy"""'], {}), "('dataTest.npy')\n", (1438, 1454), True, 'import numpy as np\n'), ((1472, 1496), 'numpy.load', 'np.load', (['"""labelTest.npy"""'], {}), "('labelTest.npy')\n", (1479, 1496), True, 'import numpy as np\n'), ((767, 834), 'matplotlib.pyplot.legend', 'plt.legend', (["['Model Prediction', 'Ground Truth']"], {'loc': '"""lower right"""'}), "(['Model Prediction', 'Ground Truth'], loc='lower right')\n", (777, 834), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1091), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(101)'], {}), '(0, 100, 101)\n', (1078, 1091), True, 'import numpy as np\n'), ((1505, 1541), 'tensorflow.keras.backend.image_data_format', 'tf.keras.backend.image_data_format', ([], {}), '()\n', (1539, 1541), True, 'import tensorflow as tf\n')] |
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
# Use a clean default template for all plotly figures.
pio.templates.default = "simple_white"
# Alternative renderer kept for reference: pio.renderers.default = "svg"
# (was needed in some environments to make figures display)
# Limit numpy array printing to 3 decimal places.
np.set_printoptions(precision=3)
def test_univariate_gaussian():
    """Fit a univariate Gaussian to N(10, 1) draws, demonstrate estimator
    consistency, and plot the empirical PDF (Questions 1-3)."""
    # Question 1 - Draw samples and print fitted model
    samples = np.random.normal(loc=10, scale=1, size=1000)
    full_fit = UnivariateGaussian()
    full_fit.fit(samples)
    print(f"({full_fit.mu_:.3f}, {full_fit.var_:.3f})")

    # Question 2 - Empirically showing sample mean is consistent
    partial_fit = UnivariateGaussian()
    sizes = list(range(10, 1001, 10))
    abs_errors = []
    for n in sizes:
        partial_fit.fit(samples[:n])
        abs_errors.append(np.abs(10 - partial_fit.mu_))
    fig1 = px.scatter(x=sizes, y=abs_errors,
                     title="Abs. distance of estimated expectation from true expectation as a function of sample size",
                     labels={'x': 'Sample Size', 'y': 'Absolute distance'})
    fig1.update_layout(title_x=0.5)
    fig1.show()

    # Question 3 - Plotting Empirical PDF of fitted model
    fig2 = px.scatter(x=samples, y=full_fit.pdf(samples),
                     title="PDF by sample value",
                     labels={'x': 'Sample value', 'y': 'PDF'})
    fig2.update_layout(title_x=0.5)
    fig2.show()
def test_multivariate_gaussian():
    """Fit a multivariate Gaussian and map the log-likelihood over a grid
    of candidate (f1, f3) expectation components (Questions 4-6)."""
    # Question 4 - Draw samples and print fitted model
    mu = [0, 0, 4, 0]
    cov = [[1, 0.2, 0, 0.5],
           [0.2, 2, 0, 0],
           [0, 0, 1, 0],
           [0.5, 0, 0, 1]]
    rand_data = np.random.multivariate_normal(mu, cov, 1000)
    esti = MultivariateGaussian()
    esti.fit(rand_data)
    print(esti.mu_)
    print(esti.cov_)

    # Question 5 - Likelihood evaluation
    f1 = np.linspace(-10, 10, 200)
    f3 = np.linspace(-10, 10, 200)
    likeli_matrix = np.zeros((200, 200))
    num_of_samples = 4
    cov_arr = np.array(cov)  # hoisted: was rebuilt on every grid cell
    for i in range(200):      # rows  -> f3 (heatmap y axis)
        for j in range(200):  # cols  -> f1 (heatmap x axis)
            # BUGFIX: rows must vary f3 and columns f1 so that the
            # heatmap axes (x=f1, y=f3) and the argmax report below read
            # out the right values; previously rows varied f1, silently
            # swapping the reported/plotted f1 and f3.
            likeli_matrix[i, j] = MultivariateGaussian.log_likelihood(
                np.array([f1[j], 0, f3[i], 0]), cov_arr,
                rand_data[:num_of_samples, :])
    fig4 = px.imshow(likeli_matrix, labels=dict(x="f1", y="f3", color="Log-Likelihood"),
                     x=f1,
                     y=f3,
                     color_continuous_scale=px.colors.sequential.YlOrRd,
                     origin="lower",
                     title="Log likelihood as a function of expectation estimator parameters", )
    fig4.update_layout(title_x=0.5)
    fig4.show()

    # Question 6 - Maximum likelihood
    # unravel_index gives (row, col) = (f3 index, f1 index).
    amax_tuple = np.unravel_index(np.argmax(likeli_matrix), likeli_matrix.shape)
    print(f"\nchecked over {num_of_samples} samples\n"
          f"\nmaximum log-likelihood: {np.amax(likeli_matrix):.3f}\n"
          f"argmax's are:\n"
          f"f1={f1[amax_tuple[1]]:.3f} f3={f3[amax_tuple[0]]:.3f}")
# Run both exercises with a fixed seed so printed estimates are reproducible.
if __name__ == '__main__':
    np.random.seed(0)
    test_univariate_gaussian()
    test_multivariate_gaussian()
| [
"IMLearn.learners.UnivariateGaussian",
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"IMLearn.learners.MultivariateGaussian",
"numpy.amax",
"numpy.random.multivariate_normal",
"numpy.array",
"numpy.random.normal",
"numpy.linspace",
"plotly.express... | [((363, 395), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (382, 395), True, 'import numpy as np\n'), ((506, 550), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(10)', 'scale': '(1)', 'size': '(1000)'}), '(loc=10, scale=1, size=1000)\n', (522, 550), True, 'import numpy as np\n'), ((562, 582), 'IMLearn.learners.UnivariateGaussian', 'UnivariateGaussian', ([], {}), '()\n', (580, 582), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((737, 757), 'IMLearn.learners.UnivariateGaussian', 'UnivariateGaussian', ([], {}), '()\n', (755, 757), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((774, 792), 'numpy.zeros', 'np.zeros', (['(2, 100)'], {}), '((2, 100))\n', (782, 792), True, 'import numpy as np\n'), ((946, 1156), 'plotly.express.scatter', 'px.scatter', ([], {'x': 'exp_dist[0, :]', 'y': 'exp_dist[1, :]', 'title': '"""Abs. distance of estimated expectation from true expectation as a function of sample size"""', 'labels': "{'x': 'Sample Size', 'y': 'Absolute distance'}"}), "(x=exp_dist[0, :], y=exp_dist[1, :], title=\n 'Abs. 
distance of estimated expectation from true expectation as a function of sample size'\n , labels={'x': 'Sample Size', 'y': 'Absolute distance'})\n", (956, 1156), True, 'import plotly.express as px\n'), ((1320, 1339), 'numpy.zeros', 'np.zeros', (['(2, 1000)'], {}), '((2, 1000))\n', (1328, 1339), True, 'import numpy as np\n'), ((1419, 1533), 'plotly.express.scatter', 'px.scatter', ([], {'x': 'pdfs[0, :]', 'y': 'pdfs[1, :]', 'title': '"""PDF by sample value"""', 'labels': "{'x': 'Sample value', 'y': 'PDF'}"}), "(x=pdfs[0, :], y=pdfs[1, :], title='PDF by sample value', labels=\n {'x': 'Sample value', 'y': 'PDF'})\n", (1429, 1533), True, 'import plotly.express as px\n'), ((1878, 1922), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'cov', '(1000)'], {}), '(mu, cov, 1000)\n', (1907, 1922), True, 'import numpy as np\n'), ((1935, 1957), 'IMLearn.learners.MultivariateGaussian', 'MultivariateGaussian', ([], {}), '()\n', (1955, 1957), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((2080, 2105), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(200)'], {}), '(-10, 10, 200)\n', (2091, 2105), True, 'import numpy as np\n'), ((2116, 2141), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(200)'], {}), '(-10, 10, 200)\n', (2127, 2141), True, 'import numpy as np\n'), ((2163, 2183), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (2171, 2183), True, 'import numpy as np\n'), ((3455, 3472), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3469, 3472), True, 'import numpy as np\n'), ((3144, 3168), 'numpy.argmax', 'np.argmax', (['likeli_matrix'], {}), '(likeli_matrix)\n', (3153, 3168), True, 'import numpy as np\n'), ((907, 931), 'numpy.abs', 'np.abs', (['(10 - tmp_est.mu_)'], {}), '(10 - tmp_est.mu_)\n', (913, 931), True, 'import numpy as np\n'), ((2427, 2457), 'numpy.array', 'np.array', (['[f1[i], 0, f3[j], 0]'], {}), '([f1[i], 0, f3[j], 0])\n', (2435, 2457), True, 'import numpy 
as np\n'), ((2459, 2472), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (2467, 2472), True, 'import numpy as np\n'), ((3287, 3309), 'numpy.amax', 'np.amax', (['likeli_matrix'], {}), '(likeli_matrix)\n', (3294, 3309), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# test_simulateSNR.py
# This module provides the tests for the simulateSNR function.
# Copyright 2014 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""This module provides the test class for the simulateSNR() function.
"""
from __future__ import division, print_function
import unittest
import pkg_resources
import scipy.io
import numpy as np
import deltasigma as ds
from scipy.signal import lti
class TestSimulateSNR(unittest.TestCase):
    """Test class for simulateSNR().

    Reference data is loaded from .mat files generated with the MATLAB
    delta sigma toolbox.
    """

    def setUp(self):
        pass

    def test_simulateSNR_1(self):
        """Test function for simulateSNR() 1/4"""
        # first test: f0 != 0 (bandpass modulator)
        # NOTE: the original comment claimed "f0 = 0", but f0 is 0.22 here.
        # Load test references
        fname = pkg_resources.resource_filename(__name__,
                                                "test_data/test_snr_amp.mat")
        amp_ref = scipy.io.loadmat(fname)['amp'].reshape((-1,))
        snr_ref = scipy.io.loadmat(fname)['snr'].reshape((-1,))
        amp_user_ref = scipy.io.loadmat(fname)['amp_user'].reshape((-1,))
        snr_user_ref = scipy.io.loadmat(fname)['snr_user'].reshape((-1,))
        order = 4
        osr = 256
        nlev = 2
        f0 = 0.22
        Hinf = 1.25
        form = 'CRFB'
        ntf = ds.synthesizeNTF(order, osr, 2, Hinf, f0)
        a1, g1, b1, c1 = ds.realizeNTF(ntf, form)
        ABCD = ds.stuffABCD(a1, g1, b1, c1, form)
        ABCD_ref = np.array([[1., -1.6252, 0, 0, -0.0789, 0.0789],
                             [1., -0.6252, 0, 0, -0.0756, 0.0756],
                             [0, 1., 1., -1.6252, -0.2758, 0.2758],
                             [0, 1., 1., -0.6252, 0.0843, -0.0843],
                             [0, 0, 0, 1., 1., 0]])
        self.assertTrue(np.allclose(ABCD, ABCD_ref, atol=9e-5, rtol=1e-4))
        # bonus test, mapABCD - realizeNTF - stuffABCD round trip
        a2, g2, b2, c2 = ds.mapABCD(ABCD, form)
        self.assertTrue(np.allclose(a1, a2, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(g1, g2, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(b1, b2, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(c1, c2, atol=1e-5, rtol=1e-5))
        # We do three tests:
        # SNR from ABCD matrix
        # SNR from NTF
        # SNR from LTI obj with user specified amplitudes
        snr, amp = ds.simulateSNR(ABCD, osr, None, f0, nlev)
        self.assertTrue(np.allclose(snr, snr_ref, atol=1, rtol=5e-2))
        self.assertTrue(np.allclose(amp, amp_ref, atol=5e-1, rtol=1e-2))
        snr2, amp2 = ds.simulateSNR(ntf, osr, None, f0, nlev)
        self.assertTrue(np.allclose(snr2, snr_ref, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(amp2, amp_ref, atol=1e-5, rtol=1e-5))
        amp_user = np.linspace(-100, 0, 200)[::10]
        snr_user, amp_user = ds.simulateSNR(lti(*ntf), osr=osr, amp=amp_user,
                                            f0=f0, nlev=nlev)
        self.assertTrue(np.allclose(snr_user, snr_user_ref[::10], atol=1e-5,
                                    rtol=1e-5))
        self.assertTrue(np.allclose(amp_user, amp_user_ref[::10], atol=1e-5,
                                    rtol=1e-5))

    def test_simulateSNR_2(self):
        """Test function for simulateSNR() 2/4"""
        # next test: f0 = 0 (lowpass modulator)
        # Load test references
        fname = pkg_resources.resource_filename(__name__,
                                                "test_data/test_snr_amp2.mat")
        amp_ref = scipy.io.loadmat(fname)['amp'].reshape((-1,))
        snr_ref = scipy.io.loadmat(fname)['snr'].reshape((-1,))
        ABCD_ref = scipy.io.loadmat(fname)['ABCD'].reshape((4, 5))
        order = 3
        osr = 256
        nlev = 2
        f0 = 0.
        Hinf = 1.25
        form = 'CIFB'
        ntf = ds.synthesizeNTF(order, osr, 2, Hinf, f0)
        a1, g1, b1, c1 = ds.realizeNTF(ntf, form)
        a1_ref = [0.008863535715733, 0.093216950269955, 0.444473912607388]
        g1_ref = [9.035620546615189e-05]
        b1_ref = [0.008863535715733, 0.093216950269955, 0.444473912607388, 1.]
        c1_ref = [1., 1., 1.]
        self.assertTrue(np.allclose(a1, a1_ref, atol=1e-9, rtol=5e-5))
        self.assertTrue(np.allclose(g1, g1_ref, atol=1e-9, rtol=5e-5))
        self.assertTrue(np.allclose(b1, b1_ref, atol=1e-9, rtol=1e-4))
        self.assertTrue(np.allclose(c1, c1_ref, atol=1e-9, rtol=2e-5))
        ABCD = ds.stuffABCD(a1, g1, b1, c1, form)
        self.assertTrue(np.allclose(ABCD, ABCD_ref, atol=9e-5, rtol=1e-4))
        snr, amp = ds.simulateSNR(ABCD, osr, None, f0, nlev)
        self.assertTrue(np.allclose(snr, snr_ref, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(amp, amp_ref, atol=1e-5, rtol=1e-5))

    def test_simulateSNR_3(self):
        """Test function for simulateSNR() 3/4"""
        # next test: amp is a scalar
        fname = pkg_resources.resource_filename(__name__,
                                                "test_data/test_snr_amp2.mat")
        amp_ref = scipy.io.loadmat(fname)['amp'].reshape((-1,))[0]
        snr_ref = scipy.io.loadmat(fname)['snr'].reshape((-1,))[0]
        ABCD = scipy.io.loadmat(fname)['ABCD'].reshape((4, 5))
        osr = 256
        nlev = 2
        f0 = 0.
        # (removed an unused synthesizeNTF() call and its order/Hinf/form
        #  parameters: the NTF it produced was never used in this test)
        snr, amp = ds.simulateSNR(ABCD, osr, amp_ref, f0, nlev)
        self.assertTrue(np.allclose(snr, snr_ref, atol=1e-5, rtol=1e-5))
        self.assertTrue(np.allclose(amp, amp_ref, atol=1e-5, rtol=1e-5))

    def test_simulateSNR_4(self):
        """Test function for simulateSNR() 4/4"""
        # quadrature modulator synthesized with synthesizeQNTF/realizeQNTF
        SNR_ref = np.array([23.0421, 32.1100, 43.3758, 53.1791,
                            65.5504, 70.5023, 73.4608, 76.2416, 77.8770,
                            78.2733, 79.3729, 79.5728, 80.8729, 82.7461,
                            83.0723, 84.8488, 84.3327])
        AMP_ref = np.array([-70, -60, -50, -40, -30, -20, -15, -10, -9, -8, -7,
                            -6, -5, -4, -3, -2, -1, 0])
        order = 4
        osr = 32
        M = 8
        NG = -50
        ING = -10
        f0 = 1./ 16
        form = 'PFB'
        nlev = M + 1
        # (removed unused locals quadrature, z0, bw, delta, FullScale and
        #  the commented-out plotting call)
        ntf0 = ds.synthesizeQNTF(order, osr, f0, NG, ING)
        ABCD = ds.realizeQNTF(ntf0, form, True)
        a, b = ds.simulateSNR(ABCD, osr, None, f0, nlev)
        # use self.assertTrue for consistency with the other tests
        # (previously these were bare asserts)
        self.assertTrue(np.allclose(a[6:], SNR_ref, atol=10, rtol=1e-3))
        self.assertTrue(np.allclose(b[5:], AMP_ref, atol=1))
| [
"deltasigma.realizeNTF",
"deltasigma.simulateSNR",
"deltasigma.mapABCD",
"numpy.allclose",
"pkg_resources.resource_filename",
"deltasigma.stuffABCD",
"numpy.array",
"deltasigma.synthesizeNTF",
"numpy.exp",
"deltasigma.realizeQNTF",
"numpy.linspace",
"deltasigma.synthesizeQNTF",
"scipy.signal... | [((1146, 1217), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""test_data/test_snr_amp.mat"""'], {}), "(__name__, 'test_data/test_snr_amp.mat')\n", (1177, 1217), False, 'import pkg_resources\n'), ((1671, 1712), 'deltasigma.synthesizeNTF', 'ds.synthesizeNTF', (['order', 'osr', '(2)', 'Hinf', 'f0'], {}), '(order, osr, 2, Hinf, f0)\n', (1687, 1712), True, 'import deltasigma as ds\n'), ((1738, 1762), 'deltasigma.realizeNTF', 'ds.realizeNTF', (['ntf', 'form'], {}), '(ntf, form)\n', (1751, 1762), True, 'import deltasigma as ds\n'), ((1778, 1812), 'deltasigma.stuffABCD', 'ds.stuffABCD', (['a1', 'g1', 'b1', 'c1', 'form'], {}), '(a1, g1, b1, c1, form)\n', (1790, 1812), True, 'import deltasigma as ds\n'), ((1833, 2036), 'numpy.array', 'np.array', (['[[1.0, -1.6252, 0, 0, -0.0789, 0.0789], [1.0, -0.6252, 0, 0, -0.0756, \n 0.0756], [0, 1.0, 1.0, -1.6252, -0.2758, 0.2758], [0, 1.0, 1.0, -0.6252,\n 0.0843, -0.0843], [0, 0, 0, 1.0, 1.0, 0]]'], {}), '([[1.0, -1.6252, 0, 0, -0.0789, 0.0789], [1.0, -0.6252, 0, 0, -\n 0.0756, 0.0756], [0, 1.0, 1.0, -1.6252, -0.2758, 0.2758], [0, 1.0, 1.0,\n -0.6252, 0.0843, -0.0843], [0, 0, 0, 1.0, 1.0, 0]])\n', (1841, 2036), True, 'import numpy as np\n'), ((2292, 2314), 'deltasigma.mapABCD', 'ds.mapABCD', (['ABCD', 'form'], {}), '(ABCD, form)\n', (2302, 2314), True, 'import deltasigma as ds\n'), ((2744, 2785), 'deltasigma.simulateSNR', 'ds.simulateSNR', (['ABCD', 'osr', 'None', 'f0', 'nlev'], {}), '(ABCD, osr, None, f0, nlev)\n', (2758, 2785), True, 'import deltasigma as ds\n'), ((2950, 2990), 'deltasigma.simulateSNR', 'ds.simulateSNR', (['ntf', 'osr', 'None', 'f0', 'nlev'], {}), '(ntf, osr, None, f0, nlev)\n', (2964, 2990), True, 'import deltasigma as ds\n'), ((3740, 3812), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""test_data/test_snr_amp2.mat"""'], {}), "(__name__, 'test_data/test_snr_amp2.mat')\n", (3771, 3812), False, 'import 
pkg_resources\n'), ((4183, 4224), 'deltasigma.synthesizeNTF', 'ds.synthesizeNTF', (['order', 'osr', '(2)', 'Hinf', 'f0'], {}), '(order, osr, 2, Hinf, f0)\n', (4199, 4224), True, 'import deltasigma as ds\n'), ((4251, 4275), 'deltasigma.realizeNTF', 'ds.realizeNTF', (['ntf', 'form'], {}), '(ntf, form)\n', (4264, 4275), True, 'import deltasigma as ds\n'), ((4801, 4835), 'deltasigma.stuffABCD', 'ds.stuffABCD', (['a1', 'g1', 'b1', 'c1', 'form'], {}), '(a1, g1, b1, c1, form)\n', (4813, 4835), True, 'import deltasigma as ds\n'), ((4930, 4971), 'deltasigma.simulateSNR', 'ds.simulateSNR', (['ABCD', 'osr', 'None', 'f0', 'nlev'], {}), '(ABCD, osr, None, f0, nlev)\n', (4944, 4971), True, 'import deltasigma as ds\n'), ((5256, 5328), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""test_data/test_snr_amp2.mat"""'], {}), "(__name__, 'test_data/test_snr_amp2.mat')\n", (5287, 5328), False, 'import pkg_resources\n'), ((5700, 5741), 'deltasigma.synthesizeNTF', 'ds.synthesizeNTF', (['order', 'osr', '(2)', 'Hinf', 'f0'], {}), '(order, osr, 2, Hinf, f0)\n', (5716, 5741), True, 'import deltasigma as ds\n'), ((5762, 5806), 'deltasigma.simulateSNR', 'ds.simulateSNR', (['ABCD', 'osr', 'amp_ref', 'f0', 'nlev'], {}), '(ABCD, osr, amp_ref, f0, nlev)\n', (5776, 5806), True, 'import deltasigma as ds\n'), ((6056, 6226), 'numpy.array', 'np.array', (['[23.0421, 32.11, 43.3758, 53.1791, 65.5504, 70.5023, 73.4608, 76.2416, \n 77.877, 78.2733, 79.3729, 79.5728, 80.8729, 82.7461, 83.0723, 84.8488, \n 84.3327]'], {}), '([23.0421, 32.11, 43.3758, 53.1791, 65.5504, 70.5023, 73.4608, \n 76.2416, 77.877, 78.2733, 79.3729, 79.5728, 80.8729, 82.7461, 83.0723, \n 84.8488, 84.3327])\n', (6064, 6226), True, 'import numpy as np\n'), ((6322, 6416), 'numpy.array', 'np.array', (['[-70, -60, -50, -40, -30, -20, -15, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0]'], {}), '([-70, -60, -50, -40, -30, -20, -15, -10, -9, -8, -7, -6, -5, -4, -\n 3, -2, -1, 0])\n', (6330, 6416), True, 
'import numpy as np\n'), ((6623, 6652), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * f0)'], {}), '(1.0j * 2 * np.pi * f0)\n', (6629, 6652), True, 'import numpy as np\n'), ((6721, 6763), 'deltasigma.synthesizeQNTF', 'ds.synthesizeQNTF', (['order', 'osr', 'f0', 'NG', 'ING'], {}), '(order, osr, f0, NG, ING)\n', (6738, 6763), True, 'import deltasigma as ds\n'), ((6779, 6811), 'deltasigma.realizeQNTF', 'ds.realizeQNTF', (['ntf0', 'form', '(True)'], {}), '(ntf0, form, True)\n', (6793, 6811), True, 'import deltasigma as ds\n'), ((6915, 6956), 'deltasigma.simulateSNR', 'ds.simulateSNR', (['ABCD', 'osr', 'None', 'f0', 'nlev'], {}), '(ABCD, osr, None, f0, nlev)\n', (6929, 6956), True, 'import deltasigma as ds\n'), ((6973, 7021), 'numpy.allclose', 'np.allclose', (['a[6:]', 'SNR_ref'], {'atol': '(10)', 'rtol': '(0.001)'}), '(a[6:], SNR_ref, atol=10, rtol=0.001)\n', (6984, 7021), True, 'import numpy as np\n'), ((7036, 7071), 'numpy.allclose', 'np.allclose', (['b[5:]', 'AMP_ref'], {'atol': '(1)'}), '(b[5:], AMP_ref, atol=1)\n', (7047, 7071), True, 'import numpy as np\n'), ((2160, 2212), 'numpy.allclose', 'np.allclose', (['ABCD', 'ABCD_ref'], {'atol': '(9e-05)', 'rtol': '(0.0001)'}), '(ABCD, ABCD_ref, atol=9e-05, rtol=0.0001)\n', (2171, 2212), True, 'import numpy as np\n'), ((2339, 2382), 'numpy.allclose', 'np.allclose', (['a1', 'a2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(a1, a2, atol=1e-05, rtol=1e-05)\n', (2350, 2382), True, 'import numpy as np\n'), ((2406, 2449), 'numpy.allclose', 'np.allclose', (['g1', 'g2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(g1, g2, atol=1e-05, rtol=1e-05)\n', (2417, 2449), True, 'import numpy as np\n'), ((2473, 2516), 'numpy.allclose', 'np.allclose', (['b1', 'b2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(b1, b2, atol=1e-05, rtol=1e-05)\n', (2484, 2516), True, 'import numpy as np\n'), ((2540, 2583), 'numpy.allclose', 'np.allclose', (['c1', 'c2'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(c1, c2, atol=1e-05, rtol=1e-05)\n', (2551, 2583), 
True, 'import numpy as np\n'), ((2810, 2854), 'numpy.allclose', 'np.allclose', (['snr', 'snr_ref'], {'atol': '(1)', 'rtol': '(0.05)'}), '(snr, snr_ref, atol=1, rtol=0.05)\n', (2821, 2854), True, 'import numpy as np\n'), ((2880, 2926), 'numpy.allclose', 'np.allclose', (['amp', 'amp_ref'], {'atol': '(0.5)', 'rtol': '(0.01)'}), '(amp, amp_ref, atol=0.5, rtol=0.01)\n', (2891, 2926), True, 'import numpy as np\n'), ((3015, 3065), 'numpy.allclose', 'np.allclose', (['snr2', 'snr_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(snr2, snr_ref, atol=1e-05, rtol=1e-05)\n', (3026, 3065), True, 'import numpy as np\n'), ((3089, 3139), 'numpy.allclose', 'np.allclose', (['amp2', 'amp_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(amp2, amp_ref, atol=1e-05, rtol=1e-05)\n', (3100, 3139), True, 'import numpy as np\n'), ((3158, 3183), 'numpy.linspace', 'np.linspace', (['(-100)', '(0)', '(200)'], {}), '(-100, 0, 200)\n', (3169, 3183), True, 'import numpy as np\n'), ((3234, 3243), 'scipy.signal.lti', 'lti', (['*ntf'], {}), '(*ntf)\n', (3237, 3243), False, 'from scipy.signal import lti\n'), ((3354, 3419), 'numpy.allclose', 'np.allclose', (['snr_user', 'snr_user_ref[::10]'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(snr_user, snr_user_ref[::10], atol=1e-05, rtol=1e-05)\n', (3365, 3419), True, 'import numpy as np\n'), ((3479, 3544), 'numpy.allclose', 'np.allclose', (['amp_user', 'amp_user_ref[::10]'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(amp_user, amp_user_ref[::10], atol=1e-05, rtol=1e-05)\n', (3490, 3544), True, 'import numpy as np\n'), ((4525, 4572), 'numpy.allclose', 'np.allclose', (['a1', 'a1_ref'], {'atol': '(1e-09)', 'rtol': '(5e-05)'}), '(a1, a1_ref, atol=1e-09, rtol=5e-05)\n', (4536, 4572), True, 'import numpy as np\n'), ((4596, 4643), 'numpy.allclose', 'np.allclose', (['g1', 'g1_ref'], {'atol': '(1e-09)', 'rtol': '(5e-05)'}), '(g1, g1_ref, atol=1e-09, rtol=5e-05)\n', (4607, 4643), True, 'import numpy as np\n'), ((4667, 4715), 'numpy.allclose', 'np.allclose', (['b1', 
'b1_ref'], {'atol': '(1e-09)', 'rtol': '(0.0001)'}), '(b1, b1_ref, atol=1e-09, rtol=0.0001)\n', (4678, 4715), True, 'import numpy as np\n'), ((4738, 4785), 'numpy.allclose', 'np.allclose', (['c1', 'c1_ref'], {'atol': '(1e-09)', 'rtol': '(2e-05)'}), '(c1, c1_ref, atol=1e-09, rtol=2e-05)\n', (4749, 4785), True, 'import numpy as np\n'), ((4860, 4912), 'numpy.allclose', 'np.allclose', (['ABCD', 'ABCD_ref'], {'atol': '(9e-05)', 'rtol': '(0.0001)'}), '(ABCD, ABCD_ref, atol=9e-05, rtol=0.0001)\n', (4871, 4912), True, 'import numpy as np\n'), ((4996, 5045), 'numpy.allclose', 'np.allclose', (['snr', 'snr_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(snr, snr_ref, atol=1e-05, rtol=1e-05)\n', (5007, 5045), True, 'import numpy as np\n'), ((5069, 5118), 'numpy.allclose', 'np.allclose', (['amp', 'amp_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(amp, amp_ref, atol=1e-05, rtol=1e-05)\n', (5080, 5118), True, 'import numpy as np\n'), ((5831, 5880), 'numpy.allclose', 'np.allclose', (['snr', 'snr_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(snr, snr_ref, atol=1e-05, rtol=1e-05)\n', (5842, 5880), True, 'import numpy as np\n'), ((5904, 5953), 'numpy.allclose', 'np.allclose', (['amp', 'amp_ref'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(amp, amp_ref, atol=1e-05, rtol=1e-05)\n', (5915, 5953), True, 'import numpy as np\n')] |
import numpy as np
from skimage.transform import resize
from segmentation_net.tf_record import _bytes_feature, _int64_feature
# from Preprocessing.Normalization import PrepNormalizer
from useful_wsi import get_image
def generate_unet_possible(i):
    """
    Map the width (== height) of the deepest encoder feature map to a
    valid U-Net input size.

    Parameters
    ----------
    i: integer,
        width (and height, as the map is square) of the lowest-resolution
        encoding 3D feature map.

    Returns
    -------
    A possible U-Net input size for integer i.
    """
    size = i
    # each decoder block doubles the resolution after adding the 4 pixels
    # lost to two unpadded 3x3 convolutions; four such blocks, then the
    # final 4-pixel border
    for _ in range(4):
        size = (size + 4) * 2
    return size + 4
def possible_values(n):
    """
    List the first n valid U-Net input sizes.

    Parameters
    ----------
    n: integer,
        number of candidate sizes to generate (from feature-map widths
        0 .. n-1).

    Returns
    -------
    A list of n possible input sizes for the U-Net model.
    """
    return [generate_unet_possible(index) for index in range(n)]
def closest_number(val, num_list):
    """
    Return the element of num_list closest to val.

    Parameters
    ----------
    val: integer, float,
        value to find the closest element to.
    num_list: list,
        candidate values; must be non-empty.

    Returns
    -------
    The candidate with the smallest absolute distance to val (the first
    such candidate on ties).
    """
    best = None
    best_dist = None
    for candidate in num_list:
        dist = abs(candidate - val)
        if best_dist is None or dist < best_dist:
            best, best_dist = candidate, dist
    if best_dist is None:
        # mirror min()'s behavior on an empty sequence
        raise ValueError('num_list must be non-empty')
    return best
def get_input(l):
    """
    Parse one space-separated line of list_pos into its numeric fields.

    The line layout is "<line_number> x y w h l <name>"; the leading line
    number and trailing name are discarded.

    Returns
    -------
    (x, y, w, h, l) as integers.
    """
    # first field is the line number, last one is the name
    _, x_pos, y_pos, width, height, level, _ = l.split(' ')
    return int(x_pos), int(y_pos), int(width), int(height), int(level)
class resizer_unet:
    """
    Resizes a crop of a whole-slide image to a valid U-Net input size and
    maps predictions back to the expected label size.

    Parameters
    ----------
    slide: string,
        path to the raw whole-slide image.
    inputs: list,
        crop parameters forwarded to get_image.

    Returns
    -------
    Object with methods to resize the image for the U-Net and to resize
    predictions back.
    """
    def __init__(self, slide, inputs):#, NORM):
        """
        Slices the crop from the slide, keeps the first three (RGB)
        channels, records the original shape and picks the closest valid
        U-Net shape among the first 300 candidates.
        """
        crop = np.array(get_image(slide, inputs))[:, :, 0:3]
        self.original_image = crop
        self.x_orig, self.y_orig = crop.shape[0:2]
        # hopefully 300 candidate sizes is enough to bracket any crop
        self.possible_unet = possible_values(300)
        self.closest_unet_shape()
        #self.n = NORM
    def prep_image(self, image):
        """
        Run the (currently pass-through) preprocessing on the image.
        """
        return self.preprocess(image)
    def closest_unet_shape(self):
        """
        Finds the closest valid U-Net input shape and derives the label
        (output) shape.
        """
        self.x_new = closest_number(self.x_orig, self.possible_unet)
        self.y_new = closest_number(self.y_orig, self.possible_unet)
        # unpadded convolutions crop 92 pixels from each spatial border
        self.x_lab = self.x_orig - 92 * 2
        self.y_lab = self.y_orig - 92 * 2
    def transform_for_analyse(self):
        """
        Resize a copy of the original crop to the U-Net shape and prep it.
        """
        img = self.original_image.copy()
        target_shape = (self.x_new, self.y_new)
        if img.shape[0:2] != target_shape:
            img = resize(img, target_shape, preserve_range=True).astype(img.dtype)
        return self.prep_image(img)
    def transform_back_pred(self, image):
        """
        Resize a prediction back to the expected label shape
        (nearest-neighbor, to keep labels discrete).
        """
        label_shape = (self.x_lab, self.y_lab)
        if image.shape[0:2] != label_shape:
            image = resize(image, label_shape,
                           order=0, preserve_range=True).astype(image.dtype)
        else:
            print("not resizeing")
        return image
    def preprocess(self, image):
        """
        Placeholder for normalization; currently returns the image
        unchanged (the PrepNormalizer hook is disabled).
        """
        return image
def get_image_from_slide(slide, inp):#, n):
    """
    Extract a crop from a slide and return it resized for U-Net analysis.

    Parameters
    ----------
    slide: string,
        path to the raw whole-slide image.
    inp: list,
        crop parameters forwarded to resizer_unet / get_image.

    Returns
    -------
    The crop resized to a valid U-Net input shape.
    """
    return resizer_unet(slide, inp).transform_for_analyse()
| [
"skimage.transform.resize",
"numpy.array",
"useful_wsi.get_image"
] | [((2295, 2319), 'useful_wsi.get_image', 'get_image', (['slide', 'inputs'], {}), '(slide, inputs)\n', (2304, 2319), False, 'from useful_wsi import get_image\n'), ((2336, 2351), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2344, 2351), True, 'import numpy as np\n'), ((3273, 3331), 'skimage.transform.resize', 'resize', (['img', '(self.x_new, self.y_new)'], {'preserve_range': '(True)'}), '(img, (self.x_new, self.y_new), preserve_range=True)\n', (3279, 3331), False, 'from skimage.transform import resize\n'), ((3575, 3644), 'skimage.transform.resize', 'resize', (['image', '(self.x_lab, self.y_lab)'], {'order': '(0)', 'preserve_range': '(True)'}), '(image, (self.x_lab, self.y_lab), order=0, preserve_range=True)\n', (3581, 3644), False, 'from skimage.transform import resize\n')] |
import os
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
# import the training utilities
from model_utils import load_data_set, train
# define the methods
# each name selects a model variant dispatched inside model_utils.train
methods = {'Kumaraswamy', 'Nalisnick', 'Dirichlet', 'Softmax', 'KingmaM2'}
# specify if you want to save plots (other than learning curve)--might be slower
SAVE_PLOTS = False
# define the architectures
# keyed by data set name, then by an architecture tag ('2c_2d' = two conv
# layers + two dense layers); 'enc_arch'/'dec_arch' are the encoder/decoder
# layer specs and 'conv_start_chans' the channel count at the start of the
# decoder's convolution stack
architectures = {
    'mnist': {
        '2c_2d': {
            'enc_arch': {'conv': [{'k_size': 5, 'out_chan': 5}, {'k_size': 3, 'out_chan': 10}],
                         'full': [200, 200]},
            'dec_arch': {'full': [200, 200],
                         'conv_start_chans': 10,
                         'conv': [{'k_size': 3, 'out_chan': 5}, {'k_size': 5, 'out_chan': 1}]},
            'learn_rate': 1e-3
        },
    },
    'svhn_cropped': {
        '2c_2d': {
            'enc_arch': {'conv': [{'k_size': 5, 'out_chan': 15}, {'k_size': 3, 'out_chan': 30}],
                         'full': [200, 200]},
            'dec_arch': {'full': [200, 200],
                         'conv_start_chans': 30,
                         'conv': [{'k_size': 3, 'out_chan': 15}, {'k_size': 5, 'out_chan': 3}]},
            'learn_rate': 1e-4
        },
    },
}
if __name__ == '__main__':
    # Entry point: for each run / method / architecture combination, create a
    # fresh results directory tree, load the semi-supervised split and train.
    # model assumptions
    data_model = 'Gaussian'
    covariance_structure = 'diag'
    # training constants
    n_epochs = 750
    b_size = 250
    # add parser arguments
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_runs', type=int, default=4, help='number of runs')
    parser.add_argument('--data_set', type=str, default='svhn_cropped', help='data set name = {mnist, svhn_cropped}')
    parser.add_argument('--dir_prefix', type=str, default='new_results_ss_', help='results directory prefix')
    parser.add_argument('--num_labelled', type=int, default=1000, help='number of labels')
    parser.add_argument('--dim_z', type=int, default=50, help='latent encoding dimensions')
    # parse the arguments
    args = parser.parse_args()
    print('Num. runs = {:d}'.format(args.num_runs))
    print('Data set = ', args.data_set)
    print('Num. labelled = {:d}'.format(args.num_labelled))
    print('Latent dims = {:d}'.format(args.dim_z))
    # if results directory doesn't yet exist for this data set, make one
    dir_results = os.path.join(os.getcwd(), args.dir_prefix + args.data_set)
    if not os.path.exists(dir_results):
        os.mkdir(dir_results)
    # loop over the runs
    cnt = 0
    for _ in range(args.num_runs):
        # use the current time as the seed (it also names the run directory)
        seed = int(time.time())
        # make a fresh directory for this run
        dir_run = str(seed)
        dir_run = os.path.join(dir_results, dir_run)
        os.mkdir(dir_run)
        # make the label-count directory
        dir_labels = os.path.join(dir_run, 'num_labelled_' + str(args.num_labelled))
        os.mkdir(dir_labels)
        # loop over the methods
        for method in methods:
            # make the method directory
            dir_method = os.path.join(dir_labels, method)
            os.mkdir(dir_method)
            # loop over the architectures
            for arch in architectures[args.data_set]:
                # make the architecture directory
                dir_arch = os.path.join(dir_method, arch)
                os.mkdir(dir_arch)
                # skip Kingma M2 method if dim(z) == 0 since that model does not support this configuration
                if method == 'KingmaM2' and args.dim_z == 0:
                    continue
                # make the latent dimension directory
                dir_dim_z = os.path.join(dir_arch, 'dim_z_' + str(args.dim_z))
                os.mkdir(dir_dim_z)
                # print update
                cnt += 1
                print('\n' + '*' * 100)
                print('num labels =', args.num_labelled,
                      '| method =', method,
                      '| arch =', arch,
                      '| dim_z =', args.dim_z)
                # set random seed (same seed for every method/arch in a run,
                # so comparisons share the data split and initialization RNG)
                np.random.seed(seed)
                tf.random.set_random_seed(seed)
                # load the data set (custom split method)
                unlabelled_set, labelled_set, valid_set, test_set, set_info = load_data_set(
                    data_set_name=args.data_set,
                    px_z=data_model,
                    num_validation=10000,
                    num_labelled=args.num_labelled,
                    balanced=True,
                    batch_size=b_size)
                # configure the common VAE elements
                config = {
                    'dim_x': list(set_info.features['image'].shape),
                    'num_classes': set_info.features['label'].num_classes,
                    'dim_z': args.dim_z,
                    'K': set_info.features['label'].num_classes,
                    'enc_arch': architectures[args.data_set][arch]['enc_arch'],
                    'dec_arch': architectures[args.data_set][arch]['dec_arch'],
                    'learn_rate': architectures[args.data_set][arch]['learn_rate'],
                    'px_z': data_model,
                    'covariance_structure': covariance_structure,
                    'dropout_rate': 0.0,
                    'save_dir': dir_dim_z,
                    'save_plots': SAVE_PLOTS}
                # run training
                train(method=method,
                      config=config,
                      unlabelled_set=unlabelled_set,
                      labelled_set=labelled_set,
                      valid_set=valid_set,
                      test_set=test_set,
                      n_epochs=n_epochs)
                # reset the graph so the next configuration starts clean
                tf.reset_default_graph()
                # close all plots
                plt.close('all')
    print('\n' + '*' * 100)
    print('Completed {:d} trainings!'.format(cnt))
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.getcwd",
"tensorflow.reset_default_graph",
"model_utils.load_data_set",
"matplotlib.pyplot.close",
"os.path.exists",
"model_utils.train",
"time.time",
"os.path.join",
"tensorflow.random.set_random_seed"
] | [((1503, 1528), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1526, 1528), False, 'import argparse\n'), ((2388, 2399), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2397, 2399), False, 'import os\n'), ((2445, 2472), 'os.path.exists', 'os.path.exists', (['dir_results'], {}), '(dir_results)\n', (2459, 2472), False, 'import os\n'), ((2482, 2503), 'os.mkdir', 'os.mkdir', (['dir_results'], {}), '(dir_results)\n', (2490, 2503), False, 'import os\n'), ((2744, 2778), 'os.path.join', 'os.path.join', (['dir_results', 'dir_run'], {}), '(dir_results, dir_run)\n', (2756, 2778), False, 'import os\n'), ((2787, 2804), 'os.mkdir', 'os.mkdir', (['dir_run'], {}), '(dir_run)\n', (2795, 2804), False, 'import os\n'), ((2935, 2955), 'os.mkdir', 'os.mkdir', (['dir_labels'], {}), '(dir_labels)\n', (2943, 2955), False, 'import os\n'), ((2638, 2649), 'time.time', 'time.time', ([], {}), '()\n', (2647, 2649), False, 'import time\n'), ((3086, 3118), 'os.path.join', 'os.path.join', (['dir_labels', 'method'], {}), '(dir_labels, method)\n', (3098, 3118), False, 'import os\n'), ((3131, 3151), 'os.mkdir', 'os.mkdir', (['dir_method'], {}), '(dir_method)\n', (3139, 3151), False, 'import os\n'), ((3327, 3357), 'os.path.join', 'os.path.join', (['dir_method', 'arch'], {}), '(dir_method, arch)\n', (3339, 3357), False, 'import os\n'), ((3374, 3392), 'os.mkdir', 'os.mkdir', (['dir_arch'], {}), '(dir_arch)\n', (3382, 3392), False, 'import os\n'), ((3742, 3761), 'os.mkdir', 'os.mkdir', (['dir_dim_z'], {}), '(dir_dim_z)\n', (3750, 3761), False, 'import os\n'), ((4098, 4118), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4112, 4118), True, 'import numpy as np\n'), ((4135, 4166), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['seed'], {}), '(seed)\n', (4160, 4166), True, 'import tensorflow as tf\n'), ((4304, 4456), 'model_utils.load_data_set', 'load_data_set', ([], {'data_set_name': 'args.data_set', 'px_z': 'data_model', 'num_validation': 
'(10000)', 'num_labelled': 'args.num_labelled', 'balanced': '(True)', 'batch_size': 'b_size'}), '(data_set_name=args.data_set, px_z=data_model, num_validation=\n 10000, num_labelled=args.num_labelled, balanced=True, batch_size=b_size)\n', (4317, 4456), False, 'from model_utils import load_data_set, train\n'), ((5431, 5591), 'model_utils.train', 'train', ([], {'method': 'method', 'config': 'config', 'unlabelled_set': 'unlabelled_set', 'labelled_set': 'labelled_set', 'valid_set': 'valid_set', 'test_set': 'test_set', 'n_epochs': 'n_epochs'}), '(method=method, config=config, unlabelled_set=unlabelled_set,\n labelled_set=labelled_set, valid_set=valid_set, test_set=test_set,\n n_epochs=n_epochs)\n', (5436, 5591), False, 'from model_utils import load_data_set, train\n'), ((5767, 5791), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5789, 5791), True, 'import tensorflow as tf\n'), ((5843, 5859), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5852, 5859), True, 'from matplotlib import pyplot as plt\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
def autolabel(rects, plot_axes):
    """
    Attach a text label next to each horizontal bar displaying its width.

    The last bar (the 'Average' row) is rendered bold/italic to match its
    emphasized tick label; zero-width bars among the others are skipped so
    that no "0.00" is printed on the axis.

    Parameters
    ----------
    rects: list of matplotlib bar patches (e.g. Axes.patches).
    plot_axes: the matplotlib Axes the bars were drawn on.
    """
    if not rects:
        return
    for rect in rects[:-1]:
        if rect.get_width() > 0:
            # each label sits halfway up its own bar (the original reused the
            # height of the previous bar and crashed on single-bar input)
            plot_axes.text(rect.get_width(), rect.get_y() + rect.get_height() / 2,
                           "%.2f" % rect.get_width(), fontsize=7, color='black',
                           alpha=0.8, ha='center', va='bottom')
    last = rects[-1]
    plot_axes.text(last.get_width(), last.get_y() + last.get_height() / 2,
                   "%.2f" % last.get_width(), fontsize=7, ha='center',
                   va='bottom', weight='bold', style='italic')
def get_geometric_mean(dataset, metric):
    """
    Zero-adjusted geometric mean of dataset[metric] (Habib's estimator).

    Positive entries contribute the exponential of their mean log and the
    result is scaled by the fraction of positive entries:

        G = (n / (n + m)) * exp( (1 / (n + m)) * sum(ln x_i) )

    where n is the number of positive values, m the number of non-positive
    values, and the sum runs over the positive values only.

    Parameters
    ----------
    dataset: pandas.DataFrame with a numeric column `metric`.
    metric: str, name of the column to average.

    Returns
    -------
    float, the zero-adjusted geometric mean (0.0 for an empty dataset).
    """
    import numpy as np
    import math
    n_zero = 0
    n_positive = 0
    sum_of_logs = 0.0
    for _, row in dataset.iterrows():
        if row[metric] > 0:
            n_positive += 1
            # natural log, to match math.exp below (the original used
            # np.log2 here, which inflated the result)
            sum_of_logs += np.log(row[metric])
        else:
            n_zero += 1
    total = n_zero + n_positive
    if total == 0:
        # empty input: avoid the original's ZeroDivisionError
        return 0.0
    return (n_positive / total) * math.exp(sum_of_logs / total)
# Build a three-panel horizontal bar chart comparing per-category precision
# of the Code2flow, Pyan and Wala NCFA call-graph tools, with a zero-adjusted
# geometric-mean "Average" row appended to each panel.
style.use(['ggplot', 'fivethirtyeight'])
colors = ['#DA7C30', '#396AB1', '#CC2529', '#47CDDA']
c2f_main = pd.read_csv('../docker_reports/Code2flow.csv')
c2f = c2f_main[['Category', 'Precision']]
pyan_main = pd.read_csv('../docker_reports/Pyan.csv')
pyan = pyan_main[['Category', 'Precision']]
walaNCFA_main = pd.read_csv('../docker_reports/WalaNCFA.csv')
walaNCFA = walaNCFA_main[['Category', 'Precision']]
# per-category arithmetic mean, plus an overall geometric-mean row per tool
c2f_mean = c2f.groupby(['Category'], as_index=False).mean()
# c2f_mean.loc[len(c2f_mean)] = ['Weighted Average', get_weighted_geometric_mean(c2f_main)]
c2f_mean.loc[len(c2f_mean)] = ['Average', get_geometric_mean(c2f_main, "Precision")]
pyan_mean = pyan.groupby(['Category'], as_index=False).mean()
# pyan_mean.loc[len(pyan_mean)] = ['Weighted Average', get_weighted_geometric_mean(pyan_main)]
pyan_mean.loc[len(pyan_mean)] = ['Average', get_geometric_mean(pyan_main, "Precision")]
walaNCFA_mean = walaNCFA.groupby(['Category'], as_index=False).mean()
# walaNCFA_mean.loc[len(walaNCFA_mean)] = ['Weighted Average', get_weighted_geometric_mean(walaNCFA_main)]
walaNCFA_mean.loc[len(walaNCFA_mean)] = ['Average', get_geometric_mean(walaNCFA_main, "Precision")]
# rename the benchmark category for display purposes
c2f_precision = c2f_mean[['Category', 'Precision']].copy()
c2f_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
pyan_precision = pyan_mean[['Category', 'Precision']].copy()
pyan_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
walaNCFA_precision = walaNCFA_mean[['Category', 'Precision']].copy()
walaNCFA_precision.replace({"code_generation": "run_time_code_generation"}, inplace=True)
label_fontsize = 10
title_fontsize = 11
# three side-by-side panels sharing the category axis
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, sharey=True, figsize=(9, 4))
c2f_precision.plot(kind='barh', y='Precision', x='Category', color=colors[0], alpha=0.6, ax=ax0)
ax0.set_title('Code2flow', fontsize=title_fontsize)
ax0.set_xlabel('Precision', fontsize=label_fontsize)
ax0.set_ylabel('Benchmark Category', fontsize=label_fontsize)
ax0.set_xlim([0, 1])
pyan_precision.plot(kind='barh', y='Precision', x='Category', color=colors[1], alpha=0.6, ax=ax1)
ax1.set_title('Pyan', fontsize=title_fontsize)
ax1.set_xlabel('Precision', fontsize=label_fontsize)
ax1.set_xlim([0, 1])
walaNCFA_precision.plot(kind='barh', y='Precision', x='Category', color=colors[2], alpha=0.6, ax=ax2)
ax2.set_title('Wala NCFA', fontsize=title_fontsize)
ax2.set_xlabel('Precision', fontsize=label_fontsize)
ax2.set_xlim([0, 1])
ax0.legend().set_visible(False)
ax1.legend().set_visible(False)
ax2.legend().set_visible(False)
tick_label_size = 8
ax0.tick_params(labelsize=tick_label_size)
ax1.tick_params(labelsize=tick_label_size)
ax2.tick_params(labelsize=tick_label_size)
#Setting weight for Average row
ylabels = ax0.get_yticklabels()
modified_ylabels = []
for i in ylabels:
    if 'Average' in i.get_text():
        i.set_weight("bold")
        i.set_style("italic")
    modified_ylabels.append(i)
ax0.set_yticklabels(modified_ylabels)
#Adding values next to the bars
autolabel(ax0.patches, ax0)
autolabel(ax1.patches, ax1)
autolabel(ax2.patches, ax2)
# autolabel(ax3.patches, ax3)
fig.savefig('precision_synthetic_test.png', transparent=False, dpi=150, bbox_inches="tight") | [
"math.exp",
"matplotlib.style.use",
"pandas.read_csv",
"numpy.log2",
"matplotlib.pyplot.subplots"
] | [((1353, 1393), 'matplotlib.style.use', 'style.use', (["['ggplot', 'fivethirtyeight']"], {}), "(['ggplot', 'fivethirtyeight'])\n", (1362, 1393), True, 'import matplotlib.style as style\n'), ((1461, 1507), 'pandas.read_csv', 'pd.read_csv', (['"""../docker_reports/Code2flow.csv"""'], {}), "('../docker_reports/Code2flow.csv')\n", (1472, 1507), True, 'import pandas as pd\n'), ((1562, 1603), 'pandas.read_csv', 'pd.read_csv', (['"""../docker_reports/Pyan.csv"""'], {}), "('../docker_reports/Pyan.csv')\n", (1573, 1603), True, 'import pandas as pd\n'), ((1664, 1709), 'pandas.read_csv', 'pd.read_csv', (['"""../docker_reports/WalaNCFA.csv"""'], {}), "('../docker_reports/WalaNCFA.csv')\n", (1675, 1709), True, 'import pandas as pd\n'), ((3039, 3098), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'sharey': '(True)', 'figsize': '(9, 4)'}), '(nrows=1, ncols=3, sharey=True, figsize=(9, 4))\n', (3051, 3098), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1281), 'math.exp', 'math.exp', (['right_side_of_exp'], {}), '(right_side_of_exp)\n', (1262, 1281), False, 'import math\n'), ((1044, 1064), 'numpy.log2', 'np.log2', (['row[metric]'], {}), '(row[metric])\n', (1051, 1064), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
def lqr_inf(Fm, fv, Cm, cv, discount=0.99, K=100):
    """
    Infinite-horizon LQR, approximated by running the finite-horizon
    backward recursion for K steps and returning the first-step
    (approximately stationary) solution.

    Args:
        Fm, fv: dynamics matrix and bias (see lqr_fin).
        Cm, cv: quadratic and linear cost terms (see lqr_fin).
        discount: per-step discount factor.
        K: number of backward-recursion steps used for convergence.

    Returns:
        (K0, k0, Vxx0, Vx0, Qtt0, Qt0): policy, value-function and
        Q-function terms at the first time step.
    """
    solution = lqr_fin(K, Fm, fv, Cm, cv, discount=discount)
    return tuple(term[0] for term in solution)
def lqr_fin(T, Fm, fv, Cm, cv, discount=1.0):
    """
    Discrete-time, finite-horizon LQR solver (backward Riccati recursion).

    The dynamics are x_{t+1} = Fm @ [x; u] + fv and the per-step cost is
    0.5 * z^T Cm z + cv^T z with z = [x; u].

    Args:
        T (int): Time horizon
        Fm (np.ndarray): A dX x (dX+dU) dynamics matrix
        fv (np.ndarray): A dX x 1 dynamics bias
        Cm (np.ndarray): A (dX+dU) x (dX+dU) quadratic cost term
        cv (np.ndarray): A (dX+dU) x 1 linear cost term
        discount (float): per-step discount factor
    Returns:
        K, k: time-indexed linear policy u_t = K[t] @ x_t + k[t]
        Vxx, Vx: value function terms; the value is
            0.5 * x^T Vxx[t] x + Vx[t]^T x (constant term dropped)
        Qtt, Qt: Q-function terms over z = [x, u]; the Q-value is
            0.5 * z^T Qtt[t] z + Qt[t]^T z (constant term dropped)
    """
    dim_x, dim_xu = Fm.shape
    dim_u = dim_xu - dim_x
    xs = slice(0, dim_x)
    us = slice(dim_x, dim_x + dim_u)

    Vxx = np.zeros((T, dim_x, dim_x))
    Vx = np.zeros((T, dim_x))
    Qtt = np.zeros((T, dim_xu, dim_xu))
    Qt = np.zeros((T, dim_xu))
    K = np.zeros((T, dim_u, dim_x))
    k = np.zeros((T, dim_u))

    # Backward pass: propagate the value function from T-1 down to 0.
    for t in reversed(range(T)):
        # current-step cost
        Qtt[t] = Cm[:, :]
        Qt[t] = cv[:]
        # fold in the discounted value of the next time step
        if t < T - 1:
            Qtt[t] += Fm.T.dot(discount * Vxx[t + 1, :, :]).dot(Fm)
            Qt[t] += Fm.T.dot(discount * Vx[t + 1, :] + discount * Vxx[t + 1, :, :].dot(fv))
        # keep the quadratic term symmetric against numerical drift
        Qtt[t] = 0.5 * (Qtt[t] + Qtt[t].T)

        Quu = Qtt[t, us, us]
        Qu = Qt[t, us]
        Qux = Qtt[t, us, xs]
        # Solve Quu @ K[t] = -Qux and Quu @ k[t] = -Qu through the Cholesky
        # factor of the action-action block.
        U = sp.linalg.cholesky(Quu)
        L = U.T
        k[t, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, Qu, lower=True)
        )
        K[t, :, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, Qux, lower=True)
        )
        # back up the value function for the next (earlier) step
        Vxx[t, :, :] = Qtt[t, xs, xs] + Qtt[t, xs, us].dot(K[t, :, :])
        Vx[t, :] = Qt[t, xs] + Qtt[t, xs, us].dot(k[t, :])
        Vxx[t, :, :] = 0.5 * (Vxx[t, :, :] + Vxx[t, :, :].T)
    return K, k, Vxx, Vx, Qtt, Qt
def solve_lqr_env(lqrenv, T=None, discount=1.0, solve_itrs=500):
    """
    Build the (Fm, fv, Cm, cv) LQR problem from an environment object and
    solve it, converting between the environment's reward convention and
    the solver's cost convention.

    Args:
        lqrenv: environment exposing dO, dA, dynamics, rew_Q, rew_R, rew_q.
        T: horizon; None selects the infinite-horizon approximation.
        discount: per-step discount factor.
        solve_itrs: recursion steps for the infinite-horizon case.

    Returns:
        K, k, value and Q-function terms, sign-flipped back to reward units.
    """
    dim_x, dim_u = lqrenv.dO, lqrenv.dA
    Fm = lqrenv.dynamics
    fv = np.zeros(dim_x)
    # cost = -2 * reward quadratic (the solver minimizes 0.5 z^T Cm z + cv^T z)
    Cm = np.zeros((dim_x + dim_u, dim_x + dim_u))
    Cm[0:dim_x, 0:dim_x] = lqrenv.rew_Q
    Cm[dim_x:dim_x + dim_u, dim_x:dim_x + dim_u] = lqrenv.rew_R
    Cm = -2 * Cm
    cv = np.zeros(dim_x + dim_u)
    cv[0:dim_x] = -lqrenv.rew_q
    if T is None:
        K, k, V, v, Q, q = lqr_inf(Fm, fv, Cm, cv, discount=discount, K=solve_itrs)
    else:
        K, k, V, v, Q, q = lqr_fin(T, Fm, fv, Cm, cv, discount=discount)
    # flip signs back so the returned value/Q terms are in reward units
    return K, k, -0.5 * V, -v, -0.5 * Q, -q
| [
"scipy.linalg.cholesky",
"numpy.zeros",
"scipy.linalg.solve_triangular"
] | [((1322, 1343), 'numpy.zeros', 'np.zeros', (['(T, dX, dX)'], {}), '((T, dX, dX))\n', (1330, 1343), True, 'import numpy as np\n'), ((1353, 1370), 'numpy.zeros', 'np.zeros', (['(T, dX)'], {}), '((T, dX))\n', (1361, 1370), True, 'import numpy as np\n'), ((1381, 1412), 'numpy.zeros', 'np.zeros', (['(T, dX + dU, dX + dU)'], {}), '((T, dX + dU, dX + dU))\n', (1389, 1412), True, 'import numpy as np\n'), ((1418, 1440), 'numpy.zeros', 'np.zeros', (['(T, dX + dU)'], {}), '((T, dX + dU))\n', (1426, 1440), True, 'import numpy as np\n'), ((1447, 1468), 'numpy.zeros', 'np.zeros', (['(T, dU, dX)'], {}), '((T, dU, dX))\n', (1455, 1468), True, 'import numpy as np\n'), ((1477, 1494), 'numpy.zeros', 'np.zeros', (['(T, dU)'], {}), '((T, dU))\n', (1485, 1494), True, 'import numpy as np\n'), ((2978, 2990), 'numpy.zeros', 'np.zeros', (['dX'], {}), '(dX)\n', (2986, 2990), True, 'import numpy as np\n'), ((3001, 3029), 'numpy.zeros', 'np.zeros', (['(dX + dU, dX + dU)'], {}), '((dX + dU, dX + dU))\n', (3009, 3029), True, 'import numpy as np\n'), ((3127, 3144), 'numpy.zeros', 'np.zeros', (['(dX + dU)'], {}), '(dX + dU)\n', (3135, 3144), True, 'import numpy as np\n'), ((2218, 2246), 'scipy.linalg.cholesky', 'sp.linalg.cholesky', (['inv_term'], {}), '(inv_term)\n', (2236, 2246), True, 'import scipy as sp\n'), ((2355, 2404), 'scipy.linalg.solve_triangular', 'sp.linalg.solve_triangular', (['L', 'k_term'], {'lower': '(True)'}), '(L, k_term, lower=True)\n', (2381, 2404), True, 'import scipy as sp\n'), ((2480, 2529), 'scipy.linalg.solve_triangular', 'sp.linalg.solve_triangular', (['L', 'K_term'], {'lower': '(True)'}), '(L, K_term, lower=True)\n', (2506, 2529), True, 'import scipy as sp\n')] |
import numpy as np
import torch
import logging
import pickle
from dataclasses import dataclass, field
from typing import List, Optional
from skimage.filters import threshold_otsu
from cellmincer.opto_utils import crop_center
logger = logging.getLogger()
@dataclass
class PaddedMovieTorch:
    """
    A (t, x, y) movie tensor padded along all three axes, together with
    the paddings and the original (unpadded) dimensions needed to recover
    the original frames.
    """
    t_padding: int
    x_padding: int
    y_padding: int
    original_n_frames: int
    original_width: int
    original_height: int
    padded_movie_txy: torch.Tensor

    def mean_xy(self):
        """Per-pixel temporal mean over the padded movie."""
        return torch.mean(self.padded_movie_txy, dim=0)

    def std_xy(self):
        """Per-pixel temporal standard deviation over the padded movie."""
        return torch.std(self.padded_movie_txy, dim=0)
@dataclass
class OptopatchGlobalFeatureContainer:
    """Global per-pixel features of a movie, stored as three parallel lists:
    feature i has name feature_name_list[i], depth feature_depth_list[i]
    and data feature_array_list[i]."""
    # scale used to normalize the movie (set to the movie's global std
    # during feature extraction)
    norm_scale: float = 1.0
    feature_name_list: List[str] = field(default_factory=list)
    # presumably the downsampling depth each feature was computed at --
    # TODO confirm against _populate_features
    feature_depth_list: List[int] = field(default_factory=list)
    feature_array_list: List[np.ndarray] = field(default_factory=list)
def unpad_frame(frame_xy: torch.Tensor, padded_movie: 'PaddedMovieTorch') -> torch.Tensor:
    """Crop the spatial padding from a single frame, returning the
    original width x height region described by `padded_movie`."""
    x0 = padded_movie.x_padding
    y0 = padded_movie.y_padding
    return frame_xy[
        x0:(x0 + padded_movie.original_width),
        y0:(y0 + padded_movie.original_height)]
def is_power_of_two(x: int) -> bool:
    """Return True iff x is a positive power of two (1, 2, 4, ...)."""
    if x == 0:
        return False
    # a power of two has exactly one set bit
    return x & (x - 1) == 0
def smallest_power_of_two(x: int) -> int:
    """Return the smallest power of two >= x (for positive x)."""
    return 1 << (x - 1).bit_length()
def generate_padded_movie(
        orig_movie_txy_np: np.ndarray,
        min_t_padding: int,
        min_x_padding: int,
        min_y_padding: int,
        padding_mode: str,
        power_of_two: bool,
        device: torch.device,
        dtype: torch.dtype = torch.float32) -> 'PaddedMovieTorch':
    """
    Pad a (t, x, y) movie in time and space and move it to a torch tensor.

    Args:
        orig_movie_txy_np: movie of shape (n_frames, width, height); width
            and height must be even.
        min_t_padding: frames of temporal padding on each side.
        min_x_padding, min_y_padding: minimum spatial padding on each side.
        padding_mode: np.pad mode (e.g. 'reflect').
        power_of_two: if True, width/height must be powers of two and the
            spatial paddings are rounded up to powers of two.
        device: target torch device.
        dtype: target torch dtype. Note: the original signature had
            ``dtype=torch.dtype`` (the *type* object as the default), which
            raised if the caller omitted dtype; it now defaults to a usable
            concrete dtype, and all explicit-dtype callers are unaffected.

    Returns:
        PaddedMovieTorch wrapping the padded tensor and the paddings that
        were actually applied.
    """
    original_n_frames = orig_movie_txy_np.shape[0]
    original_width = orig_movie_txy_np.shape[1]
    original_height = orig_movie_txy_np.shape[2]

    assert original_width % 2 == 0
    assert original_height % 2 == 0

    if power_of_two:
        assert is_power_of_two(original_width)
        assert is_power_of_two(original_height)
        # round the spatial paddings up so the padded sizes stay powers of two
        x_padding = smallest_power_of_two(min_x_padding)
        y_padding = smallest_power_of_two(min_y_padding)
    else:
        x_padding = min_x_padding
        y_padding = min_y_padding
    t_padding = min_t_padding

    padded_movie_txy_np = np.pad(
        orig_movie_txy_np,
        pad_width=((t_padding, t_padding), (x_padding, x_padding), (y_padding, y_padding)),
        mode=padding_mode)

    padded_movie_txy_torch = torch.tensor(
        padded_movie_txy_np,
        device=device,
        dtype=dtype)

    return PaddedMovieTorch(
        t_padding=t_padding,
        x_padding=x_padding,
        y_padding=y_padding,
        original_n_frames=original_n_frames,
        original_width=original_width,
        original_height=original_height,
        padded_movie_txy=padded_movie_txy_torch)
def get_trend_movie(
        padded_movie: PaddedMovieTorch,
        order: int,
        trend_func: str = 'mean') -> PaddedMovieTorch:
    """
    Smooth a padded movie in time with a centered sliding window of
    half-width `order` (window length 2 * order + 1).

    The temporal padding must be at least `order` so every output frame has
    a full window; the result keeps the original dimensions but its
    temporal padding shrinks by `order` on each side.

    Args:
        padded_movie: movie padded by at least `order` frames in time.
        order: sliding-window half-width.
        trend_func: 'mean' or 'median'; note any other value silently
            returns an all-zeros trend movie.

    Returns:
        PaddedMovieTorch holding the trend movie.
    """
    # need `order` frames on each side for a full centered window
    assert padded_movie.t_padding >= order
    trend_movie_txy = torch.zeros(
        (padded_movie.original_n_frames + 2 * padded_movie.t_padding - 2 * order,
         padded_movie.original_width + 2 * padded_movie.x_padding,
         padded_movie.original_height + 2 * padded_movie.y_padding),
        device=padded_movie.padded_movie_txy.device,
        dtype=padded_movie.padded_movie_txy.dtype)

    # calculate temporal moving average
    if trend_func == 'mean':
        for i_t in range(trend_movie_txy.shape[0]):
            trend_movie_txy[i_t, ...] = torch.mean(
                padded_movie.padded_movie_txy[i_t:(i_t + 2 * order + 1), ...],
                dim=0)
    elif trend_func == 'median':
        for i_t in range(trend_movie_txy.shape[0]):
            # torch.median over a dim returns (values, indices); keep values
            trend_movie_txy[i_t, ...] = torch.median(
                padded_movie.padded_movie_txy[i_t:(i_t + 2 * order + 1), ...],
                dim=0)[0]

    return PaddedMovieTorch(
        t_padding=padded_movie.t_padding - order,
        x_padding=padded_movie.x_padding,
        y_padding=padded_movie.y_padding,
        original_n_frames=padded_movie.original_n_frames,
        original_width=padded_movie.original_width,
        original_height=padded_movie.original_height,
        padded_movie_txy=trend_movie_txy)
def calculate_cross(
        padded_movie: PaddedMovieTorch,
        trend_movie: Optional[PaddedMovieTorch],
        dt: int,
        dx: int,
        dy: int,
        normalize: bool) -> torch.Tensor:
    """
    Per-pixel temporal cross-correlation between a movie and itself
    displaced by (dt, dx, dy), optionally after subtracting a slow
    temporal trend, and optionally normalized by the two standard
    deviations (Pearson-style).

    Args:
        padded_movie: movie padded by at least |dt|, |dx|, |dy|.
        trend_movie: optional trend movie with the same original
            dimensions, to be subtracted before correlating.
        dt, dx, dy: integer displacement in frames / pixels.
        normalize: if True, divide by the product of the two temporal
            standard deviations.

    Returns:
        A (original_width, original_height) tensor.
    """
    # the displacement must stay inside the available padding
    assert abs(dt) <= padded_movie.t_padding
    assert abs(dx) <= padded_movie.x_padding
    assert abs(dy) <= padded_movie.y_padding
    # movie shifted by (dt, dx, dy), cropped to the original region
    displaced_movie_txy = padded_movie.padded_movie_txy[
        (padded_movie.t_padding + dt):(padded_movie.t_padding + dt + padded_movie.original_n_frames),
        (padded_movie.x_padding + dx):(padded_movie.x_padding + dx + padded_movie.original_width),
        (padded_movie.y_padding + dy):(padded_movie.y_padding + dy + padded_movie.original_height)]
    # unshifted movie, cropped to the original region
    original_movie_txy = padded_movie.padded_movie_txy[
        (padded_movie.t_padding):(padded_movie.t_padding + padded_movie.original_n_frames),
        (padded_movie.x_padding):(padded_movie.x_padding + padded_movie.original_width),
        (padded_movie.y_padding):(padded_movie.y_padding + padded_movie.original_height)]
    norm_xy = 1.
    # subtract trend
    if trend_movie is not None:
        displaced_trend_movie_txy = trend_movie.padded_movie_txy[
            (trend_movie.t_padding + dt):(trend_movie.t_padding + dt + trend_movie.original_n_frames),
            (trend_movie.x_padding + dx):(trend_movie.x_padding + dx + trend_movie.original_width),
            (trend_movie.y_padding + dy):(trend_movie.y_padding + dy + trend_movie.original_height)]
        original_trend_movie_txy = trend_movie.padded_movie_txy[
            (trend_movie.t_padding):(trend_movie.t_padding + trend_movie.original_n_frames),
            (trend_movie.x_padding):(trend_movie.x_padding + trend_movie.original_width),
            (trend_movie.y_padding):(trend_movie.y_padding + trend_movie.original_height)]
        # temporal covariance of the mean-subtracted, detrended movies
        cross_inner_xy = torch.mean(
            (displaced_movie_txy - displaced_movie_txy.mean(0)
             - displaced_trend_movie_txy + displaced_trend_movie_txy.mean(0)) *
            (original_movie_txy - original_movie_txy.mean(0)
             - original_trend_movie_txy + original_trend_movie_txy.mean(0)),
            dim=0)
        if normalize:
            norm_xy = (
                torch.std(displaced_movie_txy - displaced_trend_movie_txy, dim=0) *
                torch.std(original_movie_txy - original_trend_movie_txy, dim=0))
    else:
        # temporal covariance of the mean-subtracted movies
        cross_inner_xy = torch.mean(
            (displaced_movie_txy - displaced_movie_txy.mean(0)) *
            (original_movie_txy - original_movie_txy.mean(0)),
            dim=0)
        if normalize:
            norm_xy = (
                torch.std(displaced_movie_txy, dim=0) *
                torch.std(original_movie_txy, dim=0))
    return cross_inner_xy / norm_xy
def get_spatially_downsampled(
        padded_movie: 'PaddedMovieTorch',
        mode: str) -> 'PaddedMovieTorch':
    """
    Downsample a padded movie by 2x in both spatial dimensions via max-
    or average-pooling; the paddings and original dimensions are halved
    accordingly.
    """
    assert mode in {'max_pool', 'avg_pool'}
    # all spatial sizes must stay integral after halving
    assert padded_movie.x_padding % 2 == 0
    assert padded_movie.y_padding % 2 == 0
    assert padded_movie.original_width % 2 == 0
    assert padded_movie.original_height % 2 == 0

    if mode == 'avg_pool':
        pool = torch.nn.functional.avg_pool2d
    elif mode == 'max_pool':
        pool = torch.nn.functional.max_pool2d
    else:
        raise RuntimeError("Should not reach here!")

    # treat the t axis as the batch dimension for 2D pooling
    halved_movie_txy = pool(
        padded_movie.padded_movie_txy.unsqueeze(0),
        kernel_size=2,
        stride=2).squeeze(0)

    return PaddedMovieTorch(
        t_padding=padded_movie.t_padding,
        x_padding=padded_movie.x_padding // 2,
        y_padding=padded_movie.y_padding // 2,
        original_n_frames=padded_movie.original_n_frames,
        original_width=padded_movie.original_width // 2,
        original_height=padded_movie.original_height // 2,
        padded_movie_txy=halved_movie_txy)
def upsample_to_numpy(frame_xy: torch.Tensor, depth: int):
    """
    Convert a 2D frame to a numpy array, bilinearly upsampling by a factor
    of 2 first when depth is nonzero.

    NOTE(review): the scale factor is fixed at 2 regardless of depth --
    confirm whether 2 ** depth was intended for depth > 1.
    """
    if depth == 0:
        return frame_xy.cpu().numpy()
    upsampled = torch.nn.functional.interpolate(
        frame_xy.unsqueeze(0).unsqueeze(0),
        mode='bilinear',
        align_corners=False,
        scale_factor=2)
    return upsampled.squeeze(0).squeeze(0).cpu().numpy()
def get_continuous_1d_mask(mask_t: np.ndarray) -> np.ndarray:
    """
    Dilate a boolean mask rightward until it contains no isolated
    single-element holes or single-element spikes in its interior.

    Args:
        mask_t: 1D boolean array of length >= 3 (not modified).

    Returns:
        A new boolean array.
    """
    assert len(mask_t) >= 3
    result_t = mask_t.copy()

    def _has_isolated_point(m):
        # an interior element disagreeing with both neighbors blocks convergence
        for i in range(1, len(m) - 1):
            if (not m[i]) and m[i - 1] and m[i + 1]:
                return True
            if m[i] and (not m[i - 1]) and (not m[i + 1]):
                return True
        return False

    while _has_isolated_point(result_t):
        # one-step dilation to the right
        result_t[1:] = result_t[1:] | result_t[:-1]
    return result_t
class OptopatchGlobalFeatureExtractor:
    def __init__(
            self,
            ws_base: 'OptopatchBaseWorkspace',
            logger: logging.Logger,
            select_active_t_range: bool = True,
            max_depth: int = 3,
            detrending_order: int = 10,
            trend_func: str = 'mean',
            downsampling_mode: str = 'avg_pool',
            padding_mode: str = 'reflect',
            eps: float = 1e-6,
            device: torch.device = torch.device("cuda"),
            dtype: torch.dtype = torch.float32):
        """
        Extract global per-pixel features from the movie held by `ws_base`.

        Args:
            ws_base: workspace exposing the raw movie (movie_txy, n_frames).
            logger: logger for progress messages.
            select_active_t_range: if True, restrict feature extraction to
                frames deemed "active" (see determine_active_t_range).
            max_depth: number of 2x spatial downsampling levels.
            detrending_order: half-width of the temporal trend window.
            trend_func: 'mean' or 'median' temporal trend.
            downsampling_mode: 'avg_pool' or 'max_pool'.
            padding_mode: np.pad mode used for movie padding.
            eps: small constant (numerical stabilizer).
            device: torch device; defaults to CUDA -- pass
                torch.device('cpu') for CPU-only runs.
            dtype: torch dtype of the working tensors.
        """
        self.ws_base = ws_base
        self.logger = logger
        self.select_active_t_range = select_active_t_range
        self.max_depth = max_depth
        self.detrending_order = detrending_order
        self.trend_func = trend_func
        self.downsampling_mode = downsampling_mode
        self.padding_mode = padding_mode
        self.eps = eps
        self.device = device
        self.dtype = dtype

        # containers
        self.active_mask_t = self.determine_active_t_range(
            ws_base, select_active_t_range)
        self.features = OptopatchGlobalFeatureContainer()

        # populate features (done eagerly at construction time)
        self._populate_features()
def log_info(self, msg: str):
logger.warning(msg)
@staticmethod
def determine_active_t_range(
ws_base: 'OptopatchBaseWorkspace',
select_active_t_range: bool):
m_t = np.mean(ws_base.movie_txy, axis=(-1, -2))
if select_active_t_range:
threshold = 0.5 * threshold_otsu(m_t)
active_mask_t = get_continuous_1d_mask(m_t > threshold)
else:
active_mask_t = np.ones((ws_base.n_frames,), dtype=np.bool)
return active_mask_t
def _populate_features(self):
# pad the original movie to power of two
input_movie_txy = self.ws_base.movie_txy[self.active_mask_t, ...]
input_movie_std_scale = np.std(input_movie_txy)
original_width = input_movie_txy.shape[-2]
original_height = input_movie_txy.shape[-1]
assert original_width % 2 == 0
assert original_height % 2 == 0
x_padding = (smallest_power_of_two(original_width) - original_width) // 2
y_padding = (smallest_power_of_two(original_height) - original_height) // 2
pow_two_padded_input_movie_txy = np.pad(
input_movie_txy,
pad_width=((0, 0), (x_padding, x_padding), (y_padding, y_padding)),
mode=self.padding_mode)
# pad additional for easy calculation of spatio-temporal cross-correlations
padded_movie = generate_padded_movie(
orig_movie_txy_np=pow_two_padded_input_movie_txy,
min_t_padding=2 * self.detrending_order,
min_x_padding=2 ** self.max_depth,
min_y_padding=2 ** self.max_depth,
padding_mode=self.padding_mode,
power_of_two=True,
device=self.device,
dtype=self.dtype)
# normalization scale
self.features.norm_scale = input_movie_std_scale
# neighbors to consider in calculating cross-correlations
corr_displacement_list = []
for dt in [0, 1]:
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if (dt, dx, dy) != (0, 0, 0):
corr_displacement_list.append((dt, dx, dy))
prev_padded_movie = padded_movie
prev_trend_movie = get_trend_movie(
padded_movie=prev_padded_movie,
order=self.detrending_order,
trend_func=self.trend_func)
for depth in range(self.max_depth + 1):
if depth > 0:
self.log_info(f'Downsampling to depth {depth}...')
current_padded_movie = get_spatially_downsampled(
padded_movie=prev_padded_movie,
mode=self.downsampling_mode)
current_trend_movie = get_spatially_downsampled(
padded_movie=prev_trend_movie,
mode=self.downsampling_mode)
else:
current_padded_movie = prev_padded_movie
current_trend_movie = prev_trend_movie
# calculate detrended std
current_detrended_var_xy = calculate_cross(
padded_movie=current_padded_movie,
trend_movie=current_trend_movie,
dt=0, dx=0, dy=0,
normalize=False)
current_detrended_std_xy = current_detrended_var_xy.sqrt() / input_movie_std_scale
self.features.feature_array_list.append(crop_center(
upsample_to_numpy(current_detrended_std_xy, depth),
target_width=original_width,
target_height=original_height))
self.features.feature_depth_list.append(depth)
self.features.feature_name_list.append(f'detrended_std_{depth}')
# calculate trend std
current_trend_var_xy = calculate_cross(
padded_movie=current_trend_movie,
trend_movie=None,
dt=0, dx=0, dy=0,
normalize=False)
current_trend_std_xy = current_trend_var_xy.sqrt() / input_movie_std_scale
self.features.feature_array_list.append(crop_center(
upsample_to_numpy(current_trend_std_xy, depth),
target_width=original_width,
target_height=original_height))
self.features.feature_depth_list.append(depth)
self.features.feature_name_list.append(f'trend_std_{depth}')
# calculate trend mean
current_trend_mean_xy = unpad_frame(
current_trend_movie.padded_movie_txy.mean(0),
current_trend_movie) / input_movie_std_scale
self.features.feature_array_list.append(crop_center(
upsample_to_numpy(current_trend_mean_xy, depth),
target_width=original_width,
target_height=original_height))
self.features.feature_depth_list.append(depth)
self.features.feature_name_list.append(f'trend_mean_{depth}')
for (dt, dx, dy) in corr_displacement_list:
self.log_info(f'Calculating x-corr ({dt}, {dx}, {dy}) at depth {depth} for detrended movie...')
current_cross_corr_xy = calculate_cross(
padded_movie=current_padded_movie,
trend_movie=current_trend_movie,
dt=dt, dx=dx, dy=dy,
normalize=False)
# normed_current_cross_corr_xy = current_cross_corr_xy
normed_current_cross_corr_xy = current_cross_corr_xy / (self.eps + current_detrended_var_xy)
self.features.feature_array_list.append(crop_center(
upsample_to_numpy(normed_current_cross_corr_xy, depth),
target_width=original_width,
target_height=original_height))
self.features.feature_depth_list.append(depth)
self.features.feature_name_list.append(f'detrended_corr_{depth}_{dt}_{dx}_{dy}')
self.log_info(f'Calculating x-corr ({dt}, {dx}, {dy}) at depth {depth} for the trend...')
current_cross_corr_xy = calculate_cross(
padded_movie=current_trend_movie,
trend_movie=None,
dt=dt, dx=dx, dy=dy,
normalize=False)
# normed_current_cross_corr_xy = current_cross_corr_xy
normed_current_cross_corr_xy = current_cross_corr_xy / (self.eps + current_trend_var_xy)
self.features.feature_array_list.append(crop_center(
upsample_to_numpy(normed_current_cross_corr_xy, depth),
target_width=original_width,
target_height=original_height))
self.features.feature_depth_list.append(depth)
self.features.feature_name_list.append(f'trend_corr_{depth}_{dt}_{dx}_{dy}') | [
"numpy.pad",
"torch.mean",
"torch.median",
"skimage.filters.threshold_otsu",
"numpy.std",
"numpy.ones",
"logging.getLogger",
"dataclasses.field",
"numpy.mean",
"torch.std",
"torch.device",
"torch.zeros",
"torch.tensor"
] | [((237, 256), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (254, 256), False, 'import logging\n'), ((725, 752), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (730, 752), False, 'from dataclasses import dataclass, field\n'), ((789, 816), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (794, 816), False, 'from dataclasses import dataclass, field\n'), ((860, 887), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (865, 887), False, 'from dataclasses import dataclass, field\n'), ((2426, 2558), 'numpy.pad', 'np.pad', (['orig_movie_txy_np'], {'pad_width': '((t_padding, t_padding), (x_padding, x_padding), (y_padding, y_padding))', 'mode': 'padding_mode'}), '(orig_movie_txy_np, pad_width=((t_padding, t_padding), (x_padding,\n x_padding), (y_padding, y_padding)), mode=padding_mode)\n', (2432, 2558), True, 'import numpy as np\n'), ((2610, 2671), 'torch.tensor', 'torch.tensor', (['padded_movie_txy_np'], {'device': 'device', 'dtype': 'dtype'}), '(padded_movie_txy_np, device=device, dtype=dtype)\n', (2622, 2671), False, 'import torch\n'), ((3197, 3508), 'torch.zeros', 'torch.zeros', (['(padded_movie.original_n_frames + 2 * padded_movie.t_padding - 2 * order, \n padded_movie.original_width + 2 * padded_movie.x_padding, padded_movie.\n original_height + 2 * padded_movie.y_padding)'], {'device': 'padded_movie.padded_movie_txy.device', 'dtype': 'padded_movie.padded_movie_txy.dtype'}), '((padded_movie.original_n_frames + 2 * padded_movie.t_padding - \n 2 * order, padded_movie.original_width + 2 * padded_movie.x_padding, \n padded_movie.original_height + 2 * padded_movie.y_padding), device=\n padded_movie.padded_movie_txy.device, dtype=padded_movie.\n padded_movie_txy.dtype)\n', (3208, 3508), False, 'import torch\n'), ((9953, 9973), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (9965, 9973), False, 'import 
torch\n'), ((10942, 10983), 'numpy.mean', 'np.mean', (['ws_base.movie_txy'], {'axis': '(-1, -2)'}), '(ws_base.movie_txy, axis=(-1, -2))\n', (10949, 10983), True, 'import numpy as np\n'), ((11441, 11464), 'numpy.std', 'np.std', (['input_movie_txy'], {}), '(input_movie_txy)\n', (11447, 11464), True, 'import numpy as np\n'), ((11865, 11985), 'numpy.pad', 'np.pad', (['input_movie_txy'], {'pad_width': '((0, 0), (x_padding, x_padding), (y_padding, y_padding))', 'mode': 'self.padding_mode'}), '(input_movie_txy, pad_width=((0, 0), (x_padding, x_padding), (\n y_padding, y_padding)), mode=self.padding_mode)\n', (11871, 11985), True, 'import numpy as np\n'), ((3694, 3772), 'torch.mean', 'torch.mean', (['padded_movie.padded_movie_txy[i_t:i_t + 2 * order + 1, ...]'], {'dim': '(0)'}), '(padded_movie.padded_movie_txy[i_t:i_t + 2 * order + 1, ...], dim=0)\n', (3704, 3772), False, 'import torch\n'), ((11178, 11221), 'numpy.ones', 'np.ones', (['(ws_base.n_frames,)'], {'dtype': 'np.bool'}), '((ws_base.n_frames,), dtype=np.bool)\n', (11185, 11221), True, 'import numpy as np\n'), ((6678, 6743), 'torch.std', 'torch.std', (['(displaced_movie_txy - displaced_trend_movie_txy)'], {'dim': '(0)'}), '(displaced_movie_txy - displaced_trend_movie_txy, dim=0)\n', (6687, 6743), False, 'import torch\n'), ((6763, 6826), 'torch.std', 'torch.std', (['(original_movie_txy - original_trend_movie_txy)'], {'dim': '(0)'}), '(original_movie_txy - original_trend_movie_txy, dim=0)\n', (6772, 6826), False, 'import torch\n'), ((7108, 7145), 'torch.std', 'torch.std', (['displaced_movie_txy'], {'dim': '(0)'}), '(displaced_movie_txy, dim=0)\n', (7117, 7145), False, 'import torch\n'), ((7165, 7201), 'torch.std', 'torch.std', (['original_movie_txy'], {'dim': '(0)'}), '(original_movie_txy, dim=0)\n', (7174, 7201), False, 'import torch\n'), ((11048, 11067), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['m_t'], {}), '(m_t)\n', (11062, 11067), False, 'from skimage.filters import threshold_otsu\n'), ((3933, 4018), 
'torch.median', 'torch.median', (['padded_movie.padded_movie_txy[i_t:i_t + 2 * order + 1, ...]'], {'dim': '(0)'}), '(padded_movie.padded_movie_txy[i_t:i_t + 2 * order + 1, ...], dim=0\n )\n', (3945, 4018), False, 'import torch\n')] |
import logging
import sys
from os.path import join, exists
from typing import Union, Optional, Dict, Any, List
from dataclasses import dataclass, replace
import numpy as np
from gpv2 import file_paths
from gpv2.data.dataset import Dataset, WebQaExample
from gpv2.model.model import PredictionArg
from gpv2.utils.py_utils import load_json_object, dump_json_object, int_to_str
@PredictionArg.register("webqa-answers")
class WebQaAnswers(PredictionArg, list):
  """Sorted list of every answer occurring in the WebQa splits, cached to disk.

  The answer vocabulary is computed once from the train/test/val splits and
  written to the cache dir; subsequent constructions just reload the file.
  """
  def __init__(self, question_types="all"):
    self.question_types = question_types
    cache_file = join(file_paths.CACHE_DIR, f"webqa-answers.json")
    if not exists(cache_file):
      logging.info(f"Computing and caching webqa answers")
      examples = []
      for part in ["train", "test", "val"]:
        examples.extend(WebQaDataset(part, qtypes=self.question_types).load())
      answers = sorted({ex.answer for ex in examples})
      dump_json_object(answers, cache_file, indent=2)
    else:
      answers = load_json_object(cache_file)
    super().__init__(answers)
@Dataset.register("webqa")
class WebQaDataset(Dataset):
  """Loads the WebQa data

  (currently this is a standin class since the image set is not completely released)
  """
  # short name -> question type(s) grouped under that name
  QTYPES_NAME_TO_TYPES = {
    "1n": "1n",
    "1": ("1n", "1v", "1a"),
    "1and2": ("1n", "1v", "1a", "2a", "2v"),
    "1q": ("q", "1n", "1v", "1a"),
    "q": ("q", ),
    "basic": ("q", "1n", "1v", "1a", "2a", "2v")
  }
  # reverse lookup: frozenset of question types -> canonical short name
  QTYPES_TYPES_TO_NAMES = {
    frozenset(ts): name for name, ts in QTYPES_NAME_TO_TYPES.items()
  }
  def __init__(self, split: str, sample=None, qtypes="basic"):
    if split not in ("test", "val", "train"):
      raise ValueError(split)
    self.split = split
    self.sample = sample
    if isinstance(qtypes, str):
      self.qtypes = self.QTYPES_NAME_TO_TYPES[qtypes]
    else:
      assert len(qtypes) == len(set(qtypes))
      self.qtypes = qtypes
  def get_source_name(self) -> str:
    """Name of the underlying data source."""
    return "webqa"
  def get_qtypes_name(self):
    """Canonical short name for the configured question types."""
    if len(self.qtypes) == 1:
      return self.qtypes[0]
    return self.QTYPES_TYPES_TO_NAMES[frozenset(self.qtypes)]
  def get_name(self) -> str:
    """Unique dataset name encoding version, question types, split and sample."""
    parts = [f"webqa-v4", self.get_qtypes_name(), self.split]
    if self.sample is not None:
      parts.append(f"s{int_to_str(self.sample)}")
    return "-".join(parts)
  def get_answer_options(self, synonyms=False):
    if synonyms:
      raise NotImplementedError()
    return WebQaAnswers(self.qtypes)
  def load(self) -> List[WebQaExample]:
    instances = load_webqa(self.split, self.qtypes)
    if not self.sample:
      return instances
    # deterministic subsample: fixed sort order, then a fixed-seed shuffle
    instances.sort(key=lambda ex: ex.gpv_id)
    np.random.RandomState(613423).shuffle(instances)
    return instances[:self.sample]
def _intern(x):
  """Intern *x* when it is a string; pass None through unchanged."""
  return None if x is None else sys.intern(x)
def load_webqa(split, qtypes):
  """Read the raw WebQa image-info file for *split* and expand each record
  into one WebQaExample per requested question type in *qtypes*.
  """
  file = join(file_paths.WEBQA_DIR, split + "_image_info.json")
  prefix = "web" if split == "val" else f"web-{split}"
  logging.info(f"Loading webqa data from {file}")
  raw_instances = load_json_object(file)
  out = []
  for i, x in enumerate(raw_instances):
    image = x["image"]
    image_id = image["image_id"] if isinstance(image, dict) else image
    base = WebQaExample(
      None, image_id, None,
      None, noun=_intern(x["noun"]),
      adj=_intern(x["adj"]), verb=_intern(x["verb"])
    )
    # collect the (qtype, answer) pairs this record expands into
    qtype_answer_pairs = []
    if "1n" in qtypes:
      qtype_answer_pairs.append(("1n", base.noun))
    if "q" in qtypes:
      qtype_answer_pairs.append(("q", _intern(x["bing_query"])))
    if base.verb is not None:
      qtype_answer_pairs.extend((q, base.verb) for q in ["1v", "2v"] if q in qtypes)
    if base.adj is not None:
      qtype_answer_pairs.extend((q, base.adj) for q in ["1a", "2a"] if q in qtypes)
    for q, ans in qtype_answer_pairs:
      out.append(replace(base, qtype=q, answer=ans, gpv_id=f"{prefix}{i}-{q}"))
  return out
| [
"gpv2.data.dataset.Dataset.register",
"gpv2.utils.py_utils.int_to_str",
"gpv2.utils.py_utils.load_json_object",
"gpv2.model.model.PredictionArg.register",
"os.path.exists",
"numpy.random.RandomState",
"logging.info",
"gpv2.utils.py_utils.dump_json_object",
"os.path.join",
"dataclasses.replace",
... | [((381, 420), 'gpv2.model.model.PredictionArg.register', 'PredictionArg.register', (['"""webqa-answers"""'], {}), "('webqa-answers')\n", (403, 420), False, 'from gpv2.model.model import PredictionArg\n'), ((1037, 1062), 'gpv2.data.dataset.Dataset.register', 'Dataset.register', (['"""webqa"""'], {}), "('webqa')\n", (1053, 1062), False, 'from gpv2.data.dataset import Dataset, WebQaExample\n'), ((2796, 2809), 'sys.intern', 'sys.intern', (['x'], {}), '(x)\n', (2806, 2809), False, 'import sys\n'), ((2852, 2906), 'os.path.join', 'join', (['file_paths.WEBQA_DIR', "(split + '_image_info.json')"], {}), "(file_paths.WEBQA_DIR, split + '_image_info.json')\n", (2856, 2906), False, 'from os.path import join, exists\n'), ((2965, 3012), 'logging.info', 'logging.info', (['f"""Loading webqa data from {file}"""'], {}), "(f'Loading webqa data from {file}')\n", (2977, 3012), False, 'import logging\n'), ((3031, 3053), 'gpv2.utils.py_utils.load_json_object', 'load_json_object', (['file'], {}), '(file)\n', (3047, 3053), False, 'from gpv2.utils.py_utils import load_json_object, dump_json_object, int_to_str\n'), ((565, 614), 'os.path.join', 'join', (['file_paths.CACHE_DIR', 'f"""webqa-answers.json"""'], {}), "(file_paths.CACHE_DIR, f'webqa-answers.json')\n", (569, 614), False, 'from os.path import join, exists\n'), ((622, 640), 'os.path.exists', 'exists', (['cache_file'], {}), '(cache_file)\n', (628, 640), False, 'from os.path import join, exists\n'), ((658, 686), 'gpv2.utils.py_utils.load_json_object', 'load_json_object', (['cache_file'], {}), '(cache_file)\n', (674, 686), False, 'from gpv2.utils.py_utils import load_json_object, dump_json_object, int_to_str\n'), ((703, 755), 'logging.info', 'logging.info', (['f"""Computing and caching webqa answers"""'], {}), "(f'Computing and caching webqa answers')\n", (715, 755), False, 'import logging\n'), ((956, 1003), 'gpv2.utils.py_utils.dump_json_object', 'dump_json_object', (['answers', 'cache_file'], {'indent': '(2)'}), '(answers, 
cache_file, indent=2)\n', (972, 1003), False, 'from gpv2.utils.py_utils import load_json_object, dump_json_object, int_to_str\n'), ((3765, 3824), 'dataclasses.replace', 'replace', (['ex'], {'qtype': 'q', 'answer': 'ans', 'gpv_id': 'f"""{prefix}{i}-{q}"""'}), "(ex, qtype=q, answer=ans, gpv_id=f'{prefix}{i}-{q}')\n", (3772, 3824), False, 'from dataclasses import dataclass, replace\n'), ((2275, 2298), 'gpv2.utils.py_utils.int_to_str', 'int_to_str', (['self.sample'], {}), '(self.sample)\n', (2285, 2298), False, 'from gpv2.utils.py_utils import load_json_object, dump_json_object, int_to_str\n'), ((2618, 2647), 'numpy.random.RandomState', 'np.random.RandomState', (['(613423)'], {}), '(613423)\n', (2639, 2647), True, 'import numpy as np\n')] |
import json
from pathlib import Path
import numpy as np
import pandas as pd
class Index:
    """Wraps a similarity-search index (nmslib or faiss) over sentence
    embeddings and formats hits by mapping them back to CORD-19 articles.
    """
    def __init__(self, index_path, index_type, articles_path, mapping, metadata, k, num_workers):
        """
        :param index_path: path of the serialized index on disk
        :param index_type: 'nmslib' or 'faiss'
        :param articles_path: directory holding per-article JSON files
        :param mapping: sentence id -> {paper_id, paragraph_idx, sentence_idx}
        :param metadata: pandas DataFrame with a 'sha' column (CORD-19 metadata.csv)
        :param k: number of hits returned per query
        :param num_workers: threads used for batched nmslib queries
        """
        self.index = self.load_index(index_path, index_type)
        self.index_type = index_type
        self.articles_path = articles_path
        self.mapping = mapping
        self.metadata = metadata
        self.k = k
        self.num_workers = num_workers
    def load_index(self, index_path, index_type):
        """Load a serialized index; raises TypeError for an unknown backend."""
        if index_type == 'nmslib':
            # imported lazily so only the chosen backend needs to be installed
            import nmslib
            index = nmslib.init(method='hnsw', space='cosinesimil')
            index.loadIndex(index_path)
        elif index_type == 'faiss':
            import faiss
            index = faiss.read_index(index_path)
        else:
            raise TypeError('Index type can only be faiss or nmslib.')
        return index
    def search_index(self, sentences, search_embeddings, return_batch_ids=False):
        """Query the index with one embedding row per sentence and return the
        formatted hit dictionaries (optionally also the raw hit id matrix).
        """
        if self.index_type == 'nmslib':
            batch = self.index.knnQueryBatch(search_embeddings,
                                             k=self.k,
                                             num_threads=self.num_workers)
            batch = np.array(batch)
            # FIX: the np.int alias was removed in numpy>=1.24; the builtin
            # ``int`` is exactly what the alias pointed to.
            batch_ids = batch[:, 0].astype(int)
            batch_distances = batch[:, 1].astype(np.float32)
        elif self.index_type == 'faiss':
            batch_distances, batch_ids = self.index.search(np.array(search_embeddings), k=self.k)
        else:
            raise TypeError('Index type can only be faiss or nmslib.')
        results = self._format_results(batch_ids=batch_ids,
                                       batch_distances=batch_distances,
                                       sentences=sentences,
                                       articles_path=self.articles_path,
                                       mapping=self.mapping)
        if return_batch_ids:
            return results, batch_ids
        return results
    def _load_article(self, articles_path, paper_id):
        """Load the JSON article ``<articles_path>/<paper_id>.json``."""
        json_path = Path(articles_path) / (paper_id + '.json')
        with json_path.open() as f:
            article = json.load(f)
        return article
    def _find_metadata(self, paper_id):
        """Look up CORD-19 metadata for ``paper_id`` (by 'sha' column).

        :return: dict with doi/url/journal/publish_time ('N/A' for missing
            values) when exactly one row matches, otherwise None.
        """
        metadata = self.metadata[self.metadata['sha'] == paper_id]
        if len(metadata) == 1:
            metadata = metadata.iloc[0].to_dict()
            return {
                'doi': metadata['doi'] if not pd.isna(metadata['doi']) else 'N/A',
                'url': metadata['url'] if not pd.isna(metadata['url']) else 'N/A',
                'journal': metadata['journal'] if not pd.isna(metadata['journal']) else 'N/A',
                'publish_time': metadata['publish_time'] if not pd.isna(metadata['publish_time']) else 'N/A',
            }
        else:
            return None  # No metadata was found
    def _extract_k_hits(self, ids, distances, sentence, articles_path, sent_article_mapping):
        """Build the result dict for one query: the query text plus its k hits,
        each resolved back to its article, paragraph and sentence."""
        extracted = {
            "query": sentence,
            "hits": []
        }
        for id, distance in zip(ids, distances):
            mapping = sent_article_mapping[id]
            paragraph_idx = mapping["paragraph_idx"]
            sentence_idx = mapping["sentence_idx"]
            paper_id = mapping["paper_id"]
            article = self._load_article(articles_path=articles_path,
                                         paper_id=paper_id)
            hit = {
                'title': article['metadata']['title'],
                'authors': article['metadata']['authors'],
                'paragraph': article['body_text'][paragraph_idx],
                'sentence': article['body_text'][paragraph_idx]["sentences"][sentence_idx],
                'abstract': article['abstract'],
                'distance': float(distance),
            }
            # metadata is optional: omit the key when no unique row matches
            metadata = self._find_metadata(paper_id)
            if metadata:
                hit['metadata'] = metadata
            extracted["hits"].append(hit)
        return extracted
    def _format_results(self, batch_ids, batch_distances, sentences, articles_path, mapping):
        """Format the raw id/distance matrices into one result dict per query."""
        return [self._extract_k_hits(ids=batch_ids[x],
                                     distances=batch_distances[x],
                                     sentence=query_sentence,
                                     articles_path=articles_path,
                                     sent_article_mapping=mapping) for x, query_sentence in enumerate(sentences)]
def search_args(parser):
    """Attach the index-search command line options to *parser*.

    :param parser: an argparse-style parser exposing ``add_argument``.
    :return: the same parser, to allow chaining.
    """
    option_table = [
        (['--index_path'],
         dict(default="index", help='Path to the created index')),
        (['--index_type'],
         dict(default="nmslib", type=str, choices=["nmslib", "faiss"],
              help='Type of index')),
        (['--dataset_path'],
         dict(default="cord_19_dataset_formatted/",
              help='Path to the extracted dataset')),
        (['--model_name_or_path'],
         dict(default='bert-base-nli-mean-tokens')),
        (['--batch_size'],
         dict(default=8, type=int,
              help='Batch size for the transformer model encoding')),
        (['--num_workers'],
         dict(default=8, type=int,
              help='Number of workers to use when parallelizing the index search')),
        (['--k'],
         dict(default=10, type=int,
              help='The top K hits to return from the index')),
        (['--device'],
         dict(default='cpu', help='Set to cuda to use the GPU')),
        (['--silent'],
         dict(action="store_true", help='Turn off progress bar when searching')),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser
def paths_from_dataset_path(dataset_path):
    """
    Creates paths to the files required for searching the index.
    :param dataset_path: The path to the extracted dataset.
    :return: Paths to various important files/folders for searching the index.
    """
    base = Path(dataset_path)
    return (
        base / 'articles/',
        base / 'cord_19_sentences.txt',
        base / 'cord_19_sent_to_article_mapping.json',
        base / 'metadata.csv',
    )
| [
"json.load",
"nmslib.init",
"faiss.read_index",
"pathlib.Path",
"numpy.array",
"pandas.isna"
] | [((5936, 5954), 'pathlib.Path', 'Path', (['dataset_path'], {}), '(dataset_path)\n', (5940, 5954), False, 'from pathlib import Path\n'), ((585, 632), 'nmslib.init', 'nmslib.init', ([], {'method': '"""hnsw"""', 'space': '"""cosinesimil"""'}), "(method='hnsw', space='cosinesimil')\n", (596, 632), False, 'import nmslib\n'), ((1226, 1241), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (1234, 1241), True, 'import numpy as np\n'), ((2070, 2089), 'pathlib.Path', 'Path', (['articles_path'], {}), '(articles_path)\n', (2074, 2089), False, 'from pathlib import Path\n'), ((2171, 2183), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2180, 2183), False, 'import json\n'), ((754, 782), 'faiss.read_index', 'faiss.read_index', (['index_path'], {}), '(index_path)\n', (770, 782), False, 'import faiss\n'), ((1454, 1481), 'numpy.array', 'np.array', (['search_embeddings'], {}), '(search_embeddings)\n', (1462, 1481), True, 'import numpy as np\n'), ((2463, 2487), 'pandas.isna', 'pd.isna', (["metadata['doi']"], {}), "(metadata['doi'])\n", (2470, 2487), True, 'import pandas as pd\n'), ((2546, 2570), 'pandas.isna', 'pd.isna', (["metadata['url']"], {}), "(metadata['url'])\n", (2553, 2570), True, 'import pandas as pd\n'), ((2637, 2665), 'pandas.isna', 'pd.isna', (["metadata['journal']"], {}), "(metadata['journal'])\n", (2644, 2665), True, 'import pandas as pd\n'), ((2742, 2775), 'pandas.isna', 'pd.isna', (["metadata['publish_time']"], {}), "(metadata['publish_time'])\n", (2749, 2775), True, 'import pandas as pd\n')] |
import numpy as np
import random
from settree.set_data import SetDataset
########################################################################################################################
# EXP 1: First quarter
########################################################################################################################
def get_first_quarter_data(num_samples, min_items_set=2, max_items_set=10, dim=2):
    """Build a binary set-classification dataset around the first quadrant.

    Negative sets (label 0) contain only points with at least one negative
    coordinate; positive sets (label 1) additionally get point(s) injected
    into the first (all-nonnegative) quadrant.

    :return: (x, y) where x is a list of (n_i, dim) arrays with
        min_items_set <= n_i < max_items_set, and y the int64 labels.
    """
    def _draw_point_outside_first_quarter(dim=2):
        # rejection-sample until at least one coordinate is negative
        while True:
            candidate = np.random.uniform(-1, 1, dim)
            if sum(candidate >= 0) < dim:
                return tuple(candidate)

    def _draw_set(n_items, dim):
        return np.stack([_draw_point_outside_first_quarter(dim) for _ in range(n_items)])

    def _inject_first_quarter_points(samples, lo=1, hi=1, dim=2):
        # overwrite the first `num` rows with first-quadrant points
        num = random.choice(range(lo, hi + 1))
        samples[:num, :] = np.random.uniform(low=0, high=1, size=(num, dim))
        return samples

    half = num_samples // 2
    negatives = [_draw_set(random.choice(range(min_items_set, max_items_set)), dim)
                 for _ in range(half)]
    positives = [_draw_set(random.choice(range(min_items_set, max_items_set)), dim)
                 for _ in range(half)]
    positives = [_inject_first_quarter_points(s, lo=1, hi=1, dim=dim) for s in positives]
    x = negatives + positives
    y = np.concatenate([np.zeros(half), np.ones(half)]).astype(np.int64)
    return x, y
########################################################################################################################
# EXP 2: Stats
########################################################################################################################
def get_data_uniform_vs_normal(n, set_size):
    """Set-classification task: pure-distribution sets vs normal/uniform mixtures.

    Each record is a (set_size, 2) array: column 0 holds the sampled values,
    column 1 a 0/1 group id (first half 0, second half 1). The first n//2
    records (label 0) are drawn entirely from one distribution (normal or
    uniform, alternating); the last n//2 (label 1) mix a normal half with a
    uniform half.

    :return: (x, y) with len(x) == len(y) == n when n is divisible by 4.
    """
    # NOTE(review): the original drew np.random.rand() to choose between two
    # *identical* `ids` arrays — dead code (possibly a flipped layout was
    # intended); the single deterministic layout is kept.
    ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
    pos = []
    for _ in range(n // 4):
        pos.append(np.stack([np.random.normal(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
        pos.append(np.stack([np.random.uniform(low=-1.0, high=1.0, size=(set_size,)), ids], axis=1))
    neg = []
    for _ in range(n // 2):
        values = np.concatenate([
            np.random.normal(loc=0.0, scale=1.0, size=(set_size // 2,)),
            np.random.uniform(low=-1.0, high=1.0, size=(set_size // 2,)),
        ])
        neg.append(np.stack([values, ids], axis=1))
    y = np.array([0] * (n // 2) + [1] * (n // 2))
    x = pos + neg
    return x, y
def get_data_laplace_vs_normal(n, set_size):
    """Set-classification task: pure-distribution sets vs normal/laplace mixtures.

    Each record is a (set_size, 2) array: column 0 holds the sampled values,
    column 1 a 0/1 group id (first half 0, second half 1). The first n//2
    records (label 0) are pure normal or pure laplace sets (alternating); the
    last n//2 (label 1) mix a normal half with a laplace half.

    :return: (x, y) with len(x) == len(y) == n when n is divisible by 4.
    """
    # NOTE(review): the original drew np.random.rand() to choose between two
    # *identical* `ids` arrays — dead code (possibly a flipped layout was
    # intended); the single deterministic layout is kept.
    ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
    pos = []
    for _ in range(n // 4):
        pos.append(np.stack([np.random.normal(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
        pos.append(np.stack([np.random.laplace(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
    neg = []
    for _ in range(n // 2):
        values = np.concatenate([
            np.random.normal(loc=0.0, scale=1.0, size=(set_size // 2,)),
            np.random.laplace(loc=0.0, scale=1.0, size=(set_size // 2,)),
        ])
        neg.append(np.stack([values, ids], axis=1))
    y = np.array([0] * (n // 2) + [1] * (n // 2))
    x = pos + neg
    return x, y
def get_data_different_mu_normal(n, set_size):
    """Set-classification task on the mean of normal samples.

    Each record is a (set_size, 2) array: column 0 holds the samples, column 1
    a 0/1 group id (first half 0, second half 1). The first n//2 records
    (label 0) are drawn with one random mean shared by each pair of sets; the
    last n//2 (label 1) concatenate two halves with independently drawn means.

    :return: (x, y) with len(x) == len(y) == n when n is divisible by 4.
    """
    # NOTE(review): the original drew np.random.rand() to choose between two
    # *identical* `ids` arrays — dead code (possibly a flipped layout was
    # intended); the single deterministic layout is kept.
    ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
    pos = []
    for _ in range(n // 4):
        mu = np.random.randn()
        # the two paired sets share the same random mean
        pos.append(np.stack([np.random.normal(loc=mu, scale=1.0, size=(set_size,)), ids], axis=1))
        pos.append(np.stack([np.random.normal(loc=mu, scale=1.0, size=(set_size,)), ids], axis=1))
    neg = []
    for _ in range(n // 2):
        values = np.concatenate([
            np.random.normal(loc=np.random.randn(), scale=1.0, size=(set_size // 2,)),
            np.random.normal(loc=np.random.randn(), scale=1.0, size=(set_size // 2,)),
        ])
        neg.append(np.stack([values, ids], axis=1))
    y = np.array([0] * (n // 2) + [1] * (n // 2))
    x = pos + neg
    return x, y
def get_data_different_sigma_normal(n, set_size):
    """Set-classification task on the std of normal samples.

    Each record is a (set_size, 2) array: column 0 holds the samples, column 1
    a 0/1 group id (first half 0, second half 1). The first n//2 records
    (label 0) are drawn with one random scale shared by each pair of sets; the
    last n//2 (label 1) concatenate two halves with independently drawn scales.

    :return: (x, y) with len(x) == len(y) == n when n is divisible by 4.
    """
    # NOTE(review): the original drew np.random.rand() to choose between two
    # *identical* `ids` arrays — dead code (possibly a flipped layout was
    # intended); the single deterministic layout is kept.
    ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
    pos = []
    for _ in range(n // 4):
        sig = np.abs(np.random.randn())
        # the two paired sets share the same random scale
        pos.append(np.stack([np.random.normal(loc=0.0, scale=sig, size=(set_size,)), ids], axis=1))
        pos.append(np.stack([np.random.normal(loc=0.0, scale=sig, size=(set_size,)), ids], axis=1))
    neg = []
    for _ in range(n // 2):
        values = np.concatenate([
            np.random.normal(loc=0.0, scale=np.abs(np.random.randn()), size=(set_size // 2,)),
            np.random.normal(loc=0.0, scale=np.abs(np.random.randn()), size=(set_size // 2,)),
        ])
        neg.append(np.stack([values, ids], axis=1))
    y = np.array([0] * (n // 2) + [1] * (n // 2))
    x = pos + neg
    return x, y
def get_data_by_task(task_name, params):
    """Build train/test SetDatasets for one of the synthetic benchmark tasks.

    :param task_name: one of 'different_uniform_normal',
        'different_laplace_normal', 'different_mean', 'different_std'
    :param params: dict with 'n_train', 'n_test' and 'set_size' entries
    :return: (ds_train, y_train, ds_test, y_test)
    :raises ValueError: for an unknown task name
    """
    if task_name == 'different_uniform_normal':
        # 1) different distributions
        generate = get_data_uniform_vs_normal
    elif task_name == 'different_laplace_normal':
        # 1) different distributions
        generate = get_data_laplace_vs_normal
    elif task_name == 'different_mean':
        # 2) different mean
        generate = get_data_different_mu_normal
    elif task_name == 'different_std':
        # 3) different sigma
        generate = get_data_different_sigma_normal
    else:
        raise ValueError
    x_train, y_train = generate(params['n_train'], params['set_size'])
    ds_train = SetDataset(records=x_train, is_init=True)
    x_test, y_test = generate(params['n_test'], params['set_size'])
    ds_test = SetDataset(records=x_test, is_init=True)
    return ds_train, y_train, ds_test, y_test
| [
"numpy.random.uniform",
"numpy.random.randn",
"numpy.random.laplace",
"settree.set_data.SetDataset",
"numpy.array",
"numpy.random.normal",
"numpy.random.rand"
] | [((2693, 2734), 'numpy.array', 'np.array', (['([0] * (n // 2) + [1] * (n // 2))'], {}), '([0] * (n // 2) + [1] * (n // 2))\n', (2701, 2734), True, 'import numpy as np\n'), ((3720, 3761), 'numpy.array', 'np.array', (['([0] * (n // 2) + [1] * (n // 2))'], {}), '([0] * (n // 2) + [1] * (n // 2))\n', (3728, 3761), True, 'import numpy as np\n'), ((4803, 4844), 'numpy.array', 'np.array', (['([0] * (n // 2) + [1] * (n // 2))'], {}), '([0] * (n // 2) + [1] * (n // 2))\n', (4811, 4844), True, 'import numpy as np\n'), ((5916, 5957), 'numpy.array', 'np.array', (['([0] * (n // 2) + [1] * (n // 2))'], {}), '([0] * (n // 2) + [1] * (n // 2))\n', (5924, 5957), True, 'import numpy as np\n'), ((573, 622), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': '(num, dim)'}), '(low=0, high=1, size=(num, dim))\n', (590, 622), True, 'import numpy as np\n'), ((4578, 4595), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4593, 4595), True, 'import numpy as np\n'), ((6232, 6273), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_train', 'is_init': '(True)'}), '(records=x_train, is_init=True)\n', (6242, 6273), False, 'from settree.set_data import SetDataset\n'), ((6382, 6422), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_test', 'is_init': '(True)'}), '(records=x_test, is_init=True)\n', (6392, 6422), False, 'from settree.set_data import SetDataset\n'), ((830, 859), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'dim'], {}), '(-1, 1, dim)\n', (847, 859), True, 'import numpy as np\n'), ((1839, 1855), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1853, 1855), True, 'import numpy as np\n'), ((1881, 1936), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (1889, 1936), True, 'import numpy as np\n'), ((1969, 2024), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), 
'([0] * (set_size // 2) + [1] * (set_size // 2))\n', (1977, 2024), True, 'import numpy as np\n'), ((2296, 2312), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2310, 2312), True, 'import numpy as np\n'), ((2338, 2393), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (2346, 2393), True, 'import numpy as np\n'), ((2426, 2481), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (2434, 2481), True, 'import numpy as np\n'), ((2866, 2882), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2880, 2882), True, 'import numpy as np\n'), ((2908, 2963), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (2916, 2963), True, 'import numpy as np\n'), ((2996, 3051), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (3004, 3051), True, 'import numpy as np\n'), ((3323, 3339), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3337, 3339), True, 'import numpy as np\n'), ((3365, 3420), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (3373, 3420), True, 'import numpy as np\n'), ((3453, 3508), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (3461, 3508), True, 'import numpy as np\n'), ((3895, 3911), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3909, 3911), True, 'import numpy as np\n'), ((3937, 3992), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (3945, 3992), True, 'import numpy as np\n'), ((4025, 4080), 'numpy.array', 
'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (4033, 4080), True, 'import numpy as np\n'), ((4379, 4395), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4393, 4395), True, 'import numpy as np\n'), ((4421, 4476), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (4429, 4476), True, 'import numpy as np\n'), ((4509, 4564), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (4517, 4564), True, 'import numpy as np\n'), ((4981, 4997), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4995, 4997), True, 'import numpy as np\n'), ((5023, 5078), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (5031, 5078), True, 'import numpy as np\n'), ((5111, 5166), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (5119, 5166), True, 'import numpy as np\n'), ((5481, 5497), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5495, 5497), True, 'import numpy as np\n'), ((5523, 5578), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (5531, 5578), True, 'import numpy as np\n'), ((5611, 5666), 'numpy.array', 'np.array', (['([0] * (set_size // 2) + [1] * (set_size // 2))'], {}), '([0] * (set_size // 2) + [1] * (set_size // 2))\n', (5619, 5666), True, 'import numpy as np\n'), ((5688, 5705), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5703, 5705), True, 'import numpy as np\n'), ((6623, 6664), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_train', 'is_init': '(True)'}), '(records=x_train, is_init=True)\n', (6633, 6664), 
False, 'from settree.set_data import SetDataset\n'), ((6773, 6813), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_test', 'is_init': '(True)'}), '(records=x_test, is_init=True)\n', (6783, 6813), False, 'from settree.set_data import SetDataset\n'), ((6997, 7038), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_train', 'is_init': '(True)'}), '(records=x_train, is_init=True)\n', (7007, 7038), False, 'from settree.set_data import SetDataset\n'), ((7149, 7189), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_test', 'is_init': '(True)'}), '(records=x_test, is_init=True)\n', (7159, 7189), False, 'from settree.set_data import SetDataset\n'), ((2512, 2566), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size,)'}), '(loc=0.0, scale=1.0, size=(set_size,))\n', (2528, 2566), True, 'import numpy as np\n'), ((2612, 2667), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(set_size,)'}), '(low=-1.0, high=1.0, size=(set_size,))\n', (2629, 2667), True, 'import numpy as np\n'), ((3539, 3593), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size,)'}), '(loc=0.0, scale=1.0, size=(set_size,))\n', (3555, 3593), True, 'import numpy as np\n'), ((3639, 3694), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size,)'}), '(loc=0.0, scale=1.0, size=(set_size,))\n', (3656, 3694), True, 'import numpy as np\n'), ((4625, 4678), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mu', 'scale': '(1.0)', 'size': '(set_size,)'}), '(loc=mu, scale=1.0, size=(set_size,))\n', (4641, 4678), True, 'import numpy as np\n'), ((4724, 4777), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mu', 'scale': '(1.0)', 'size': '(set_size,)'}), '(loc=mu, scale=1.0, size=(set_size,))\n', (4740, 4777), True, 'import numpy as np\n'), ((5736, 5790), 'numpy.random.normal', 
'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'sig', 'size': '(set_size,)'}), '(loc=0.0, scale=sig, size=(set_size,))\n', (5752, 5790), True, 'import numpy as np\n'), ((5836, 5890), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'sig', 'size': '(set_size,)'}), '(loc=0.0, scale=sig, size=(set_size,))\n', (5852, 5890), True, 'import numpy as np\n'), ((7376, 7417), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_train', 'is_init': '(True)'}), '(records=x_train, is_init=True)\n', (7386, 7417), False, 'from settree.set_data import SetDataset\n'), ((7531, 7571), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_test', 'is_init': '(True)'}), '(records=x_test, is_init=True)\n', (7541, 7571), False, 'from settree.set_data import SetDataset\n'), ((2070, 2129), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size // 2,)'}), '(loc=0.0, scale=1.0, size=(set_size // 2,))\n', (2086, 2129), True, 'import numpy as np\n'), ((2166, 2226), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(set_size // 2,)'}), '(low=-1.0, high=1.0, size=(set_size // 2,))\n', (2183, 2226), True, 'import numpy as np\n'), ((3097, 3156), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size // 2,)'}), '(loc=0.0, scale=1.0, size=(set_size // 2,))\n', (3113, 3156), True, 'import numpy as np\n'), ((3193, 3253), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(set_size // 2,)'}), '(loc=0.0, scale=1.0, size=(set_size // 2,))\n', (3210, 3253), True, 'import numpy as np\n'), ((4147, 4164), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4162, 4164), True, 'import numpy as np\n'), ((4257, 4274), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4272, 4274), True, 'import numpy as np\n'), ((5251, 5268), 'numpy.random.randn', 'np.random.randn', 
([], {}), '()\n', (5266, 5268), True, 'import numpy as np\n'), ((5369, 5386), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5384, 5386), True, 'import numpy as np\n')] |
import argparse
from tqdm import tqdm
import re
import itertools
from collections import Counter
import numpy as np
from sklearn.model_selection import train_test_split
#from data import create_data,pad_sequences
import mxnet as mx
import os
import pickle
import time
from data import SentimentIter
from models.model_cnn import sent_model
DATAPATH = './'
# DATAPATH = '/media/drive/sentiment/'

# Command-line interface for the sentiment training/evaluation script.
parser = argparse.ArgumentParser(description='Semtiment training')
parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num-embedding', default=1024, type=int, help='Dimension of the word embeddings')
parser.add_argument('--hidden-size', default=1024, type=int, help='Hidden size of RNNs')
parser.add_argument('--eval', default=False, help='Checkpoint prefix to evaluate; when set, main() runs evaluation only')
parser.add_argument('--token', default='spacy', help='use spacy tokenizer or not')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--max-seq-len', default=1000, type=int, help='Maximum input sequence length')
parser.add_argument('--vocab-size', default=5200, type=int, help='Vocabulary size')
parser.add_argument('--model', default='rnn', help='Model type cnn or rnn')
parser.add_argument('--calc_accuracy', dest='calc_accuracy', action='store_true', help='Calc accuracy on the full validation dataset')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
# Bug fix: main() reads args.gpus when --cuda is given, but no --gpus argument
# was defined, which raised AttributeError. Provide it with a sensible default.
parser.add_argument('--gpus', default='0', help='Comma-separated GPU ids, used together with --cuda')
parser.add_argument('--epoch', type=int, default=0)
def main():
    """Evaluate a trained sentiment model on the test iterator.

    Parses CLI flags from the module-level ``parser``; when ``--eval`` is set,
    loads that checkpoint and reports accuracy over ``SentimentIter`` batches.
    """
    # NOTE(review): best_prec1 is declared global but never assigned in this
    # function — presumably a leftover from a training loop; confirm.
    global args, best_prec1
    args = parser.parse_args()
    # Seed both mxnet and numpy RNGs for reproducibility.
    mx.random.seed(args.seed)
    np.random.seed(args.seed)
    test_iter=SentimentIter(data_path='./Datasets',data_shapes=(args.batch_size,args.max_seq_len), label_shapes=(args.batch_size,2),calc_accuracy=args.calc_accuracy,batch_size=args.batch_size)
    if args.cuda:
        # NOTE(review): requires a --gpus argument on the parser; the parser
        # as originally written does not define one — verify before using --cuda.
        gpu_ids = [int(g) for g in args.gpus.split(',')]
        print('Using GPUs: {}'.format(gpu_ids))
        # Only the first listed GPU is actually used for the module context.
        xpu = mx.gpu(device_id=gpu_ids[0]) if gpu_ids else mx.cpu()
    else:
        xpu=mx.cpu()
    ##Create Module
    mod = mx.mod.Module(*sent_model(vocab_size=args.vocab_size,emb_dim=args.num_embedding,num_hidden=args.hidden_size,num_classes=2,batch_size=args.batch_size),context=xpu)
    def evaluate_accuracy_fit(label,pred):
        # Fraction of samples whose argmax prediction matches the argmax label.
        # NOTE(review): the mx.metric.Accuracy() object below is created and
        # immediately overwritten by the manual computation — dead code.
        acc = mx.metric.Accuracy()
        predictions = pred.argmax(1)
        acc=1.0-np.abs(predictions-label.argmax(1)).sum()/len(label)
        return acc
    if args.eval:
        print('--------------Running Evaluation -------------')
        # Restore weights from the checkpoint prefix/epoch given on the CLI.
        sym, arg_params, aux_params = mx.model.load_checkpoint(args.eval, args.epoch)
        mod.bind(data_shapes=test_iter.provide_data, label_shapes=test_iter.provide_label,for_training=False)
        mod.set_params(arg_params=arg_params,aux_params=aux_params,allow_missing= False)
        start_time = time.time()
        acc_test=0
        ii=0
        if args.calc_accuracy:
            # Full-dataset pass: iterate the finite test iterator once.
            #test_iter_acc = test_iter.get_acc_iter()
            start_time = time.time()
            num_samples=len(test_iter.all_labels)
            for i, batch in enumerate(test_iter):
                if i%10==0 and i!=0:
                    print('test: %s %%' % (100*i*args.batch_size/num_samples) ,acc_test/i)
                # Move batch tensors to the evaluation device before forward().
                batch.data[0]=batch.data[0].as_in_context(xpu)
                batch.label[0]=batch.label[0].as_in_context(xpu)
                target=batch.label[0]
                mod.forward(batch, is_train=False)
                pred=mod.get_outputs()[0].asnumpy()
                acc_test+=evaluate_accuracy_fit(target.asnumpy(),pred)
            acc_test/=(i+1)
            end_time = time.time()
            print("Final test_acc %s ,Time %s" %(acc_test,end_time - start_time))
        else:
            # NOTE(review): this loop never breaks, so the three statements
            # after it (final accuracy / timing prints) are unreachable.
            while True:
                start_time_iter = time.time()
                ii+=1
                batch=test_iter.next()
                if ii%10==0 and ii!=0:
                    print('Tested %s batches with average accuracy: %s' % (ii ,acc_test/ii))
                batch.data[0]=batch.data[0].as_in_context(xpu)
                batch.label[0]=batch.label[0].as_in_context(xpu)
                target=batch.label[0]
                mod.forward(batch, is_train=False)
                pred=mod.get_outputs()[0].asnumpy()
                end_time_iter = time.time()
                print('Time for current iteration: %s ' %(end_time_iter-start_time_iter))
                acc_test+=evaluate_accuracy_fit(batch.label[0].asnumpy(),pred)
            acc_test/=(ii+1)
            end_time = time.time()
            print("Final test_acc %s ,Time %s" %(acc_test,end_time - start_time))
if __name__ == '__main__':
    main()
| [
"models.model_cnn.sent_model",
"numpy.random.seed",
"argparse.ArgumentParser",
"mxnet.random.seed",
"mxnet.metric.Accuracy",
"time.time",
"mxnet.cpu",
"data.SentimentIter",
"mxnet.gpu",
"mxnet.model.load_checkpoint"
] | [((416, 473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Semtiment training"""'}), "(description='Semtiment training')\n", (439, 473), False, 'import argparse\n'), ((1662, 1687), 'mxnet.random.seed', 'mx.random.seed', (['args.seed'], {}), '(args.seed)\n', (1676, 1687), True, 'import mxnet as mx\n'), ((1693, 1718), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1707, 1718), True, 'import numpy as np\n'), ((1734, 1927), 'data.SentimentIter', 'SentimentIter', ([], {'data_path': '"""./Datasets"""', 'data_shapes': '(args.batch_size, args.max_seq_len)', 'label_shapes': '(args.batch_size, 2)', 'calc_accuracy': 'args.calc_accuracy', 'batch_size': 'args.batch_size'}), "(data_path='./Datasets', data_shapes=(args.batch_size, args.\n max_seq_len), label_shapes=(args.batch_size, 2), calc_accuracy=args.\n calc_accuracy, batch_size=args.batch_size)\n", (1747, 1927), False, 'from data import SentimentIter\n'), ((2134, 2142), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2140, 2142), True, 'import mxnet as mx\n'), ((2411, 2431), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (2429, 2431), True, 'import mxnet as mx\n'), ((2685, 2732), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['args.eval', 'args.epoch'], {}), '(args.eval, args.epoch)\n', (2709, 2732), True, 'import mxnet as mx\n'), ((2958, 2969), 'time.time', 'time.time', ([], {}), '()\n', (2967, 2969), False, 'import time\n'), ((2056, 2084), 'mxnet.gpu', 'mx.gpu', ([], {'device_id': 'gpu_ids[0]'}), '(device_id=gpu_ids[0])\n', (2062, 2084), True, 'import mxnet as mx\n'), ((2101, 2109), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2107, 2109), True, 'import mxnet as mx\n'), ((2198, 2340), 'models.model_cnn.sent_model', 'sent_model', ([], {'vocab_size': 'args.vocab_size', 'emb_dim': 'args.num_embedding', 'num_hidden': 'args.hidden_size', 'num_classes': '(2)', 'batch_size': 'args.batch_size'}), '(vocab_size=args.vocab_size, 
emb_dim=args.num_embedding,\n num_hidden=args.hidden_size, num_classes=2, batch_size=args.batch_size)\n', (2208, 2340), False, 'from models.model_cnn import sent_model\n'), ((3121, 3132), 'time.time', 'time.time', ([], {}), '()\n', (3130, 3132), False, 'import time\n'), ((3766, 3777), 'time.time', 'time.time', ([], {}), '()\n', (3775, 3777), False, 'import time\n'), ((4701, 4712), 'time.time', 'time.time', ([], {}), '()\n', (4710, 4712), False, 'import time\n'), ((3942, 3953), 'time.time', 'time.time', ([], {}), '()\n', (3951, 3953), False, 'import time\n'), ((4458, 4469), 'time.time', 'time.time', ([], {}), '()\n', (4467, 4469), False, 'import time\n')] |
from random import shuffle
from numpy import cos, sin, pi, round, random
def generatePointsCoordinates_Circle(num_points):
    """Return ``num_points`` points evenly spaced on a circle of radius 0.5
    centred at (0.5, 0.5); coordinates are rounded to 4 decimal places."""
    radius = 0.5
    angles = ((2 * pi * k) / num_points for k in range(1, num_points + 1))
    return [
        [round(radius + radius * cos(a), 4), round(radius + radius * sin(a), 4)]
        for a in angles
    ]
| [
"numpy.sin",
"numpy.cos"
] | [((248, 276), 'numpy.cos', 'cos', (['(2 * pi * n / num_points)'], {}), '(2 * pi * n / num_points)\n', (251, 276), False, 'from numpy import cos, sin, pi, round, random\n'), ((296, 324), 'numpy.sin', 'sin', (['(2 * pi * n / num_points)'], {}), '(2 * pi * n / num_points)\n', (299, 324), False, 'from numpy import cos, sin, pi, round, random\n')] |
# ********** modules ********** #
# chainer
import chainer
from chainer import cuda
from chainer.training import extensions
# others
import numpy as np
import argparse
import glob, os
import random
# network, which named "Looking to Listen at the Cocktail Party"
from network import Audio_Visual_Net
# ********** setup ********** #
# Fix NumPy's RNG so runs are reproducible.
np.random.seed(0)
# Experiment index: used to name the result directory and saved model files.
INDEX = 0
# Dataset roots (to be filled in per machine): spectrogram .npz files and
# visual-feature csv files, used as defaults for --data_speech / --data_visual.
DATA_DIR_SPEC = ""
DATA_DIR_VISUAL = ""
# ********** main ********** #
def main():
    """Train the audio-visual network and save model/optimizer snapshots.

    Builds CLI args, loads the paired spectrogram/visual file lists, splits
    them 99%/1% into train/validation, and runs a Chainer trainer loop.
    """
    # ===== Argparse ===== #
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", "-g", type=int, default=-1,
                        help="specify GPU")
    parser.add_argument("--iteration", "-i", type=int, default=5000,
                        help="# of iterations")
    parser.add_argument("--batch_size", "-b", type=int, default=6,
                        help="batch size")
    parser.add_argument("--units", "-u", type=int,
                        default=5000, help="# of FC units")
    parser.add_argument("--data_visual", type=str, default=DATA_DIR_VISUAL,
                        help="Visual data directory, which has csv files")
    parser.add_argument("--data_speech", type=str, default=DATA_DIR_SPEC,
                        help="Spectrogram data directory, which has npz files")
    parser.add_argument("--result_dir", "-r", type=str,
                        default="result-{}/".format(INDEX))
    parser.add_argument("--resume", default="",
                        help="Resume the training from snapshot")
    args = parser.parse_args()
    # ===== GPU or CPU ===== #
    # NOTE(review): xp is assigned in both branches but never used later in
    # this function; the CPU branch also calls get_device_from_id(-1).use(),
    # which presumably is a no-op for negative ids — confirm against chainer docs.
    if args.gpu >= 0:
        xp = cuda.cupy
        cuda.get_device(args.gpu).use()
    else:
        xp = np
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
    # ===== Load model ===== #
    print("loading model...")
    model = Audio_Visual_Net(spec_len=49, face_len=12, num_fusion_units=args.units, gpu=args.gpu)
    if args.gpu >= 0:
        model.to_gpu(args.gpu)
    # Adam optimizer with a small fixed learning rate (3e-5).
    optimizer = chainer.optimizers.Adam(alpha=3*1e-5)
    optimizer.setup(model)
    # ===== Set data ===== #
    print("loading data...")
    # Sort both listings so spectrogram and visual files pair up by index.
    spec_input = sorted(glob.glob(os.path.join(args.data_speech, "*.npz")))
    vis_input = sorted(glob.glob(os.path.join(args.data_visual, "*")))
    assert len(spec_input)==len(vis_input), "# of files are different between faces and audios."
    all_nums = range(len(spec_input))
    # Hold out the last 1% of the file indices for validation.
    threshold = int(len(all_nums) * 0.99)
    all_nums_train = all_nums[:threshold]
    all_nums_test = all_nums[threshold:]
    train = [(i) for i in all_nums_train]
    test = [(i) for i in all_nums_test]
    train_iter = chainer.iterators.SerialIterator(dataset=train, batch_size=args.batch_size, shuffle=True, repeat=True)
    test_iter = chainer.iterators.SerialIterator(dataset=test, batch_size=args.batch_size, shuffle=False, repeat=False)
    # ===== Define trainer ===== #
    print("setting trainer...")
    updater = chainer.training.updaters.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = chainer.training.Trainer(updater, (args.iteration, "iteration"), out=args.result_dir)
    # Evaluate / log / plot every `iter_trigger` iterations; snapshot every 10x.
    iter_trigger = 10
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(iter_trigger, "iteration"))
    trainer.extend(extensions.LogReport(trigger=(iter_trigger, "iteration")), trigger=(iter_trigger, "iteration"))
    trainer.extend(extensions.ProgressBar(update_interval=2))
    trainer.extend(extensions.PlotReport(["main/loss", "validation/main/loss"], "iteration", file_name="loss.png", trigger=(10, "iteration")))
    trainer.extend(extensions.PrintReport(["epoch", "iteration", "main/loss", "validation/main/loss", "elapsed_time"]), trigger=(iter_trigger, "iteration"))
    trainer.extend(extensions.snapshot(), trigger=(int(iter_trigger*10), "iteration"))
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)
    # ===== Training ===== #
    print("start training...")
    trainer.run()
    # ===== Save model ===== #
    print("saving model...")
    # Move weights back to the CPU before serializing.
    model.to_cpu()
    chainer.serializers.save_npz(os.path.join(args.result_dir, "model-{}.npz".format(INDEX)), model)
    chainer.serializers.save_npz(os.path.join(args.result_dir, "optimizer-{}.npz".format(INDEX)), optimizer)
    print("done!!")
if __name__ == "__main__":
    main()
| [
"chainer.optimizers.Adam",
"numpy.random.seed",
"argparse.ArgumentParser",
"chainer.training.Trainer",
"chainer.training.updaters.StandardUpdater",
"chainer.training.extensions.Evaluator",
"network.Audio_Visual_Net",
"chainer.training.extensions.PrintReport",
"chainer.serializers.load_npz",
"chain... | [((335, 352), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (349, 352), True, 'import numpy as np\n'), ((490, 515), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (513, 515), False, 'import argparse\n'), ((1811, 1901), 'network.Audio_Visual_Net', 'Audio_Visual_Net', ([], {'spec_len': '(49)', 'face_len': '(12)', 'num_fusion_units': 'args.units', 'gpu': 'args.gpu'}), '(spec_len=49, face_len=12, num_fusion_units=args.units, gpu\n =args.gpu)\n', (1827, 1901), False, 'from network import Audio_Visual_Net\n'), ((1966, 2006), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {'alpha': '(3 * 1e-05)'}), '(alpha=3 * 1e-05)\n', (1989, 2006), False, 'import chainer\n'), ((2603, 2709), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', ([], {'dataset': 'train', 'batch_size': 'args.batch_size', 'shuffle': '(True)', 'repeat': '(True)'}), '(dataset=train, batch_size=args.batch_size,\n shuffle=True, repeat=True)\n', (2635, 2709), False, 'import chainer\n'), ((2722, 2829), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', ([], {'dataset': 'test', 'batch_size': 'args.batch_size', 'shuffle': '(False)', 'repeat': '(False)'}), '(dataset=test, batch_size=args.batch_size,\n shuffle=False, repeat=False)\n', (2754, 2829), False, 'import chainer\n'), ((2912, 2998), 'chainer.training.updaters.StandardUpdater', 'chainer.training.updaters.StandardUpdater', (['train_iter', 'optimizer'], {'device': 'args.gpu'}), '(train_iter, optimizer, device=\n args.gpu)\n', (2953, 2998), False, 'import chainer\n'), ((3008, 3098), 'chainer.training.Trainer', 'chainer.training.Trainer', (['updater', "(args.iteration, 'iteration')"], {'out': 'args.result_dir'}), "(updater, (args.iteration, 'iteration'), out=args.\n result_dir)\n", (3032, 3098), False, 'import chainer\n'), ((3136, 3191), 'chainer.training.extensions.Evaluator', 'extensions.Evaluator', (['test_iter', 'model'], {'device': 'args.gpu'}), 
'(test_iter, model, device=args.gpu)\n', (3156, 3191), False, 'from chainer.training import extensions\n'), ((3249, 3306), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': "(iter_trigger, 'iteration')"}), "(trigger=(iter_trigger, 'iteration'))\n", (3269, 3306), False, 'from chainer.training import extensions\n'), ((3364, 3405), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {'update_interval': '(2)'}), '(update_interval=2)\n', (3386, 3405), False, 'from chainer.training import extensions\n'), ((3426, 3552), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/loss', 'validation/main/loss']", '"""iteration"""'], {'file_name': '"""loss.png"""', 'trigger': "(10, 'iteration')"}), "(['main/loss', 'validation/main/loss'], 'iteration',\n file_name='loss.png', trigger=(10, 'iteration'))\n", (3447, 3552), False, 'from chainer.training import extensions\n'), ((3569, 3672), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'iteration', 'main/loss', 'validation/main/loss', 'elapsed_time']"], {}), "(['epoch', 'iteration', 'main/loss',\n 'validation/main/loss', 'elapsed_time'])\n", (3591, 3672), False, 'from chainer.training import extensions\n'), ((3726, 3747), 'chainer.training.extensions.snapshot', 'extensions.snapshot', ([], {}), '()\n', (3745, 3747), False, 'from chainer.training import extensions\n'), ((3831, 3881), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.resume', 'trainer'], {}), '(args.resume, trainer)\n', (3859, 3881), False, 'import chainer\n'), ((1676, 1726), 'chainer.backends.cuda.get_device_from_id', 'chainer.backends.cuda.get_device_from_id', (['args.gpu'], {}), '(args.gpu)\n', (1716, 1726), False, 'import chainer\n'), ((2128, 2167), 'os.path.join', 'os.path.join', (['args.data_speech', '"""*.npz"""'], {}), "(args.data_speech, '*.npz')\n", (2140, 2167), False, 'import glob, os\n'), ((2203, 2238), 'os.path.join', 
'os.path.join', (['args.data_visual', '"""*"""'], {}), "(args.data_visual, '*')\n", (2215, 2238), False, 'import glob, os\n'), ((1614, 1639), 'chainer.cuda.get_device', 'cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (1629, 1639), False, 'from chainer import cuda\n')] |
from rdkit.Chem import AllChem
import collections
import logging
import os
import re
import numpy as np
from rdkit import Chem
import pkg_resources
from typing import List
from transformers import BertTokenizer
# Regex used to split a SMILES string into chemically meaningful tokens
# (bracket atoms are kept whole; two-letter halogens, bonds, ring digits, etc.).
SMI_REGEX_PATTERN = r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
def get_default_tokenizer():
    """Return a SmilesTokenizer built from the vocabulary bundled with rxnfp."""
    vocab_path = pkg_resources.resource_filename(
        "rxnfp", "models/transformers/bert_ft_10k_25s/vocab.txt"
    )
    return SmilesTokenizer(vocab_path)
class SmilesTokenizer(BertTokenizer):
    r"""
    Constructs a SmilesTokenizer.
    Mostly copied from https://github.com/huggingface/transformers
    Args:
        vocab_file: Path to a SMILES character per line vocabulary file
    """
    def __init__(
        self,
        vocab_file='',
        # unk_token="[UNK]",
        # sep_token="[SEP]",
        # pad_token="[PAD]",
        # cls_token="[CLS]",
        # mask_token="[MASK]",
        **kwargs
    ):
        """Constructs a BertTokenizer.
        Args:
            **vocab_file**: Path to a SMILES character per line vocabulary file
        """
        super().__init__(vocab_file, **kwargs)
        # take into account special tokens in max length
        # self.max_len_single_sentence = self.max_len - 2
        # self.max_len_sentences_pair = self.max_len - 3
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocab file at path '{}'.".format(vocab_file)
            )
        # Ordered token -> id mapping read from the vocabulary file.
        self.vocab = load_vocab(vocab_file)
        # Largest index among "[unused*]" placeholder tokens in the vocab.
        self.highest_unused_index = max(
            [
                i for i, v in enumerate(self.vocab.keys())
                if v.startswith("[unused")
            ]
        )
        # Reverse mapping id -> token, preserving vocabulary order.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()]
        )
        # Regex-based splitter that turns a SMILES string into tokens.
        self.basic_tokenizer = BasicSmilesTokenizer()
        # self.init_kwargs["max_len"] = self.max_len
    @property
    def vocab_size(self):
        """Number of tokens in the vocabulary."""
        return len(self.vocab)
    @property
    def vocab_list(self):
        """Vocabulary tokens as a list, in file order."""
        return list(self.vocab.keys())
    def _tokenize(self, text):
        """Split a SMILES string into tokens via the regex tokenizer."""
        split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]
        return split_tokens
    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        # Unknown tokens fall back to the id of the [UNK] token.
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
    def add_special_tokens_ids_single_sequence(self, token_ids):
        """
        Adds special token ids to a sequence for sequence classification tasks.
        A BERT sequence has the following format: [CLS] X [SEP]
        """
        return [self.cls_token_id] + token_ids + [self.sep_token_id]
    def add_special_tokens_single_sequence(self, tokens):
        """
        Adds special tokens to a sequence for sequence classification tasks.
        A BERT sequence has the following format: [CLS] X [SEP]
        """
        return [self.cls_token] + tokens + [self.sep_token]
    def add_special_tokens_sequence_pair(self, token_0, token_1):
        """
        Adds special tokens to a sequence pair for sequence classification tasks.
        A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
        """
        sep = [self.sep_token]
        cls = [self.cls_token]
        return cls + token_0 + sep + token_1 + sep
    def add_special_tokens_ids_sequence_pair(self, token_ids_0, token_ids_1):
        """
        Adds special token ids to a sequence pair for sequence classification tasks.
        A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def add_padding_tokens(self, token_ids, length, right=True):
        """
        Adds padding tokens to return a sequence of length max_length.
        By default padding tokens are added to the right of the sequence.
        """
        padding = [self.pad_token_id] * (length - len(token_ids))
        if right:
            return token_ids + padding
        else:
            return padding + token_ids
class BasicSmilesTokenizer(object):
    """Split SMILES strings into tokens with a regular expression."""
    def __init__(self, regex_pattern=SMI_REGEX_PATTERN):
        """Compile *regex_pattern* once for reuse by :meth:`tokenize`.
        Args:
            **regex_pattern**: SMILES token regex
        """
        self.regex_pattern = regex_pattern
        self.regex = re.compile(regex_pattern)
    def tokenize(self, text):
        """Return the list of SMILES tokens found in *text*."""
        return list(self.regex.findall(text))
def load_vocab(vocab_file):
    """Read a one-token-per-line vocabulary file into an ordered token->index map."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as handle:
        lines = handle.readlines()
    for idx, line in enumerate(lines):
        vocab[line.rstrip("\n")] = idx
    return vocab
# Single example: padded token ids plus mask/segment/MLM-label arrays.
InputFeatures = collections.namedtuple(
    "InputFeatures", ["input_ids", "input_mask", "segment_ids", "lm_label_ids"]
)
# Batched counterpart: each field holds the per-example arrays stacked
# along a leading batch axis.
InputFeaturesBatch = collections.namedtuple(
    "InputFeaturesBatch", ["input_ids", "input_mask", "segment_ids", "lm_label_ids"]
)
def convert_reaction_to_valid_features(reaction: str, tokenizer: SmilesTokenizer, max_seq_length: int = 512):
    r"""
    Convert a reaction SMILES string into fixed-length model input features.

    Args:
        reaction: Reaction SMILES string.
        tokenizer: Tokenizer providing ``tokenize``, the special-token
            helpers, ``convert_tokens_to_ids`` and ``pad_token_id``.
        max_seq_length: Total output length, including [CLS]/[SEP].

    Returns:
        InputFeatures with ``input_ids``, ``input_mask``, ``segment_ids``
        and ``lm_label_ids`` arrays, each of length ``max_seq_length``.
    """
    max_len_single_sentence = max_seq_length - 2  # reserve room for [CLS]/[SEP]
    tokens = tokenizer.add_special_tokens_single_sequence(
        tokenizer.tokenize(reaction)[:max_len_single_sentence]
    )  # add [CLS] and [SEP] token
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Fix: the np.int / np.bool aliases were removed in NumPy 1.24 and raised
    # AttributeError; the builtin int / bool give the same dtypes.
    input_array = np.full(
        max_seq_length, dtype=int, fill_value=tokenizer.pad_token_id
    )
    input_array[: len(input_ids)] = input_ids
    mask_array = np.zeros(max_seq_length, dtype=bool)
    mask_array[: len(input_ids)] = 1
    lm_label_array = np.full(max_seq_length, dtype=int, fill_value=-1)
    # do not evaluate on [CLS] and [SEP] token
    lm_label_array[1 : len(input_ids) - 1] = input_ids[1:-1]
    segment_array = np.zeros(max_seq_length, dtype=bool)
    features = InputFeatures(
        input_ids=input_array,
        input_mask=mask_array,
        segment_ids=segment_array,
        lm_label_ids=lm_label_array,
    )
    return features
def convert_reaction_to_valid_features_batch(
    reaction_list: List[str], tokenizer: SmilesTokenizer
):
    r"""
    Convert a list of reaction SMILES into one batched set of input features.
    """
    # Featurize each reaction individually, then stack field-by-field
    # along a new leading batch axis.
    per_reaction = [
        convert_reaction_to_valid_features(reaction, tokenizer)
        for reaction in reaction_list
    ]
    return InputFeaturesBatch(
        input_ids=np.stack([f.input_ids for f in per_reaction], axis=0),
        input_mask=np.stack([f.input_mask for f in per_reaction], axis=0),
        segment_ids=np.stack([f.segment_ids for f in per_reaction], axis=0),
        lm_label_ids=np.stack([f.lm_label_ids for f in per_reaction], axis=0),
    )
class NotCanonicalizableSmilesException(ValueError):
    """Raised when a SMILES string cannot be parsed into a molecule."""
def canonicalize_smi(smi, remove_atom_mapping=False):
    r"""
    Return the canonical SMILES for *smi*.

    Optionally strips atom-map numbers first; raises
    NotCanonicalizableSmilesException when RDKit cannot parse the input.
    """
    mol = Chem.MolFromSmiles(smi)
    if not mol:
        raise NotCanonicalizableSmilesException("Molecule not canonicalizable")
    if remove_atom_mapping:
        # Drop every atom-map annotation before canonical serialization.
        for atom in mol.GetAtoms():
            if atom.HasProp("molAtomMapNumber"):
                atom.ClearProp("molAtomMapNumber")
    return Chem.MolToSmiles(mol)
def process_reaction(rxn):
    """
    Canonicalize a 'reactants>reagents>products' reaction SMILES.

    Reagents are merged into the precursor side, atom maps are removed,
    and each side is sorted. Returns '' when any molecule fails to parse.
    """
    reactants, reagents, products = rxn.split(">")

    def _canonical_parts(smiles_group):
        # Canonicalize every dot-separated molecule, dropping atom maps.
        return [canonicalize_smi(s, True) for s in smiles_group.split(".")]

    try:
        precursors = _canonical_parts(reactants)
        if len(reagents) > 0:
            precursors += _canonical_parts(reagents)
        products = _canonical_parts(products)
    except NotCanonicalizableSmilesException:
        return ""
    return ".".join(sorted(precursors)) + ">>" + ".".join(sorted(products))
# ----- Module-level smoke tests: these run at import time and raise if the
# canonicalization/tokenization pipeline is broken. -----
atom_mapped_rxn = 'F[c:5]1[n:6][cH:7][cH:8][cH:9][c:10]1[F:11].[CH3:1][CH:2]([CH3:3])[SH:4]>CN(C)C=O.O=C([O-])[O-].[K+].[K+]>[CH3:1][CH:2]([CH3:3])[S:4][c:5]1[n:6][cH:7][cH:8][cH:9][c:10]1[F:11]'
canonical_rxn = "CC(C)S.CN(C)C=O.Fc1cccnc1F.O=C([O-])[O-].[K+].[K+]>>CC(C)Sc1ncccc1F"
tokenized_rxn = 'C C ( C ) S . C N ( C ) C = O . F c 1 c c c n c 1 F . O = C ( [O-] ) [O-] . [K+] . [K+] >> C C ( C ) S c 1 n c c c c 1 F'
# Both reaction strings must be parseable by RDKit.
AllChem.ReactionFromSmarts(atom_mapped_rxn, useSmiles=True)
assert canonical_rxn == process_reaction(atom_mapped_rxn)
AllChem.ReactionFromSmarts(canonical_rxn, useSmiles=True)
# The packaged default vocabulary must load into a SmilesTokenizer.
tokenizer = get_default_tokenizer()
assert isinstance(tokenizer, SmilesTokenizer)
# Regex tokenization of the canonical reaction must match the reference.
basic_tokenizer = BasicSmilesTokenizer()
assert tokenized_rxn == ' '.join(basic_tokenizer.tokenize(canonical_rxn))
| [
"numpy.full",
"rdkit.Chem.MolToSmiles",
"numpy.stack",
"rdkit.Chem.AllChem.ReactionFromSmarts",
"numpy.zeros",
"pkg_resources.resource_filename",
"os.path.isfile",
"collections.namedtuple",
"collections.OrderedDict",
"rdkit.Chem.MolFromSmiles",
"re.compile"
] | [((5576, 5679), 'collections.namedtuple', 'collections.namedtuple', (['"""InputFeatures"""', "['input_ids', 'input_mask', 'segment_ids', 'lm_label_ids']"], {}), "('InputFeatures', ['input_ids', 'input_mask',\n 'segment_ids', 'lm_label_ids'])\n", (5598, 5679), False, 'import collections\n'), ((5703, 5811), 'collections.namedtuple', 'collections.namedtuple', (['"""InputFeaturesBatch"""', "['input_ids', 'input_mask', 'segment_ids', 'lm_label_ids']"], {}), "('InputFeaturesBatch', ['input_ids', 'input_mask',\n 'segment_ids', 'lm_label_ids'])\n", (5725, 5811), False, 'import collections\n'), ((9363, 9422), 'rdkit.Chem.AllChem.ReactionFromSmarts', 'AllChem.ReactionFromSmarts', (['atom_mapped_rxn'], {'useSmiles': '(True)'}), '(atom_mapped_rxn, useSmiles=True)\n', (9389, 9422), False, 'from rdkit.Chem import AllChem\n'), ((9481, 9538), 'rdkit.Chem.AllChem.ReactionFromSmarts', 'AllChem.ReactionFromSmarts', (['canonical_rxn'], {'useSmiles': '(True)'}), '(canonical_rxn, useSmiles=True)\n', (9507, 9538), False, 'from rdkit.Chem import AllChem\n'), ((409, 502), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""rxnfp"""', '"""models/transformers/bert_ft_10k_25s/vocab.txt"""'], {}), "('rxnfp',\n 'models/transformers/bert_ft_10k_25s/vocab.txt')\n", (440, 502), False, 'import pkg_resources\n'), ((5313, 5338), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5336, 5338), False, 'import collections\n'), ((6271, 6343), 'numpy.full', 'np.full', (['max_seq_length'], {'dtype': 'np.int', 'fill_value': 'tokenizer.pad_token_id'}), '(max_seq_length, dtype=np.int, fill_value=tokenizer.pad_token_id)\n', (6278, 6343), True, 'import numpy as np\n'), ((6422, 6461), 'numpy.zeros', 'np.zeros', (['max_seq_length'], {'dtype': 'np.bool'}), '(max_seq_length, dtype=np.bool)\n', (6430, 6461), True, 'import numpy as np\n'), ((6521, 6573), 'numpy.full', 'np.full', (['max_seq_length'], {'dtype': 'np.int', 'fill_value': '(-1)'}), '(max_seq_length, 
dtype=np.int, fill_value=-1)\n', (6528, 6573), True, 'import numpy as np\n'), ((6703, 6742), 'numpy.zeros', 'np.zeros', (['max_seq_length'], {'dtype': 'np.bool'}), '(max_seq_length, dtype=np.bool)\n', (6711, 6742), True, 'import numpy as np\n'), ((7955, 7978), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (7973, 7978), False, 'from rdkit import Chem\n'), ((8250, 8271), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (8266, 8271), False, 'from rdkit import Chem\n'), ((5015, 5045), 're.compile', 're.compile', (['self.regex_pattern'], {}), '(self.regex_pattern)\n', (5025, 5045), False, 'import re\n'), ((1456, 1482), 'os.path.isfile', 'os.path.isfile', (['vocab_file'], {}), '(vocab_file)\n', (1470, 1482), False, 'import os\n'), ((7571, 7598), 'numpy.stack', 'np.stack', (['input_ids'], {'axis': '(0)'}), '(input_ids, axis=0)\n', (7579, 7598), True, 'import numpy as np\n'), ((7619, 7648), 'numpy.stack', 'np.stack', (['input_masks'], {'axis': '(0)'}), '(input_masks, axis=0)\n', (7627, 7648), True, 'import numpy as np\n'), ((7670, 7699), 'numpy.stack', 'np.stack', (['segment_ids'], {'axis': '(0)'}), '(segment_ids, axis=0)\n', (7678, 7699), True, 'import numpy as np\n'), ((7722, 7752), 'numpy.stack', 'np.stack', (['lm_label_ids'], {'axis': '(0)'}), '(lm_label_ids, axis=0)\n', (7730, 7752), True, 'import numpy as np\n')] |
###############################################################################
# Imports
###############################################################################
import numpy as np
from scipy.stats import norm
from scipy.special import erfc
import h5py
from astropy import constants as const
import pkg_resources
from warnings import simplefilter, warn
###############################################################################
# Setup
###############################################################################
# Emit a UserWarning on every occurrence (not only the first) so repeated
# bad inputs are always visible to the caller.
simplefilter('always', UserWarning)
# Locate the fitted hyperparameter posterior samples that ship with the package.
hyper_file = pkg_resources.resource_filename('forecaster', 'fitting_parameters.h5')
h5 = h5py.File(hyper_file, 'r')
all_hyper = h5['hyper_posterior'][:]  # load fully into memory so the file can be closed
h5.close()
# Number of populations in the broken power-law model:
# Terran, Neptunian, Jovian, Stellar.
n_pop = 4
###############################################################################
# Mass to Radius
###############################################################################
def Mpost2R(mass_array, unit='Jupiter', classify=False):
    """
    Description:
    ---------------
    Given an array of masses, return an equal length
    array of forecasted radii. Masses/radii do not
    have to correspond to a single physical source:
    output indices correspond to input indices,
    so the mass array can hold information for
    multiple objects.
    Parameters:
    ---------------
    mass_array: one dimensional array
        Input mass array
    unit: str (optional)
        Unit of mass_array.
        Default is 'Jupiter'.
        Options are 'Earth' and 'Jupiter'.
        Any other value falls back to 'Jupiter' with a warning.
    classify: boolean (optional)
        Indicator for whether to calculate the probabilities that
        the mass array represents each object in
        {Terran, Neptunian, Jovian, Stellar}.
        Default is False.
        Do not use if the mass array does not represent a single
        object.
    Returns
    ---------------
    If classify==False:
        radii: one dimensional array
            Predicted radius distribution in the specified input unit
    Else:
        radii: one dimensional array
            Predicted radius distribution in the specified input unit
        classification: dict
            Probabilities the mass array represents an object
            in each of the 4 populations.
    Raises
    ---------------
    ValueError
        If any mass (in Earth units) falls outside the range the model
        was trained on, [3e-4, 3e5] M_e.
    """
    # Initial setup
    assert len(mass_array.shape) == 1 and len(mass_array) > 0, \
        "Input mass must 1-D array with non-zero length"
    sample_size = len(mass_array)
    # Pair every input mass with one random draw from the hyperparameter
    # posterior and one uniform quantile used to invert the radius CDF.
    hyper_ind = np.random.randint(low = 0,
                   high = np.shape(all_hyper)[0],
                   size = sample_size)
    hypers = all_hyper[hyper_ind,:]
    Probs = np.random.random(sample_size)
    # Convert internally to Earth masses
    if unit == 'Earth':
        pass
    elif unit == 'Jupiter':
        mass_array = mass_array * (const.M_jup / const.M_earth).value
    else:
        unit = 'Jupiter'
        mass_array = mass_array * (const.M_jup / const.M_earth).value
        warn("Input unit must be 'Earth' or 'Jupiter'. " + \
             "Using 'Jupiter' as default.")
    # Ensure input within model expectations.
    # (Fixed: these messages used to claim "Returning None" even though
    # both paths raise.)
    if np.sum(mass_array > 3e5) > 0:
        raise ValueError('Mass array contains values above 3e5 M_e, ' + \
                         'outside of model expectation.')
    if np.sum(mass_array < 3e-4) > 0:
        raise ValueError('Mass array contains values below 3e-4 M_e, ' + \
                         'outside of model expectation.')
    logMs = np.log10(mass_array)
    # Get the data needed to calculate radii
    w = split_hyper_linear(hypers)
    # w, read as (#hyper, flag for {C, slope, sigma, trans}, pop #)
    # Per-draw population edges in log-mass: [-inf, T0, T1, T2, +inf]
    TS = np.zeros((len(w), 5))*np.nan
    TS[:, 0] = -np.inf
    TS[:, 1:4] = w[:, -1, :-1]
    TS[:, -1] = np.inf
    # Assign each mass to the population bracketed by its draw's edges.
    pop_num = np.zeros(len(logMs))*np.nan
    pop_num[((logMs > TS[:,0]) & (logMs < TS[:,1]))] = 0
    pop_num[((logMs > TS[:,1]) & (logMs < TS[:,2]))] = 1
    pop_num[((logMs > TS[:,2]) & (logMs < TS[:,3]))] = 2
    pop_num[((logMs > TS[:,3]) & (logMs < TS[:,4]))] = 3
    pop_num = pop_num.astype(int)
    # Power-law parameters of the population each mass belongs to.
    Cs = w[np.arange(0,len(w),1), 0, pop_num]
    Slopes = w[np.arange(0,len(w),1), 1, pop_num]
    Mus = Cs + logMs*Slopes
    Sigs = w[np.arange(0,len(w),1), 2, pop_num]
    # Calculate the radii: invert the normal CDF at the random quantiles.
    logRs = norm.ppf(Probs, Mus, Sigs)
    radii_sample = 10.** logRs
    # convert to right unit
    if unit == 'Jupiter':
        radii = radii_sample / (const.R_jup / const.R_earth).value
    else:
        radii = radii_sample
    # Return
    if classify:
        prob = np.zeros(4)
        prob[0] = np.sum(pop_num == 0)
        prob[1] = np.sum(pop_num == 1)
        prob[2] = np.sum(pop_num == 2)
        prob[3] = np.sum(pop_num == 3)
        prob = prob / np.sum(prob) * 100
        return radii, {'Terran': prob[0],
                       'Neptunian': prob[1],
                       'Jovian': prob[2],
                       'Stellar': prob[3]}
    else:
        return radii
#------------------------------------------------------------------------------
def Mstat2R(mean, onesig_neg, onesig_pos,
            unit='Jupiter',
            n_mass_samples=int(1e3),
            classify=False):
    """
    Summarize the forecasted radius for a single mass measurement.

    A sample of `n_mass_samples` masses is drawn from an asymmetric
    normal built from `mean` and the +/- one sigma uncertainties
    (via draw_from_asymmetric), pushed through Mpost2R, and reduced
    to [median, +1 sigma, -1 sigma] in the input unit.

    Parameters
    ----------
    mean: float
        Input mass.
    onesig_neg, onesig_pos: float
        One sigma negative / positive uncertainties.
    unit: str (optional)
        'Earth' or 'Jupiter' (default 'Jupiter').
    n_mass_samples: int (optional)
        Number of mass draws (default 1000).
    classify: boolean (optional)
        If True, also return the population-probability dict from
        Mpost2R.

    Returns
    -------
    stats: (3,) array of [median, +uncertainty, -uncertainty], plus
    the classification dict when `classify` is True.
    """
    # Zero uncertainties break the asymmetric sampler; nudge them.
    onesig_neg = np.abs(onesig_neg)
    onesig_pos = np.abs(onesig_pos)
    if onesig_neg == 0:
        warn("Negative uncertainty cannot be zero, using 1e-9 instead")
        onesig_neg = 1e-9
    if onesig_pos == 0:
        warn("Positive uncertainty cannot be zero, using 1e-9 instead")
        onesig_pos = 1e-9
    # Sample masses consistent with the stated statistics.
    masses = draw_from_asymmetric(mu=mean,
                                 signeg=onesig_neg,
                                 sigpos=onesig_pos,
                                 xmin=0, xmax=np.inf,
                                 nsamples=n_mass_samples)
    # Forward-model the masses to radii.
    forecast = Mpost2R(masses, unit=unit, classify=classify)
    radii = forecast[0] if classify else forecast
    # Median and nearest-rank one-sigma bounds.
    onesigma = 34.1
    med = np.median(radii)
    upper = np.percentile(radii, 50. + onesigma, interpolation='nearest')
    lower = np.percentile(radii, 50. - onesigma, interpolation='nearest')
    stats = np.array([med, upper - med, lower - med])
    if classify:
        return stats, forecast[1]
    return stats
###############################################################################
# Radius to Mass
###############################################################################
def Rpost2M(radius_array, unit='Jupiter', grid_size=int(1e3), classify=False):
    """
    Description:
    ---------------
    Given an array of radii, return an equal length
    array of forecasted masses. Masses/radii do not
    have to correspond to a single physical source:
    output indices correspond to input indices,
    so the mass array can hold information for
    multiple objects.
    Parameters:
    ---------------
    radius_array: one dimensional array
        Input radius array
    unit: str (optional)
        Unit of radius_array.
        Default is 'Jupiter'.
        Options are 'Earth' and 'Jupiter'.
        Any other value falls back to 'Jupiter' with a warning.
    grid_size: int
        The size of the possible masses considered
        in the range [-3.522, 5.477] log(M_e).
        Default is 1e3.
        Anything below 10 will be converted to 10.
    classify: boolean (optional)
        Indicator for whether to calculate the probabilities that
        the radius array represents each object in
        {Terran, Neptunian, Jovian, Stellar}.
        Default is False.
        Do not use if the mass array does not represent a single
        object.
    Returns
    ---------------
    If classify==False:
        mass: one dimensional array
            Predicted mass distribution in the specified input unit
    Else:
        mass: one dimensional array
            Predicted mass distribution in the specified input unit
        classification: dict
            Probabilities the radius array represents an object
            in each of the 4 populations.
    Raises
    ---------------
    ValueError
        If any radius (in Earth units) falls outside the range the
        model was trained on, [1e-1, 1e2] R_e.
    """
    # Initial setup
    assert len(radius_array.shape) == 1 and len(radius_array) > 0, \
        "Input radius must 1-D array with non-zero length"
    # Convert internally to Earth radii.
    if unit == 'Earth':
        pass
    elif unit == 'Jupiter':
        radius_array = radius_array * (const.R_jup / const.R_earth).value
    else:
        unit = 'Jupiter'
        radius_array = radius_array * (const.R_jup / const.R_earth).value
        warn("Input unit must be 'Earth' or 'Jupiter'. " + \
             "Using 'Jupiter' as default.")
    # Ensure sample grid isn't too sparse
    # (Fixed: this used to call the undefined name `Warn`.)
    if grid_size < 10:
        warn('The sample grid of masses is too sparse, replacing ' +\
             'grid_size with 10 instead.')
        grid_size = 10
    # Ensure input within model expectations
    if np.sum(radius_array > 1e2) > 0:
        raise ValueError('Radius array contains values above 1e2 R_e, ' + \
                         'outside of model expectation.')
    # (Fixed: this message used to talk about masses in M_e.)
    if np.sum(radius_array < 1e-1) > 0:
        raise ValueError('Radius array contains values below 1e-1 R_e, ' + \
                         'outside of model expectation.')
    # Get the data to convert to masses
    sample_size = len(radius_array)
    logr = np.log10(radius_array)
    logm_grid = np.linspace(-3.522, 5.477, grid_size)
    # One hyperparameter posterior draw per input radius.
    hyper_ind = np.random.randint(low = 0,
                       high = np.shape(all_hyper)[0],
                       size = sample_size)
    hypers = all_hyper[hyper_ind,:]
    w = split_hyper_linear(hypers)
    Ind = indicate(logm_grid, w)
    Probs = ProbRGivenM(log_radii=logr, M=logm_grid,
                        indicate_output=Ind, split_hyper_linear_output=w)
    # Calculate the masses: inverse-CDF sample each row of Probs.
    logm = logm_grid[(Probs.cumsum(1) > \
                      np.random.rand(Probs.shape[0])[:,None]).argmax(1)]
    mass_sample = 10.** logm
    # Convert to original unit
    if unit == 'Jupiter':
        mass = mass_sample / (const.M_jup / const.M_earth).value
    else:
        mass = mass_sample
    if classify:
        prob = np.sum(np.sum(indicate(logm, w), axis=2), axis=0) / \
               (len(logm) * len(radius_array)) * 100
        return mass, {'Terran': prob[0],
                      'Neptunian': prob[1],
                      'Jovian': prob[2],
                      'Stellar': prob[3]}
    else:
        return mass
#------------------------------------------------------------------------------
def Rstat2M(mean, onesig_neg, onesig_pos,
            unit='Jupiter', n_radii_samples=int(1e3),
            mass_grid_size=int(1e3), classify=False):
    """
    Summarize the forecasted mass for a single radius measurement.

    A sample of `n_radii_samples` radii is drawn from an asymmetric
    normal built from `mean` and the +/- one sigma uncertainties
    (via draw_from_asymmetric), pushed through Rpost2M, and reduced
    to [median, +1 sigma, -1 sigma] in the input unit.

    Parameters
    ----------
    mean: float
        Input radius.
    onesig_neg, onesig_pos: float
        One sigma negative / positive uncertainties.
    unit: str (optional)
        'Earth' or 'Jupiter' (default 'Jupiter').
    n_radii_samples: int (optional)
        Number of radius draws (default 1000).
    mass_grid_size: int (optional)
        Mass-grid size handed to Rpost2M (default 1000).
    classify: boolean (optional)
        If True, also return the population-probability dict from
        Rpost2M.

    Returns
    -------
    stats: (3,) array of [median, +uncertainty, -uncertainty], plus
    the classification dict when `classify` is True.
    """
    # Zero uncertainties break the asymmetric sampler; nudge them.
    onesig_neg = np.abs(onesig_neg)
    onesig_pos = np.abs(onesig_pos)
    if onesig_neg == 0:
        warn("Negative uncertainty cannot be zero, using 1e-9 instead")
        onesig_neg = 1e-9
    if onesig_pos == 0:
        warn("Positive uncertainty cannot be zero, using 1e-9 instead")
        onesig_pos = 1e-9
    # Sample radii consistent with the stated statistics.
    radii = draw_from_asymmetric(mu=mean,
                                 signeg=onesig_neg,
                                 sigpos=onesig_pos,
                                 xmin=0, xmax=np.inf,
                                 nsamples=n_radii_samples)
    # Forward-model the radii to masses.
    forecast = Rpost2M(radii, unit=unit,
                       grid_size=mass_grid_size,
                       classify=classify)
    masses = forecast[0] if classify else forecast
    # Median and nearest-rank one-sigma bounds.
    onesigma = 34.1
    med = np.median(masses)
    upper = np.percentile(masses, 50. + onesigma, interpolation='nearest')
    lower = np.percentile(masses, 50. - onesigma, interpolation='nearest')
    stats = np.array([med, upper - med, lower - med])
    if classify:
        return stats, forecast[1]
    return stats
###############################################################################
# Helper Functions
###############################################################################
def draw_from_asymmetric(mu, signeg, sigpos, xmin, xmax, nsamples):
    '''
    Draw `nsamples` values from an asymmetric (two-piece) normal
    distribution with mode `mu`, lower-side width `signeg`, and
    upper-side width `sigpos`, truncated to [xmin, xmax].

    Port of an asymmetric distribution sampling method originally
    written for a Mathematica notebook.
    Original source: https://github.com/davidkipping/asymmetric

    This breaks when signeg or sigpos = 0, so callers replace those
    with a tiny value first (see Mstat2R / Rstat2M).
    '''
    signeg = np.abs(signeg)
    sigpos = np.abs(sigpos)
    # Candidate support: 1e6 uniform points spanning +/- 20 sigma.
    Xs = np.random.uniform(mu-20*signeg, mu+20*sigpos, 1000000)
    # Re-draw the support when the +/- 20 sigma window overshoots the
    # allowed [xmin, xmax] bounds on one or both sides.
    if (np.max(Xs) > xmax) and (np.min(Xs) > xmin):
        Xs = np.random.uniform(mu-20*signeg, xmax, 1000000)
    elif (np.max(Xs) < xmax) and (np.min(Xs) < xmin):
        Xs = np.random.uniform(xmin, mu+20*sigpos, 1000000)
    # NOTE(review): this branch re-draws over the same range as the
    # initial draw, so it only refreshes the random support -- looks
    # redundant, but removing it would change the RNG stream.
    elif (np.max(Xs) < xmax) and (np.min(Xs) > xmin):
        Xs = np.random.uniform(mu-20*signeg, mu+20*sigpos, 1000000)
    elif (np.max(Xs) > xmax) and (np.min(Xs) < xmin):
        Xs = np.random.uniform(xmin, xmax, 1000000)
    # Two-piece normal density: separate normalizations below/above mu,
    # each including the erfc truncation correction at the cut bound.
    pdf = np.zeros(len(Xs))*np.nan
    pdf[Xs < mu] = np.exp(- (Xs[Xs < mu] - mu)**2 / (2*signeg**2)) / \
                   (2*np.pi*signeg*sigpos * \
                   (1 / (np.sqrt(2*np.pi) * signeg) + \
                    1 / (np.sqrt(2*np.pi) * sigpos)) * \
                   (0.5 - 0.5*erfc((mu - xmin) / (np.sqrt(2) * signeg))))
    pdf[Xs >= mu] = np.exp(- (Xs[Xs >= mu] - mu)**2 / (2*sigpos**2)) / \
                    (2*np.pi*signeg*sigpos * \
                    (1 / (np.sqrt(2*np.pi) * signeg) + \
                     1 / (np.sqrt(2*np.pi) * sigpos)) * \
                    (0.5*erfc((mu - xmax) / (np.sqrt(2) * sigpos)) - 0.5))
    # Normalize so the weights sum to 1 (required by np.random.choice),
    # then resample the support with those weights.
    pdf = pdf/np.sum(pdf)
    v = np.random.choice(Xs, nsamples, p=pdf)
    return v
def split_hyper_linear(hypers, npop=4):
    '''
    Unpack raw hyperparameter posterior draws into per-population
    power-law parameters. Vectorized version of the original
    split_hyper_linear in chenjj2's forecaster.

    Parameters
    ----------
    hypers: (n_draws, 1 + 3*npop - 1) array
        Each row is [C0, slopes (npop), sigmas (npop),
        transitions (npop-1)].
    npop: int (optional)
        Number of populations in the broken power law. Default 4,
        matching the module-level model (generalized from the former
        hard-coded dependence on the global `n_pop`).

    Returns
    -------
    w: (n_draws, 4, npop) array
        Read as (#hyper, flag for {C, slope, sigma, trans}, pop #).
        The trans row has only npop-1 entries; its last slot is nan.
    '''
    C0 = hypers[:, 0]
    Slope = hypers[:, 1:1+npop]
    Sigma = hypers[:, 1+npop:1+2*npop]
    Trans = hypers[:, 1+2*npop:]
    # Intercepts of populations 1..npop-1 follow from continuity of the
    # broken power law at each transition point.
    C = np.zeros_like(Slope)
    C[:, 0] = C0
    for i in range(1, npop):
        C[:, i] = C[:, i-1] + Trans[:, i-1] * (Slope[:, i-1] - Slope[:, i])
    w = np.full((len(hypers), 4, npop), np.nan)
    w[:, 0, :] = C
    w[:, 1, :] = Slope
    w[:, 2, :] = Sigma
    w[:, 3, :-1] = Trans
    return w
def indicate(logM, split_hyper_output):
    '''
    Flag which population each (hyperparameter draw, log-mass) pair
    belongs to. Vectorized version of the original indicate in
    chenjj2's forecaster.

    Parameters
    ----------
    logM: (n_mass,) array
        Log10 masses to classify.
    split_hyper_output: (n_draws, 4, 4) array
        Output of split_hyper_linear; row [:, -1, :-1] holds the three
        log-mass transition points of each draw.

    Returns
    -------
    Ind: (n_draws, 4, n_mass) boolean array
        Ind[h, p, m] is True when logM[m] lies in population p for
        draw h.
    '''
    # Per-draw population edges: [-inf, T0, T1, T2, +inf]
    TS = np.zeros((len(split_hyper_output), 5))*np.nan
    TS[:, 0] = -np.inf
    TS[:, 1:4] = split_hyper_output[:, -1, :-1]
    TS[:, -1] = np.inf
    # (Removed a stray no-op `TS` expression statement left over here.)
    # Read as Ind[hyper/radius, pop#, mass grid spot]
    Ind = np.zeros((len(split_hyper_output), 4, len(logM)))
    Ind[:, 0, :] = ((logM[:, np.newaxis] >= TS[:, 0]) & (logM[:, np.newaxis] < TS[:, 1])).T
    Ind[:, 1, :] = ((logM[:, np.newaxis] >= TS[:, 1]) & (logM[:, np.newaxis] < TS[:, 2])).T
    Ind[:, 2, :] = ((logM[:, np.newaxis] >= TS[:, 2]) & (logM[:, np.newaxis] < TS[:, 3])).T
    Ind[:, 3, :] = ((logM[:, np.newaxis] >= TS[:, 3]) & (logM[:, np.newaxis] < TS[:, 4])).T
    Ind = Ind.astype(bool)
    return Ind
def ProbRGivenM(log_radii, M, indicate_output, split_hyper_linear_output):
    '''
    For each input mass, calculate the probability
    of that object corresponding to each of the input radii.
    Vectorized version of
    the original ProbRGivenM in chenjj2's forecaster.

    Parameters
    ----------
    log_radii: (n_r,) array of log10 radii, one per hyper draw.
    M: (n_m,) array, the log10 mass grid.
    indicate_output: (n_r, 4, n_m) boolean array from indicate().
    split_hyper_linear_output: (n_r, 4, 4) array from
        split_hyper_linear().

    Returns
    -------
    Probs: (n_r, n_m) array; each row is normalized so it sums to 1
    over the mass grid.
    '''
    w = split_hyper_linear_output
    Ind = indicate_output
    # Broadcast the mass grid across rows: one row per hyper draw.
    Mexpanded = np.ones((len(log_radii), len(M)))*M
    # Mean log-radius mu = C + slope*logM, using each grid point's own
    # population (selected via the boolean masks in Ind).
    Mu = np.zeros((len(log_radii), len(M)))*np.nan
    Mu[Ind[:,0,:]] = (w[:, 0, 0][:, np.newaxis] + (Mexpanded*w[:, 1, 0][:, np.newaxis]))[Ind[:,0,:]]
    Mu[Ind[:,1,:]] = (w[:, 0, 1][:, np.newaxis] + (Mexpanded*w[:, 1, 1][:, np.newaxis]))[Ind[:,1,:]]
    Mu[Ind[:,2,:]] = (w[:, 0, 2][:, np.newaxis] + (Mexpanded*w[:, 1, 2][:, np.newaxis]))[Ind[:,2,:]]
    Mu[Ind[:,3,:]] = (w[:, 0, 3][:, np.newaxis] + (Mexpanded*w[:, 1, 3][:, np.newaxis]))[Ind[:,3,:]]
    # Scatter sigma of the selected population, broadcast the same way.
    Sig = np.zeros((len(log_radii), len(M)))*np.nan
    Sig[Ind[:,0,:]] = (np.ones((len(log_radii), len(M)))*w[:, 2, 0][:, np.newaxis])[Ind[:,0,:]]
    Sig[Ind[:,1,:]] = (np.ones((len(log_radii), len(M)))*w[:, 2, 1][:, np.newaxis])[Ind[:,1,:]]
    Sig[Ind[:,2,:]] = (np.ones((len(log_radii), len(M)))*w[:, 2, 2][:, np.newaxis])[Ind[:,2,:]]
    Sig[Ind[:,3,:]] = (np.ones((len(log_radii), len(M)))*w[:, 2, 3][:, np.newaxis])[Ind[:,3,:]]
    # Gaussian likelihood of each observed log-radius at every grid
    # mass; evaluated on the flattened arrays, then reshaped back.
    Probs = norm.pdf(x=np.repeat(log_radii, len(M)),
                     loc=Mu.reshape(len(M)*len(log_radii)),
                     scale=Sig.reshape(len(M)*len(log_radii)))
    Probs = Probs.reshape((len(log_radii), len(M)))
    # Normalize each row into a discrete posterior over the mass grid.
    Probs = Probs / np.sum(Probs, axis=1)[:, np.newaxis]
    return Probs
| [
"numpy.abs",
"numpy.sum",
"pkg_resources.resource_filename",
"numpy.shape",
"numpy.exp",
"numpy.zeros_like",
"warnings.simplefilter",
"numpy.max",
"numpy.linspace",
"numpy.random.choice",
"numpy.log10",
"scipy.stats.norm.ppf",
"h5py.File",
"numpy.median",
"numpy.percentile",
"numpy.min... | [((531, 566), 'warnings.simplefilter', 'simplefilter', (['"""always"""', 'UserWarning'], {}), "('always', UserWarning)\n", (543, 566), False, 'from warnings import simplefilter, warn\n'), ((580, 650), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""forecaster"""', '"""fitting_parameters.h5"""'], {}), "('forecaster', 'fitting_parameters.h5')\n", (611, 650), False, 'import pkg_resources\n'), ((656, 682), 'h5py.File', 'h5py.File', (['hyper_file', '"""r"""'], {}), "(hyper_file, 'r')\n", (665, 682), False, 'import h5py\n'), ((2554, 2583), 'numpy.random.random', 'np.random.random', (['sample_size'], {}), '(sample_size)\n', (2570, 2583), True, 'import numpy as np\n'), ((3420, 3440), 'numpy.log10', 'np.log10', (['mass_array'], {}), '(mass_array)\n', (3428, 3440), True, 'import numpy as np\n'), ((4227, 4253), 'scipy.stats.norm.ppf', 'norm.ppf', (['Probs', 'Mus', 'Sigs'], {}), '(Probs, Mus, Sigs)\n', (4235, 4253), False, 'from scipy.stats import norm\n'), ((6479, 6497), 'numpy.abs', 'np.abs', (['onesig_neg'], {}), '(onesig_neg)\n', (6485, 6497), True, 'import numpy as np\n'), ((6515, 6533), 'numpy.abs', 'np.abs', (['onesig_pos'], {}), '(onesig_pos)\n', (6521, 6533), True, 'import numpy as np\n'), ((7392, 7408), 'numpy.median', 'np.median', (['radii'], {}), '(radii)\n', (7401, 7408), True, 'import numpy as np\n'), ((10618, 10640), 'numpy.log10', 'np.log10', (['radius_array'], {}), '(radius_array)\n', (10626, 10640), True, 'import numpy as np\n'), ((10657, 10694), 'numpy.linspace', 'np.linspace', (['(-3.522)', '(5.477)', 'grid_size'], {}), '(-3.522, 5.477, grid_size)\n', (10668, 10694), True, 'import numpy as np\n'), ((13451, 13469), 'numpy.abs', 'np.abs', (['onesig_neg'], {}), '(onesig_neg)\n', (13457, 13469), True, 'import numpy as np\n'), ((13487, 13505), 'numpy.abs', 'np.abs', (['onesig_pos'], {}), '(onesig_pos)\n', (13493, 13505), True, 'import numpy as np\n'), ((14215, 14232), 'numpy.median', 'np.median', (['masses'], {}), 
'(masses)\n', (14224, 14232), True, 'import numpy as np\n'), ((15184, 15198), 'numpy.abs', 'np.abs', (['signeg'], {}), '(signeg)\n', (15190, 15198), True, 'import numpy as np\n'), ((15212, 15226), 'numpy.abs', 'np.abs', (['sigpos'], {}), '(sigpos)\n', (15218, 15226), True, 'import numpy as np\n'), ((15236, 15298), 'numpy.random.uniform', 'np.random.uniform', (['(mu - 20 * signeg)', '(mu + 20 * sigpos)', '(1000000)'], {}), '(mu - 20 * signeg, mu + 20 * sigpos, 1000000)\n', (15253, 15298), True, 'import numpy as np\n'), ((16376, 16413), 'numpy.random.choice', 'np.random.choice', (['Xs', 'nsamples'], {'p': 'pdf'}), '(Xs, nsamples, p=pdf)\n', (16392, 16413), True, 'import numpy as np\n'), ((16822, 16842), 'numpy.zeros_like', 'np.zeros_like', (['Slope'], {}), '(Slope)\n', (16835, 16842), True, 'import numpy as np\n'), ((3039, 3068), 'numpy.sum', 'np.sum', (['(mass_array > 300000.0)'], {}), '(mass_array > 300000.0)\n', (3045, 3068), True, 'import numpy as np\n'), ((3219, 3246), 'numpy.sum', 'np.sum', (['(mass_array < 0.0003)'], {}), '(mass_array < 0.0003)\n', (3225, 3246), True, 'import numpy as np\n'), ((4505, 4516), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4513, 4516), True, 'import numpy as np\n'), ((4535, 4555), 'numpy.sum', 'np.sum', (['(pop_num == 0)'], {}), '(pop_num == 0)\n', (4541, 4555), True, 'import numpy as np\n'), ((4574, 4594), 'numpy.sum', 'np.sum', (['(pop_num == 1)'], {}), '(pop_num == 1)\n', (4580, 4594), True, 'import numpy as np\n'), ((4613, 4633), 'numpy.sum', 'np.sum', (['(pop_num == 2)'], {}), '(pop_num == 2)\n', (4619, 4633), True, 'import numpy as np\n'), ((4652, 4672), 'numpy.sum', 'np.sum', (['(pop_num == 3)'], {}), '(pop_num == 3)\n', (4658, 4672), True, 'import numpy as np\n'), ((6566, 6629), 'warnings.warn', 'warn', (['"""Negative uncertainty cannot be zero, using 1e-9 instead"""'], {}), "('Negative uncertainty cannot be zero, using 1e-9 instead')\n", (6570, 6629), False, 'from warnings import simplefilter, warn\n'), ((6688, 
6751), 'warnings.warn', 'warn', (['"""Positive uncertainty cannot be zero, using 1e-9 instead"""'], {}), "('Positive uncertainty cannot be zero, using 1e-9 instead')\n", (6692, 6751), False, 'from warnings import simplefilter, warn\n'), ((10165, 10193), 'numpy.sum', 'np.sum', (['(radius_array > 100.0)'], {}), '(radius_array > 100.0)\n', (10171, 10193), True, 'import numpy as np\n'), ((10349, 10375), 'numpy.sum', 'np.sum', (['(radius_array < 0.1)'], {}), '(radius_array < 0.1)\n', (10355, 10375), True, 'import numpy as np\n'), ((13538, 13601), 'warnings.warn', 'warn', (['"""Negative uncertainty cannot be zero, using 1e-9 instead"""'], {}), "('Negative uncertainty cannot be zero, using 1e-9 instead')\n", (13542, 13601), False, 'from warnings import simplefilter, warn\n'), ((13660, 13723), 'warnings.warn', 'warn', (['"""Positive uncertainty cannot be zero, using 1e-9 instead"""'], {}), "('Positive uncertainty cannot be zero, using 1e-9 instead')\n", (13664, 13723), False, 'from warnings import simplefilter, warn\n'), ((15356, 15406), 'numpy.random.uniform', 'np.random.uniform', (['(mu - 20 * signeg)', 'xmax', '(1000000)'], {}), '(mu - 20 * signeg, xmax, 1000000)\n', (15373, 15406), True, 'import numpy as np\n'), ((15800, 15852), 'numpy.exp', 'np.exp', (['(-(Xs[Xs < mu] - mu) ** 2 / (2 * signeg ** 2))'], {}), '(-(Xs[Xs < mu] - mu) ** 2 / (2 * signeg ** 2))\n', (15806, 15852), True, 'import numpy as np\n'), ((16078, 16131), 'numpy.exp', 'np.exp', (['(-(Xs[Xs >= mu] - mu) ** 2 / (2 * sigpos ** 2))'], {}), '(-(Xs[Xs >= mu] - mu) ** 2 / (2 * sigpos ** 2))\n', (16084, 16131), True, 'import numpy as np\n'), ((16355, 16366), 'numpy.sum', 'np.sum', (['pdf'], {}), '(pdf)\n', (16361, 16366), True, 'import numpy as np\n'), ((2879, 2964), 'warnings.warn', 'warn', (['("Input unit must be \'Earth\' or \'Jupiter\'. " + "Using \'Jupiter\' as default.")'], {}), '("Input unit must be \'Earth\' or \'Jupiter\'. 
" +\n "Using \'Jupiter\' as default.")\n', (2883, 2964), False, 'from warnings import simplefilter, warn\n'), ((9804, 9889), 'warnings.warn', 'warn', (['("Input unit must be \'Earth\' or \'Jupiter\'. " + "Using \'Jupiter\' as default.")'], {}), '("Input unit must be \'Earth\' or \'Jupiter\'. " +\n "Using \'Jupiter\' as default.")\n', (9808, 9889), False, 'from warnings import simplefilter, warn\n'), ((15299, 15309), 'numpy.max', 'np.max', (['Xs'], {}), '(Xs)\n', (15305, 15309), True, 'import numpy as np\n'), ((15323, 15333), 'numpy.min', 'np.min', (['Xs'], {}), '(Xs)\n', (15329, 15333), True, 'import numpy as np\n'), ((15470, 15520), 'numpy.random.uniform', 'np.random.uniform', (['xmin', '(mu + 20 * sigpos)', '(1000000)'], {}), '(xmin, mu + 20 * sigpos, 1000000)\n', (15487, 15520), True, 'import numpy as np\n'), ((19760, 19781), 'numpy.sum', 'np.sum', (['Probs'], {'axis': '(1)'}), '(Probs, axis=1)\n', (19766, 19781), True, 'import numpy as np\n'), ((2422, 2441), 'numpy.shape', 'np.shape', (['all_hyper'], {}), '(all_hyper)\n', (2430, 2441), True, 'import numpy as np\n'), ((4695, 4707), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (4701, 4707), True, 'import numpy as np\n'), ((7457, 7519), 'numpy.percentile', 'np.percentile', (['radii', '(50.0 + onesigma)'], {'interpolation': '"""nearest"""'}), "(radii, 50.0 + onesigma, interpolation='nearest')\n", (7470, 7519), True, 'import numpy as np\n'), ((10780, 10799), 'numpy.shape', 'np.shape', (['all_hyper'], {}), '(all_hyper)\n', (10788, 10799), True, 'import numpy as np\n'), ((14281, 14344), 'numpy.percentile', 'np.percentile', (['masses', '(50.0 + onesigma)'], {'interpolation': '"""nearest"""'}), "(masses, 50.0 + onesigma, interpolation='nearest')\n", (14294, 14344), True, 'import numpy as np\n'), ((15413, 15423), 'numpy.max', 'np.max', (['Xs'], {}), '(Xs)\n', (15419, 15423), True, 'import numpy as np\n'), ((15437, 15447), 'numpy.min', 'np.min', (['Xs'], {}), '(Xs)\n', (15443, 15447), True, 'import numpy as np\n'), 
((15584, 15646), 'numpy.random.uniform', 'np.random.uniform', (['(mu - 20 * signeg)', '(mu + 20 * sigpos)', '(1000000)'], {}), '(mu - 20 * signeg, mu + 20 * sigpos, 1000000)\n', (15601, 15646), True, 'import numpy as np\n'), ((7596, 7658), 'numpy.percentile', 'np.percentile', (['radii', '(50.0 - onesigma)'], {'interpolation': '"""nearest"""'}), "(radii, 50.0 - onesigma, interpolation='nearest')\n", (7609, 7658), True, 'import numpy as np\n'), ((11508, 11527), 'numpy.sum', 'np.sum', (['ind'], {'axis': '(2)'}), '(ind, axis=2)\n', (11514, 11527), True, 'import numpy as np\n'), ((14421, 14484), 'numpy.percentile', 'np.percentile', (['masses', '(50.0 - onesigma)'], {'interpolation': '"""nearest"""'}), "(masses, 50.0 - onesigma, interpolation='nearest')\n", (14434, 14484), True, 'import numpy as np\n'), ((15527, 15537), 'numpy.max', 'np.max', (['Xs'], {}), '(Xs)\n', (15533, 15537), True, 'import numpy as np\n'), ((15551, 15561), 'numpy.min', 'np.min', (['Xs'], {}), '(Xs)\n', (15557, 15561), True, 'import numpy as np\n'), ((15706, 15744), 'numpy.random.uniform', 'np.random.uniform', (['xmin', 'xmax', '(1000000)'], {}), '(xmin, xmax, 1000000)\n', (15723, 15744), True, 'import numpy as np\n'), ((11188, 11218), 'numpy.random.rand', 'np.random.rand', (['Probs.shape[0]'], {}), '(Probs.shape[0])\n', (11202, 11218), True, 'import numpy as np\n'), ((15649, 15659), 'numpy.max', 'np.max', (['Xs'], {}), '(Xs)\n', (15655, 15659), True, 'import numpy as np\n'), ((15673, 15683), 'numpy.min', 'np.min', (['Xs'], {}), '(Xs)\n', (15679, 15683), True, 'import numpy as np\n'), ((15909, 15927), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15916, 15927), True, 'import numpy as np\n'), ((15958, 15976), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15965, 15976), True, 'import numpy as np\n'), ((16188, 16206), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16195, 16206), True, 'import numpy as np\n'), ((16237, 16255), 'numpy.sqrt', 
'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16244, 16255), True, 'import numpy as np\n'), ((16033, 16043), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16040, 16043), True, 'import numpy as np\n'), ((16306, 16316), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (16313, 16316), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
These are some useful data management functions
"""
#========================================================================
# Import what you need
#========================================================================
import numpy as np
import pandas as pd
#========================================================================
def read_in_behavdata(behav_data_f):
    """
    Load the ABIDE phenotypic csv and return a cleaned data frame.

    Participants are dropped when they have no func_perc_fd measure,
    no connectivity matrix file (FILE_ID == 'no_filename'), or fall
    outside the 6-18 year age range (inclusive). An integer-year
    AGE_YRS column (floor of AGE_AT_SCAN) is added before the age
    filter is applied.
    """
    behav_df = pd.read_csv(behav_data_f)
    # Drop rows with missing motion measure, then rows without a
    # connectivity matrix file.
    behav_df = behav_df[behav_df['func_perc_fd'].notnull()]
    behav_df = behav_df[behav_df['FILE_ID'] != 'no_filename']
    # Age in whole years, rounded down.
    behav_df['AGE_YRS'] = np.floor(behav_df['AGE_AT_SCAN'])
    # Only include kids: 6 through 18 inclusive.
    in_age_range = behav_df['AGE_YRS'].between(6, 18)
    return behav_df[in_age_range]
#========================================================================
def filter_data(df, motion_thresh, age_l, age_u, motion_measure='func_perc_fd'):
    """
    Return the subset of df within an age range and below a motion cut.

    Rows are kept when their motion_measure value is strictly below
    motion_thresh AND their AGE_YRS lies in [age_l, age_u] inclusive.
    motion_measure defaults to 'func_perc_fd'; 'func_mean_fd' is the
    usual alternative.
    """
    # Motion cut first: strictly-below threshold.
    low_motion = df.loc[df[motion_measure] < motion_thresh, :]
    # Then the inclusive age window, so participants who are exactly
    # age_l or age_u (eg 6 and 10) stay in the sample.
    in_range = (low_motion['AGE_YRS'] >= age_l) & (low_motion['AGE_YRS'] <= age_u)
    return low_motion.loc[in_range, :]
#========================================================================
def select_random_sample(df, n=100):
    """
    Return a random subsample of up to n rows from df.

    The frame is copied, its rows are shuffled with
    numpy.random.permutation, and the first n rows of the shuffled
    frame are returned (fewer if df has fewer than n rows).
    """
    # Copy first so the caller's frame is never touched by the shuffle.
    shuffled = df.copy()
    shuffled = shuffled.reindex(np.random.permutation(shuffled.index))
    # Keep the first n shuffled rows.
    return shuffled.head(n)
| [
"pandas.read_csv",
"numpy.floor",
"numpy.random.permutation"
] | [((891, 916), 'pandas.read_csv', 'pd.read_csv', (['behav_data_f'], {}), '(behav_data_f)\n', (902, 916), True, 'import pandas as pd\n'), ((1035, 1062), 'numpy.floor', 'np.floor', (["df['AGE_AT_SCAN']"], {}), "(df['AGE_AT_SCAN'])\n", (1043, 1062), True, 'import numpy as np\n'), ((2567, 2603), 'numpy.random.permutation', 'np.random.permutation', (['df_copy.index'], {}), '(df_copy.index)\n', (2588, 2603), True, 'import numpy as np\n')] |
from types import CoroutineType
import pybullet as p
import pybullet_data
import numpy as np
import gym
from gym import spaces
class humanoid(gym.Env):
    """
    A pybullet gym environment around the half_human.urdf model.

    Connects a GUI pybullet instance, loads ./half_human.urdf (the
    URDF is assumed to sit in the working directory -- TODO confirm),
    exposes position-controlled joints as the action, joint angles as
    the observation, and force/torque sensor joints (names containing
    'sensor') for contact and zero-moment-point bookkeeping.
    """

    def __init__(self) -> None:
        super(humanoid, self).__init__()
        # GUI connection (requires a display); camera aimed near the feet.
        p.connect(p.GUI)
        p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=-45, cameraPitch=-20, cameraTargetPosition=[0,0,0.1])
        self.reset()

    def step(self, action):
        """
        Apply per-joint position targets and advance one sim step.

        Returns (observation, reward, done, info) in gym order; reward
        is always 0 and done always False here, and info carries the
        raw sensor-state dict.
        NOTE(review): gym expects `info` to be a dict of metadata --
        confirm downstream callers rely on this sensor payload.
        """
        p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING)
        p.setJointMotorControlArray(self.dancerUid, [self.joint2Index[joint] for joint in self.joint_names], p.POSITION_CONTROL, action)
        p.stepSimulation()
        state, _, sensorState = self.getObservation()
        return state, 0, False, sensorState

    def getObservation(self):
        """
        Collect joint angles, a self-collision flag, and sensor states.

        Returns (observation, collision, sensorStates):
        - observation: list of joint positions, in joint_names order
        - collision: True if any link is currently in contact
        - sensorStates: sensor joint name -> (joint reaction forces
          from getJointState, world position of the attached link)
        Also feeds the sensor states to get_zmp for ZMP visualization.
        """
        jointStates = {}
        for joint in self.joint_names:
            jointStates[joint] = p.getJointState(self.dancerUid, self.joint2Index[joint])
        # check collision and get sensor position
        collision = False
        for link in self.link_names:
            if len(p.getContactPoints(bodyA=self.dancerUid, linkIndexA=self.link2Index[link])) > 0:
                collision = True
            # Highlight self-collision pairs in red for debugging.
            for contact in p.getContactPoints(bodyA=self.dancerUid, bodyB=self.dancerUid, linkIndexA=self.link2Index[link]):
                print("Collision Occurred in Link {} & Link {}!!!".format(contact[3], contact[4]))
                p.changeVisualShape(self.dancerUid, contact[3], rgbaColor=[1,0,0,1])
                p.changeVisualShape(self.dancerUid, contact[4], rgbaColor=[1,0,0,1])
        # check sensor
        sensorStates = {}
        for sensor in self.sensor_name:
            sensorStates[sensor[0]] = (p.getJointState(self.dancerUid, self.sensor2Index[sensor[0]])[2], p.getLinkState(self.dancerUid, self.link2Index[sensor[1]])[0])
        observation = [jointStates[joint][0] for joint in self.joint_names]
        self.get_zmp(sensorStates)
        return observation, collision, sensorStates

    def reset(self):
        """
        Rebuild the whole simulation and return the initial observation.

        Reloads the URDF and ground plane, sets gravity and solver
        parameters, and indexes every joint/link, routing joints whose
        name contains 'sensor' to the force/torque sensor maps and the
        rest to the controllable-joint maps.
        """
        p.resetSimulation()
        self.step_counter = 0
        self.dancerUid = p.loadURDF("./half_human.urdf", [0,0,0], p.getQuaternionFromEuler([0,0,0]),
                        flags=p.URDF_USE_SELF_COLLISION+p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())
        p.setGravity(0,0,-9.8)
        self.groudId = p.loadURDF("plane.urdf")
        p.setPhysicsEngineParameter(numSolverIterations=150)
        p.setTimeStep(1./60.)
        self.joint_names = []
        self.joint2Index = {} # index map to jointName
        self.link_names = []
        self.link2Index = {} # index map to linkName
        self.lower_limits = []
        self.upper_limits = []
        self.init_angles = []
        self.sensor_name = []
        self.sensor2Index = {}
        for index in range(p.getNumJoints(self.dancerUid)):
            jointName = p.getJointInfo(self.dancerUid, index)[1].decode('utf-8')
            linkName = p.getJointInfo(self.dancerUid, index)[12].decode('utf-8')
            self.link_names.append(linkName)
            self.link2Index[linkName] = index
            # 'joint_wolrd' is the fixed world joint in the URDF
            # (presumably a typo for 'joint_world' in the model file);
            # it is skipped, not controlled.
            if jointName == 'joint_wolrd':
                continue
            if 'sensor' in jointName:
                self.sensor_name.append((jointName, linkName))
                self.sensor2Index[jointName] = index
                p.enableJointForceTorqueSensor(self.dancerUid, index, enableSensor=True)
                continue
            self.joint_names.append(jointName)
            self.joint2Index[jointName] = index
            self.lower_limits.append(-np.pi)
            self.upper_limits.append(np.pi)
            self.init_angles.append(0)
        # Fixed: dtype=np.float (an alias of the builtin float) was
        # removed in NumPy 1.24 and crashed here; use float directly.
        self.action_space = spaces.Box(np.array(self.lower_limits,dtype=float), np.array(self.upper_limits,dtype=float))
        self.observation_space = spaces.Box(np.array(self.lower_limits,dtype=float), np.array(self.upper_limits,dtype=float))
        state, _, sensorState = self.getObservation()
        return state

    def close(self):
        """Disconnect from the pybullet server."""
        p.disconnect()

    def get_zmp(self, sensorState):
        """
        Draw the zero-moment point estimated from the foot sensors.

        Computes the force-weighted average of sensor link positions,
        using only sensors whose link is near the ground (z <= 0.025
        -- presumably the sole height; confirm against the URDF), and
        draws a short-lived debug line at that point. The 1e-8 term
        guards against division by zero when no sensor carries load.
        """
        sigma_PFx = 0
        sigma_PFy = 0
        sigma_F = 0
        for key, value in sensorState.items():
            F, pos = value
            if pos[2] > 0.025:
                continue
            sigma_PFx += pos[0] * F[2]
            sigma_PFy += pos[1] * F[2]
            sigma_F += F[2]
        px = sigma_PFx/(sigma_F + 1e-8)
        py = sigma_PFy/(sigma_F + 1e-8)
        line_length = 0.2
        p.addUserDebugLine([px + line_length/2.0, py, 0], [px - line_length/2.0, py, 0], lineColorRGB=[1,0,0], lineWidth=4, lifeTime=1/120.0)
p.addUserDebugLine([px + line_length/2.0, py, 0], [px - line_length/2.0, py, 0], lineColorRGB=[1,0,0], lineWidth=4, lifeTime=1/120.0)
p.addUserDebugLine([px, py + line_length/2.0, 0], [px, py - line_length/2.0, 0], lineColorRGB=[1,0,0], lineWidth=4, lifeTime=1/120.0)
return (px, py, 0) | [
"pybullet.resetSimulation",
"pybullet.resetDebugVisualizerCamera",
"pybullet.connect",
"pybullet.getQuaternionFromEuler",
"pybullet.getContactPoints",
"pybullet.getLinkState",
"pybullet.enableJointForceTorqueSensor",
"pybullet.setJointMotorControlArray",
"pybullet.setGravity",
"pybullet.setTimeSte... | [((234, 250), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (243, 250), True, 'import pybullet as p\n'), ((259, 378), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(-45)', 'cameraPitch': '(-20)', 'cameraTargetPosition': '[0, 0, 0.1]'}), '(cameraDistance=1.5, cameraYaw=-45, cameraPitch\n =-20, cameraTargetPosition=[0, 0, 0.1])\n', (287, 378), True, 'import pybullet as p\n'), ((434, 496), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_SINGLE_STEP_RENDERING'], {}), '(p.COV_ENABLE_SINGLE_STEP_RENDERING)\n', (460, 496), True, 'import pybullet as p\n'), ((505, 637), 'pybullet.setJointMotorControlArray', 'p.setJointMotorControlArray', (['self.dancerUid', '[self.joint2Index[joint] for joint in self.joint_names]', 'p.POSITION_CONTROL', 'action'], {}), '(self.dancerUid, [self.joint2Index[joint] for\n joint in self.joint_names], p.POSITION_CONTROL, action)\n', (532, 637), True, 'import pybullet as p\n'), ((642, 660), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (658, 660), True, 'import pybullet as p\n'), ((2081, 2100), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (2098, 2100), True, 'import pybullet as p\n'), ((2394, 2418), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.8)'], {}), '(0, 0, -9.8)\n', (2406, 2418), True, 'import pybullet as p\n'), ((2441, 2465), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""'], {}), "('plane.urdf')\n", (2451, 2465), True, 'import pybullet as p\n'), ((2474, 2526), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': '(150)'}), '(numSolverIterations=150)\n', (2501, 2526), True, 'import pybullet as p\n'), ((2535, 2560), 'pybullet.setTimeStep', 'p.setTimeStep', (['(1.0 / 60.0)'], {}), '(1.0 / 60.0)\n', (2548, 2560), True, 'import pybullet as p\n'), ((4120, 4134), 'pybullet.disconnect', 
'p.disconnect', ([], {}), '()\n', (4132, 4134), True, 'import pybullet as p\n'), ((4590, 4735), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', (['[px + line_length / 2.0, py, 0]', '[px - line_length / 2.0, py, 0]'], {'lineColorRGB': '[1, 0, 0]', 'lineWidth': '(4)', 'lifeTime': '(1 / 120.0)'}), '([px + line_length / 2.0, py, 0], [px - line_length / 2.0,\n py, 0], lineColorRGB=[1, 0, 0], lineWidth=4, lifeTime=1 / 120.0)\n', (4608, 4735), True, 'import pybullet as p\n'), ((4732, 4877), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', (['[px, py + line_length / 2.0, 0]', '[px, py - line_length / 2.0, 0]'], {'lineColorRGB': '[1, 0, 0]', 'lineWidth': '(4)', 'lifeTime': '(1 / 120.0)'}), '([px, py + line_length / 2.0, 0], [px, py - line_length /\n 2.0, 0], lineColorRGB=[1, 0, 0], lineWidth=4, lifeTime=1 / 120.0)\n', (4750, 4877), True, 'import pybullet as p\n'), ((887, 943), 'pybullet.getJointState', 'p.getJointState', (['self.dancerUid', 'self.joint2Index[joint]'], {}), '(self.dancerUid, self.joint2Index[joint])\n', (902, 943), True, 'import pybullet as p\n'), ((2197, 2232), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2221, 2232), True, 'import pybullet as p\n'), ((2357, 2384), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (2382, 2384), False, 'import pybullet_data\n'), ((2906, 2936), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.dancerUid'], {}), '(self.dancerUid)\n', (2920, 2936), True, 'import pybullet as p\n'), ((3790, 3833), 'numpy.array', 'np.array', (['self.lower_limits'], {'dtype': 'np.float'}), '(self.lower_limits, dtype=np.float)\n', (3798, 3833), True, 'import numpy as np\n'), ((3834, 3877), 'numpy.array', 'np.array', (['self.upper_limits'], {'dtype': 'np.float'}), '(self.upper_limits, dtype=np.float)\n', (3842, 3877), True, 'import numpy as np\n'), ((3922, 3965), 'numpy.array', 'np.array', (['self.lower_limits'], {'dtype': 'np.float'}), '(self.lower_limits, 
dtype=np.float)\n', (3930, 3965), True, 'import numpy as np\n'), ((3966, 4009), 'numpy.array', 'np.array', (['self.upper_limits'], {'dtype': 'np.float'}), '(self.upper_limits, dtype=np.float)\n', (3974, 4009), True, 'import numpy as np\n'), ((1230, 1331), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.dancerUid', 'bodyB': 'self.dancerUid', 'linkIndexA': 'self.link2Index[link]'}), '(bodyA=self.dancerUid, bodyB=self.dancerUid, linkIndexA=\n self.link2Index[link])\n', (1248, 1331), True, 'import pybullet as p\n'), ((3430, 3502), 'pybullet.enableJointForceTorqueSensor', 'p.enableJointForceTorqueSensor', (['self.dancerUid', 'index'], {'enableSensor': '(True)'}), '(self.dancerUid, index, enableSensor=True)\n', (3460, 3502), True, 'import pybullet as p\n'), ((1085, 1159), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.dancerUid', 'linkIndexA': 'self.link2Index[link]'}), '(bodyA=self.dancerUid, linkIndexA=self.link2Index[link])\n', (1103, 1159), True, 'import pybullet as p\n'), ((1451, 1522), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['self.dancerUid', 'contact[3]'], {'rgbaColor': '[1, 0, 0, 1]'}), '(self.dancerUid, contact[3], rgbaColor=[1, 0, 0, 1])\n', (1470, 1522), True, 'import pybullet as p\n'), ((1540, 1611), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['self.dancerUid', 'contact[4]'], {'rgbaColor': '[1, 0, 0, 1]'}), '(self.dancerUid, contact[4], rgbaColor=[1, 0, 0, 1])\n', (1559, 1611), True, 'import pybullet as p\n'), ((1746, 1807), 'pybullet.getJointState', 'p.getJointState', (['self.dancerUid', 'self.sensor2Index[sensor[0]]'], {}), '(self.dancerUid, self.sensor2Index[sensor[0]])\n', (1761, 1807), True, 'import pybullet as p\n'), ((1812, 1870), 'pybullet.getLinkState', 'p.getLinkState', (['self.dancerUid', 'self.link2Index[sensor[1]]'], {}), '(self.dancerUid, self.link2Index[sensor[1]])\n', (1826, 1870), True, 'import pybullet as p\n'), ((2963, 3000), 'pybullet.getJointInfo', 
'p.getJointInfo', (['self.dancerUid', 'index'], {}), '(self.dancerUid, index)\n', (2977, 3000), True, 'import pybullet as p\n'), ((3043, 3080), 'pybullet.getJointInfo', 'p.getJointInfo', (['self.dancerUid', 'index'], {}), '(self.dancerUid, index)\n', (3057, 3080), True, 'import pybullet as p\n')] |
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dill
#don't ask how I came up with these numbers
SIZE_H1 = 50
SIZE_H2 = 100
SIZE_H3 = 60
class Actor(torch.nn.Module):
"""Defines custom model
Inherits from torch.nn.Module
"""
def __init__(self, dim_input, dim_output):
super(actor, self).__init__()
self._dim_input = dim_input
self._dim_output = dim_output
'''Initialize nnet layers'''
self._l1 = torch.nn.Linear(self._dim_input, SIZE_H1)
self._l2 = torch.nn.Linear(SIZE_H1, SIZE_H2)
self._l3 = torch.nn.Linear(SIZE_H2, SIZE_H3)
self._l4 = torch.nn.Linear( SIZE_H3, self._dim_output)
def forward(self,s_t, r_t, aux_array): #TODO: Add aux task support, experiment with inputting previous action as well
x = Variable(torch.FloatTensor(np.concatenate((s_t,r_t), axis = 1)))
self._l1_out = F.relu(self._l1(x))
self._l2_out = F.relu(self._l2(self._l1_out))
self._l3_out = nn.BatchNorm1d(SIZE_H3)(self._l3(self._l2_out))
self._out = F.tanh(self._l4(self._l3_out))
return self._out
#TODO: Change to use config file instead, add main function and all that
def generate(path, idim, odim):
M = actor(idim, odim)
with open(path, "wb") as f:
dill.dump(M, f)
| [
"torch.nn.BatchNorm1d",
"dill.dump",
"numpy.concatenate",
"torch.nn.Linear"
] | [((570, 611), 'torch.nn.Linear', 'torch.nn.Linear', (['self._dim_input', 'SIZE_H1'], {}), '(self._dim_input, SIZE_H1)\n', (585, 611), False, 'import torch\n'), ((631, 664), 'torch.nn.Linear', 'torch.nn.Linear', (['SIZE_H1', 'SIZE_H2'], {}), '(SIZE_H1, SIZE_H2)\n', (646, 664), False, 'import torch\n'), ((684, 717), 'torch.nn.Linear', 'torch.nn.Linear', (['SIZE_H2', 'SIZE_H3'], {}), '(SIZE_H2, SIZE_H3)\n', (699, 717), False, 'import torch\n'), ((737, 779), 'torch.nn.Linear', 'torch.nn.Linear', (['SIZE_H3', 'self._dim_output'], {}), '(SIZE_H3, self._dim_output)\n', (752, 779), False, 'import torch\n'), ((1398, 1413), 'dill.dump', 'dill.dump', (['M', 'f'], {}), '(M, f)\n', (1407, 1413), False, 'import dill\n'), ((1101, 1124), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['SIZE_H3'], {}), '(SIZE_H3)\n', (1115, 1124), True, 'import torch.nn as nn\n'), ((943, 977), 'numpy.concatenate', 'np.concatenate', (['(s_t, r_t)'], {'axis': '(1)'}), '((s_t, r_t), axis=1)\n', (957, 977), True, 'import numpy as np\n')] |
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from GCN.dataset import Dataset
from GCN import model as m
from GCN.callback import *
from datetime import datetime
import numpy as np
import time
import csv
import os
class Trainer(object):
def __init__(self, dataset):
self.data = None
self.model = None
self.hyper = {"dataset": dataset}
self.log = {}
def __repr__(self):
text = ""
for key, value in self.log.items():
text += "{}:\t".format(key)
for error in value[0]:
text += "{0:.4f} ".format(float(error))
text += "\n"
return text
def load_data(self, batch=128, normalize=False, use_atom_symbol=True, use_atom_symbol_extended=False,
use_atom_number=False, use_degree=False, use_hybridization=False, use_implicit_valence=False,
use_partial_charge=False, use_formal_charge=False, use_ring_size=False, use_hydrogen_bonding=False,
use_acid_base=False, use_aromaticity=False, use_chirality=False, use_num_hydrogen=False):
self.data = Dataset(self.hyper["dataset"],
batch=batch,
normalize=normalize,
use_atom_symbol=use_atom_symbol,
use_atom_symbol_extended=use_atom_symbol_extended,
use_atom_number=use_atom_number,
use_degree=use_degree,
use_hybridization=use_hybridization,
use_implicit_valence=use_implicit_valence,
use_partial_charge=use_partial_charge,
use_formal_charge=use_formal_charge,
use_ring_size=use_ring_size,
use_hydrogen_bonding=use_hydrogen_bonding,
use_acid_base=use_acid_base,
use_aromaticity=use_aromaticity,
use_chirality=use_chirality,
use_num_hydrogen=use_num_hydrogen)
self.hyper["num_train"] = len(self.data.y["train"])
self.hyper["num_val"] = len(self.data.y["valid"])
self.hyper["num_test"] = len(self.data.y["test"])
self.hyper["num_atoms"] = self.data.max_atoms
self.hyper["num_features"] = self.data.num_features
self.hyper["data_std"] = self.data.std
self.hyper["data_mean"] = self.data.mean
self.hyper["task"] = self.data.task
self.hyper["outputs"] = self.data.outputs
self.hyper["batch"] = batch
self.hyper["normalize"] = normalize
def load_model(self, model, units_conv=128, units_dense=128, num_layers=2, loss="mse"):
self.hyper["model"] = model
self.hyper["units_conv"] = units_conv
self.hyper["units_dense"] = units_dense
self.hyper["num_layers"] = num_layers
self.hyper["loss"] = loss
self.model = getattr(m, model)(self.hyper)
self.model.summary()
def fit(self, model, epoch=150, batch=128, fold=10, normalize=False, loss="mse", monitor="val_rmse", label="",
units_conv=50, units_dense=128, num_layers=2, mode="min", use_multiprocessing=True, use_atom_symbol=True,
use_atom_symbol_extended=False, use_atom_number=False, use_degree=False, use_hybridization=False,
use_implicit_valence=False, use_partial_charge=False, use_formal_charge=False, use_ring_size=False,
use_hydrogen_bonding=False, use_acid_base=False, use_aromaticity=False, use_chirality=False,
use_num_hydrogen=False):
# 1. Generate CV folder
now = datetime.now()
base_path = "../result/{}/{}/".format(model, self.hyper["dataset"])
log_path = base_path
results = []
for i in range(1, fold + 1):
start_time = time.time()
# 2. Generate data
self.load_data(batch=batch,
normalize=normalize,
use_atom_symbol=use_atom_symbol,
use_atom_symbol_extended=use_atom_symbol_extended,
use_atom_number=use_atom_number,
use_degree=use_degree,
use_hybridization=use_hybridization,
use_implicit_valence=use_implicit_valence,
use_partial_charge=use_partial_charge,
use_formal_charge=use_formal_charge,
use_ring_size=use_ring_size,
use_hydrogen_bonding=use_hydrogen_bonding,
use_acid_base=use_acid_base,
use_aromaticity=use_aromaticity,
use_chirality=use_chirality,
use_num_hydrogen=use_num_hydrogen)
# 3. Make model
self.load_model(model, units_conv=units_conv, units_dense=units_dense, num_layers=num_layers, loss=loss)
# 4. Callbacks
log_path = base_path + "{}_c{}_d{}_l{}_{}_{}/".format(batch, units_conv, units_dense, num_layers, label,
now.strftime("%m%d%H"))
tb_path = log_path + "trial_{}/".format(i)
callbacks = []
if self.data.task != "regression":
callbacks.append(Roc(self.data.generator("valid")))
mode = "max"
callbacks += [Tensorboard(log_dir=tb_path, write_graph=False, histogram_freq=0, write_images=True),
ModelCheckpoint(tb_path + "{epoch:01d}-{" + monitor + ":.3f}.hdf5", monitor=monitor,
save_weights_only=True, save_best_only=True, period=1, mode=mode),
EarlyStopping(patience=15),
ReduceLROnPlateau(monitor="val_loss", factor=0.9, patience=10, min_lr=0.0005)]
# 5. Fit
self.model.fit_generator(self.data.generator("train"), epochs=epoch,
validation_data=self.data.generator("valid"), callbacks=callbacks,
use_multiprocessing=use_multiprocessing, workers=10)
self.hyper["train_time"] = time.time() - start_time
# 6. Find best checkpoint
models = []
for root, dirs, files in os.walk(tb_path):
for fname in files:
if "hdf5" in fname:
models.append([fname[:-5].split("-")[0], fname[:-5].split("-")[1]])
if self.data.task == "regression":
idx = np.argmin(np.array(models), axis=0)[-1]
else:
idx = np.argmax(np.array(models), axis=0)[-1]
best_model = tb_path + str(models[idx][0]) + "-" + str(models[idx][1]) + ".hdf5"
self.model.load_weights(best_model)
# 7. Save train, valid, test losses
if self.data.task == "regression":
train_loss = self.model.evaluate_generator(self.data.generator("train"),
use_multiprocessing=use_multiprocessing, workers=6)
valid_loss = self.model.evaluate_generator(self.data.generator("valid"),
use_multiprocessing=use_multiprocessing, workers=6)
test_loss = self.model.evaluate_generator(self.data.generator("test"),
use_multiprocessing=use_multiprocessing, workers=6)
results.append([train_loss[1], valid_loss[1], test_loss[1], train_loss[2], valid_loss[2], test_loss[2]])
else:
losses = []
for gen in [self.data.generator("train"), self.data.generator("valid"), self.data.generator("test")]:
val_roc, val_pr = calculate_roc_pr(self.model, gen)
losses.append(val_roc)
losses.append(val_pr)
results.append([losses[0], losses[2], losses[4], losses[1], losses[3], losses[5]])
# 8. Save hyper
with open(tb_path + "hyper.csv", "w") as file:
writer = csv.DictWriter(file, fieldnames=list(self.hyper.keys()))
writer.writeheader()
writer.writerow(self.hyper)
# 9. Save test results
pred = self.model.predict_generator(self.data.generator("test", task="input_only"),
use_multiprocessing=use_multiprocessing, workers=6)
self.data.save_dataset(pred, tb_path, target="test")
# Save cross validation results
if self.data.task == "regression":
header = ["train_mae", "valid_mae", "test_mae", "train_rmse", "valid_rmse", "test_rmse"]
else:
header = ["train_roc", "valid_roc", "test_roc", "train_pr", "valid_pr", "test_pr"]
with open(log_path + "raw_results.csv", "w") as file:
writer = csv.writer(file, delimiter=",")
writer.writerow(header)
for r in results:
writer.writerow(r)
results = np.array(results)
results = [np.mean(results, axis=0), np.std(results, axis=0)]
with open(log_path + "results.csv", "w") as csvfile:
writer = csv.writer(csvfile, delimiter=",")
writer.writerow(header)
for r in results:
writer.writerow(r)
# Update cross validation log
self.log[
"{}_B{}_N{}_C{}_D{}_L{}".format(model, batch, normalize, units_conv, units_dense, num_layers)] = results
print(self)
print("Training Ended")
| [
"csv.writer",
"numpy.std",
"keras.callbacks.ModelCheckpoint",
"os.walk",
"time.time",
"GCN.dataset.Dataset",
"numpy.mean",
"numpy.array",
"keras.callbacks.EarlyStopping",
"keras.callbacks.ReduceLROnPlateau",
"datetime.datetime.now"
] | [((1149, 1742), 'GCN.dataset.Dataset', 'Dataset', (["self.hyper['dataset']"], {'batch': 'batch', 'normalize': 'normalize', 'use_atom_symbol': 'use_atom_symbol', 'use_atom_symbol_extended': 'use_atom_symbol_extended', 'use_atom_number': 'use_atom_number', 'use_degree': 'use_degree', 'use_hybridization': 'use_hybridization', 'use_implicit_valence': 'use_implicit_valence', 'use_partial_charge': 'use_partial_charge', 'use_formal_charge': 'use_formal_charge', 'use_ring_size': 'use_ring_size', 'use_hydrogen_bonding': 'use_hydrogen_bonding', 'use_acid_base': 'use_acid_base', 'use_aromaticity': 'use_aromaticity', 'use_chirality': 'use_chirality', 'use_num_hydrogen': 'use_num_hydrogen'}), "(self.hyper['dataset'], batch=batch, normalize=normalize,\n use_atom_symbol=use_atom_symbol, use_atom_symbol_extended=\n use_atom_symbol_extended, use_atom_number=use_atom_number, use_degree=\n use_degree, use_hybridization=use_hybridization, use_implicit_valence=\n use_implicit_valence, use_partial_charge=use_partial_charge,\n use_formal_charge=use_formal_charge, use_ring_size=use_ring_size,\n use_hydrogen_bonding=use_hydrogen_bonding, use_acid_base=use_acid_base,\n use_aromaticity=use_aromaticity, use_chirality=use_chirality,\n use_num_hydrogen=use_num_hydrogen)\n", (1156, 1742), False, 'from GCN.dataset import Dataset\n'), ((3744, 3758), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3756, 3758), False, 'from datetime import datetime\n'), ((9343, 9360), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9351, 9360), True, 'import numpy as np\n'), ((3948, 3959), 'time.time', 'time.time', ([], {}), '()\n', (3957, 3959), False, 'import time\n'), ((6512, 6528), 'os.walk', 'os.walk', (['tb_path'], {}), '(tb_path)\n', (6519, 6528), False, 'import os\n'), ((9191, 9222), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (9201, 9222), False, 'import csv\n'), ((9380, 9404), 'numpy.mean', 'np.mean', (['results'], {'axis': 
'(0)'}), '(results, axis=0)\n', (9387, 9404), True, 'import numpy as np\n'), ((9406, 9429), 'numpy.std', 'np.std', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (9412, 9429), True, 'import numpy as np\n'), ((9513, 9547), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (9523, 9547), False, 'import csv\n'), ((5698, 5853), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(tb_path + '{epoch:01d}-{' + monitor + ':.3f}.hdf5')"], {'monitor': 'monitor', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'period': '(1)', 'mode': 'mode'}), "(tb_path + '{epoch:01d}-{' + monitor + ':.3f}.hdf5', monitor\n =monitor, save_weights_only=True, save_best_only=True, period=1, mode=mode)\n", (5713, 5853), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((5918, 5944), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(15)'}), '(patience=15)\n', (5931, 5944), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((5972, 6049), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.9)', 'patience': '(10)', 'min_lr': '(0.0005)'}), "(monitor='val_loss', factor=0.9, patience=10, min_lr=0.0005)\n", (5989, 6049), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), ((6387, 6398), 'time.time', 'time.time', ([], {}), '()\n', (6396, 6398), False, 'import time\n'), ((6777, 6793), 'numpy.array', 'np.array', (['models'], {}), '(models)\n', (6785, 6793), True, 'import numpy as np\n'), ((6857, 6873), 'numpy.array', 'np.array', (['models'], {}), '(models)\n', (6865, 6873), True, 'import numpy as np\n')] |
# made by <NAME> 1610110007
# written in python using pytorch library
import matplotlib
import torchvision
from torch.utils.data import DataLoader as DataLoader
import torch.nn as nn
from PIL import Image
from torchvision.utils import save_image
import numpy
import torch
import os
epochs = 10
batch_size = 100;
DATA_PATH = '/Users/adityadubey/PycharmProjects/dsp-project'
#/Users/adityadubey/PycharmProjects/dsp-project
MODEL_STORE_PATH = '/Users/adityadubey/PycharmProjects/dsp-project/pytorch_models\\'
trans = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=True, transform=trans, download=True)
test_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=False, transform=trans)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False)
# divides the training data into batch sizes of 100 and shuffles the data
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
#
# min min V(D,G)
#data = train
# discrimnator
# image 93 almost 8
class Discrimnator(nn.Module):
def __init__(self):
super(Discrimnator, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(784,256),
nn.LeakyReLU(0.2),
)
self.layer2 = nn.Sequential(
nn.Linear(256,100),
nn.LeakyReLU(0.2),
nn.Dropout(0.2)
)
self.layer3 = nn.Sequential(
nn.Linear(100,1),
nn.LeakyReLU(0.2),
nn.Dropout(0.2)
)
self.layer4 = nn.Sequential(
nn.Sigmoid()
)
def forward(self,x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
discrimnator = Discrimnator()
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(100,256),
nn.LeakyReLU(0.3)
)
self.layer2 = nn.Sequential(
nn.Linear(256,512),
nn.LeakyReLU(0.3)
)
self.layer3 = nn.Sequential(
nn.Linear(512, 784),
nn.LeakyReLU(0.3)
)
self.layer4 = nn.Sequential(
nn.Tanh()
)
def forward(self,x):
x = self.layer1(x);
x = self.layer2(x);
x = self.layer3(x);
x = self.layer4(x);
return x
generator = Generator()
# optimizers and loss
learning_rate = 0.0002;
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
optimizer_dis = torch.optim.Adam(discrimnator.parameters(), lr=learning_rate)
optimizer_gen = torch.optim.Adam(generator.parameters(), lr=learning_rate)
loss = criterion = nn.BCELoss()
def train_gan(images,fake_labels,real_labels,batch_size):
# discrimnator
# train it on real images for discrimnator
output = discrimnator.forward(images)
d_loss_real = loss(output,real_labels)
real_score = output
# train it on fake images for discrimnator
output = discrimnator.forward(images)
d_loss_fake = loss(output,fake_labels)
# collect the loss and backpropagate
d_loss = d_loss_fake + d_loss_real
d_1 = d_loss.item()
optimizer_dis.zero_grad()
d_loss.backward()
optimizer_dis.step()
# generator
# input a noise image and produce a fake image
global fake_image
global img
img = torch.rand(batch_size,100)
global fake_images
fake_image = generator.forward(img)
# test it in discrimnator
error_dis = discrimnator.forward(fake_image.reshape(batch_size, -1))
error = loss(error_dis,real_labels)
value = error.item()
# backpropagate
optimizer_gen.zero_grad()
error.backward()
optimizer_gen.step()
return value,d_1
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
# train discrimnator and generator
num_epochs = 100;
Error = []
DATA_PATH_1 = '/Users/adityadubey/PycharmProjects/dsp-project/images'
for epoch in range(num_epochs):
for i, (images, _) in enumerate(train_loader):
error1 = []
error2 = []
batch_size = 100
images = images.reshape(batch_size, -1)
real_labels = torch.ones(batch_size, 1)
fake_labels = torch.zeros(batch_size, 1)
#batch_size = 100
error_gen = train_gan(images,real_labels,fake_labels,batch_size)
error1.append(error_gen[0])
error2.append(error_gen[1])
error1 = numpy.array(error1)
error2 = numpy.array(error2)
error1.mean()
error2.mean()
print("epoch - no ---> {} generator-error --> {} discrimnator --> {} ".format(epoch,error1,error2))
# save the image
# Save real images
try :
images = images.reshape(img.size(0), 1, 28, 28)
save_image(denorm(images), os.path.join(DATA_PATH_1, 'real_images-{}.png'.format(epoch)))
imag2 = fake_image.reshape(fake_image.size(0), 1, 28, 28)
save_image(denorm(imag2), os.path.join(DATA_PATH_1, 'fake_images-{}.png'.format(epoch)))
except Exception as e:
print(e)
# Save sampled images
#fake_images = fake_image.reshape(fake_image.size(0), 1, 28, 28)
#save_image(denorm(fake_images), os.path.join(DATA_PATH, 'fake_images-{}.png'.format(epoch + 1)))
| [
"torch.nn.Dropout",
"torch.ones",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"numpy.array",
"torch.rand",
"torch.zeros",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"torchvision.tran... | [((659, 749), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'DATA_PATH', 'train': '(True)', 'transform': 'trans', 'download': '(True)'}), '(root=DATA_PATH, train=True, transform=trans,\n download=True)\n', (685, 749), False, 'import torchvision\n'), ((761, 833), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'DATA_PATH', 'train': '(False)', 'transform': 'trans'}), '(root=DATA_PATH, train=False, transform=trans)\n', (787, 833), False, 'import torchvision\n'), ((850, 921), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=False)\n', (860, 921), True, 'from torch.utils.data import DataLoader as DataLoader\n'), ((1011, 1081), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n', (1021, 1081), True, 'from torch.utils.data import DataLoader as DataLoader\n'), ((2851, 2863), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2861, 2863), True, 'import torch.nn as nn\n'), ((3533, 3560), 'torch.rand', 'torch.rand', (['batch_size', '(100)'], {}), '(batch_size, 100)\n', (3543, 3560), False, 'import torch\n'), ((4596, 4615), 'numpy.array', 'numpy.array', (['error1'], {}), '(error1)\n', (4607, 4615), False, 'import numpy\n'), ((4629, 4648), 'numpy.array', 'numpy.array', (['error2'], {}), '(error2)\n', (4640, 4648), False, 'import numpy\n'), ((551, 584), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (582, 584), False, 'import torchvision\n'), ((586, 640), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (618, 640), False, 'import torchvision\n'), ((4333, 4358), 'torch.ones', 'torch.ones', 
(['batch_size', '(1)'], {}), '(batch_size, 1)\n', (4343, 4358), False, 'import torch\n'), ((4381, 4407), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (4392, 4407), False, 'import torch\n'), ((1303, 1322), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(256)'], {}), '(784, 256)\n', (1312, 1322), True, 'import torch.nn as nn\n'), ((1335, 1352), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1347, 1352), True, 'import torch.nn as nn\n'), ((1413, 1432), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(100)'], {}), '(256, 100)\n', (1422, 1432), True, 'import torch.nn as nn\n'), ((1445, 1462), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1457, 1462), True, 'import torch.nn as nn\n'), ((1476, 1491), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (1486, 1491), True, 'import torch.nn as nn\n'), ((1551, 1568), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (1560, 1568), True, 'import torch.nn as nn\n'), ((1581, 1598), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1593, 1598), True, 'import torch.nn as nn\n'), ((1612, 1627), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (1622, 1627), True, 'import torch.nn as nn\n'), ((1687, 1699), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1697, 1699), True, 'import torch.nn as nn\n'), ((2036, 2055), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(256)'], {}), '(100, 256)\n', (2045, 2055), True, 'import torch.nn as nn\n'), ((2068, 2085), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.3)'], {}), '(0.3)\n', (2080, 2085), True, 'import torch.nn as nn\n'), ((2145, 2164), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (2154, 2164), True, 'import torch.nn as nn\n'), ((2177, 2194), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.3)'], {}), '(0.3)\n', (2189, 2194), True, 'import torch.nn as nn\n'), ((2254, 2273), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(784)'], {}), '(512, 
784)\n', (2263, 2273), True, 'import torch.nn as nn\n'), ((2287, 2304), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.3)'], {}), '(0.3)\n', (2299, 2304), True, 'import torch.nn as nn\n'), ((2364, 2373), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2371, 2373), True, 'import torch.nn as nn\n')] |
"""
Belief Propogation Search
"""
import torch
import faiss
import numpy as np
import torchvision
from einops import rearrange,repeat
import nnf_utils as nnf_utils
from nnf_share import padBurst,getBlockLabels,tileBurst,padAndTileBatch,padLocs,locs2flow,mode_vals,mode_ndarray
from bnnf_utils import runBurstNnf,evalAtFlow
from sub_burst import runBurstNnf as runSubBurstNnf
from sub_burst import evalAtLocs
# from wnnf_utils import runWeightedBurstNnf
from easydict import EasyDict as edict
import sys
sys.path.append("/home/gauenk/Documents/experiments/cl_gen/lib/")
from .utils import create_search_ranges,warp_burst_from_pix,warp_burst_from_locs,compute_temporal_cluster,update_state,locs_frames2groups,compute_search_blocks,pix2locs,index_along_ftrs,temporal_inliers_outliers,update_state_outliers
from .merge_search_ranges_numba import merge_search_ranges
center_crop = torchvision.transforms.functional.center_crop
resize = torchvision.transforms.functional.resize
th_pad = torchvision.transforms.functional.pad
def runBpSearch_rand(noisy, clean, patchsize, nblocks, k = 1,
nparticles = 1, niters = 100,
valMean = 0., std = None,
l2_nblocks = None, l2_valMean=0.,
blockLabels=None, ref=None,
to_flow=False, fmt=False):
# -------------------------------
#
# ---- error checking ----
#
# -------------------------------
if l2_nblocks is None: l2_nblocks = nblocks
assert nparticles == 1, "Only one particle currently supported."
# -------------------------------
#
# ---- initalize fxn ----
#
# -------------------------------
device = noisy.device
nframes,nimages,c,h,w = noisy.shape
ishape = [h,w]
img_shape = [c,h,w]
psHalf,nbHalf = patchsize//2,nblocks//2
fPad = 2*(psHalf + nbHalf)
int_shape = [h-fPad,w-fPad]
isize = edict({'h':h,'w':w})
psize = edict({'h':h+2*nbHalf,'w':w+2*nbHalf})
pshape = [h+psHalf,w+psHalf]
mask = torch.zeros(h+2*nbHalf,w+2*nbHalf).to(device)
MAX_SEARCH_FRAMES = 4
numSearch = min(MAX_SEARCH_FRAMES,nframes-1)
if std is None: std = torch.std(noisy.reshape(-1)).item()
if np.isclose(valMean,0):
ps2 = patchsize**2
t = numSearch + 1
c2 = ((t-1)/t)**2 * std**2 + (t-1)/t**2 * std**2
mode = (1 - 2/p)*theory_npn.c2*p
valMean = mode
# -------------------------------
#
# ---- inital search ----
#
# -> large patchsize to account
# for large global motion.
#
# -> search over keypoints only
#
# -------------------------------
# -- 1.) run l2 local search --
vals,pix = nnf_utils.runNnfBurst(noisy, patchsize, l2_nblocks,
k=nparticles, valMean = l2_valMean,
img_shape = None)
vals = torch.mean(vals,dim=0).to(device)
l2_vals = vals
pix = pix.to(device)
locs = pix2locs(pix)
# l2_locs = torch.zeros_like(locs)
l2_locs = locs
# -- 2.) create local search radius from topK locs --
nframes,nimages,h,w,k,two = locs.shape
search_ranges = create_search_ranges(nblocks,h,w,nframes)
search_ranges = torch.LongTensor(search_ranges[:,None]).to(device)
# -- 3.) pad and tile --
tnoisy = padAndTileBatch(noisy,patchsize,nblocks)
img_shape[0] = tnoisy.shape[-3]
# -- 4.) update "val" from "l2" to "burst" @ curr --
vals,e_locs = evalAtLocs(tnoisy, l2_locs, 1,
nblocks, img_shape=img_shape)
# -- 5.) warp burst to top location --
pixPad = (tnoisy.shape[-1] - noisy.shape[-1])//2
plocs = padLocs(locs,pixPad,'extend')
warped_noisy = warp_burst_from_locs(tnoisy,plocs,1,psize)[0]
# -- compute search ranges for number of search frames --
ngroups = numSearch+1
# search_ranges = create_search_ranges(nblocks,h,w,nsearch,0)
# search_ranges = torch.LongTensor(search_ranges[:,None]).to(device)
# search_blocks = compute_search_blocks(search_ranges,0)
search_blocks,_ = getBlockLabels(None,nblocks,torch.long,device,True,t=ngroups)
ngroups,nsearch,two = search_blocks.shape
left = search_blocks[:ngroups//2]
right = search_blocks[ngroups//2+1:]
search_blocks = torch.cat([search_blocks[[ngroups//2]],left,right],dim=0)
print("search_blocks.shape: ",search_blocks.shape)
# -------------------------------
#
# -- execute random search --
#
# -------------------------------
counts = torch.zeros(nframes)
for i in range(niters):
prev_locs = locs.clone()
# -- 1.) cluster each pixel across time --
search_frames,names,nuniuqes = temporal_inliers_outliers(tnoisy,warped_noisy,
vals,std,
numSearch=numSearch)
print(f"{i}")
print("locs.shape: ",locs.shape)
print("search_frames.shape: ",search_frames.shape)
print("search_blocks.shape: ",search_blocks.shape)
# print("Names: ",list(names.cpu().numpy()))
counts[names] += 1
# -- 2.) exh srch over a selected frames --
sub_vals,sub_locs = runBurstNnf(search_frames, 1, nblocks, k=1,
blockLabels=search_blocks,
img_shape=img_shape,valMean=valMean)
# print("Num Uniques: ",nuniuqes[:,16,16])
sub_vals = sub_vals / center_crop(nuniuqes,ishape)[...,None]
sub_vals = torch.abs(sub_vals - valMean)
# -- 3.) update vals and locs --
vals,locs = update_state_outliers(vals,locs,sub_vals,
sub_locs,names,False)
max_displ = torch.abs(locs).max().item()
assert max_displ <= nbHalf, "displacement must remain contained!"
# print("vals @ (16,16): ",vals[0,16,16,0])
# print("sub_vals @ (16,16): ",sub_vals[0,16,16,0])
# -- 4.) rewarp bursts --
pad_locs = padLocs(locs,nbHalf,'extend')
nframes,nimages,hP,wP,k,two = pad_locs.shape
warped_noisy_old = warped_noisy.clone()
warped_noisy = warp_burst_from_locs(tnoisy,pad_locs,nblocks,psize)[0]
delta = torch.sum(torch.abs(prev_locs - locs)).item()
# print("Delta: ",delta)
# delta = torch.sum(torch.abs(prev_locs[:,0,16,16,0] - locs[:,0,16,16,0])).item()
# delta = torch.sum(torch.abs(warped_noisy_old[...,16,16] - \
# tnoisy[...,16,16])).item()
# delta = torch.sum(torch.abs(warped_noisy_old[...,16+1,16+1] - \
# warped_noisy[...,16+1,16+1])).item()
# print("Delta: ",delta)
# -------------------------------
#
# -- finalizing outputs --
#
# -------------------------------
# print("Counts: ",counts)
# -- get the output image from tiled image --
warped_noisy = center_crop(warped_noisy,ishape)
warped_noisy = index_along_ftrs(warped_noisy,patchsize,c)
# -- convert "locs" to "flow" --
if to_flow:
locs = locs2flow(locs)
# -- reformat for experiment api --
if fmt:
locs = rearrange(locs,'t i h w k two -> k i (h w) t two')
return vals,locs,warped_noisy#,warped_clean
| [
"sys.path.append",
"torch.mean",
"nnf_share.getBlockLabels",
"torch.LongTensor",
"nnf_share.locs2flow",
"bnnf_utils.runBurstNnf",
"torch.cat",
"nnf_share.padLocs",
"nnf_share.padAndTileBatch",
"numpy.isclose",
"einops.rearrange",
"easydict.EasyDict",
"sub_burst.evalAtLocs",
"torch.zeros",
... | [((506, 571), 'sys.path.append', 'sys.path.append', (['"""/home/gauenk/Documents/experiments/cl_gen/lib/"""'], {}), "('/home/gauenk/Documents/experiments/cl_gen/lib/')\n", (521, 571), False, 'import sys\n'), ((1940, 1963), 'easydict.EasyDict', 'edict', (["{'h': h, 'w': w}"], {}), "({'h': h, 'w': w})\n", (1945, 1963), True, 'from easydict import EasyDict as edict\n'), ((1973, 2022), 'easydict.EasyDict', 'edict', (["{'h': h + 2 * nbHalf, 'w': w + 2 * nbHalf}"], {}), "({'h': h + 2 * nbHalf, 'w': w + 2 * nbHalf})\n", (1978, 2022), True, 'from easydict import EasyDict as edict\n'), ((2246, 2268), 'numpy.isclose', 'np.isclose', (['valMean', '(0)'], {}), '(valMean, 0)\n', (2256, 2268), True, 'import numpy as np\n'), ((2744, 2850), 'nnf_utils.runNnfBurst', 'nnf_utils.runNnfBurst', (['noisy', 'patchsize', 'l2_nblocks'], {'k': 'nparticles', 'valMean': 'l2_valMean', 'img_shape': 'None'}), '(noisy, patchsize, l2_nblocks, k=nparticles, valMean=\n l2_valMean, img_shape=None)\n', (2765, 2850), True, 'import nnf_utils as nnf_utils\n'), ((3376, 3418), 'nnf_share.padAndTileBatch', 'padAndTileBatch', (['noisy', 'patchsize', 'nblocks'], {}), '(noisy, patchsize, nblocks)\n', (3391, 3418), False, 'from nnf_share import padBurst, getBlockLabels, tileBurst, padAndTileBatch, padLocs, locs2flow, mode_vals, mode_ndarray\n'), ((3529, 3589), 'sub_burst.evalAtLocs', 'evalAtLocs', (['tnoisy', 'l2_locs', '(1)', 'nblocks'], {'img_shape': 'img_shape'}), '(tnoisy, l2_locs, 1, nblocks, img_shape=img_shape)\n', (3539, 3589), False, 'from sub_burst import evalAtLocs\n'), ((3728, 3759), 'nnf_share.padLocs', 'padLocs', (['locs', 'pixPad', '"""extend"""'], {}), "(locs, pixPad, 'extend')\n", (3735, 3759), False, 'from nnf_share import padBurst, getBlockLabels, tileBurst, padAndTileBatch, padLocs, locs2flow, mode_vals, mode_ndarray\n'), ((4134, 4200), 'nnf_share.getBlockLabels', 'getBlockLabels', (['None', 'nblocks', 'torch.long', 'device', '(True)'], {'t': 'ngroups'}), '(None, nblocks, torch.long, 
device, True, t=ngroups)\n', (4148, 4200), False, 'from nnf_share import padBurst, getBlockLabels, tileBurst, padAndTileBatch, padLocs, locs2flow, mode_vals, mode_ndarray\n'), ((4343, 4405), 'torch.cat', 'torch.cat', (['[search_blocks[[ngroups // 2]], left, right]'], {'dim': '(0)'}), '([search_blocks[[ngroups // 2]], left, right], dim=0)\n', (4352, 4405), False, 'import torch\n'), ((4601, 4621), 'torch.zeros', 'torch.zeros', (['nframes'], {}), '(nframes)\n', (4612, 4621), False, 'import torch\n'), ((5325, 5437), 'bnnf_utils.runBurstNnf', 'runBurstNnf', (['search_frames', '(1)', 'nblocks'], {'k': '(1)', 'blockLabels': 'search_blocks', 'img_shape': 'img_shape', 'valMean': 'valMean'}), '(search_frames, 1, nblocks, k=1, blockLabels=search_blocks,\n img_shape=img_shape, valMean=valMean)\n', (5336, 5437), False, 'from bnnf_utils import runBurstNnf, evalAtFlow\n'), ((5652, 5681), 'torch.abs', 'torch.abs', (['(sub_vals - valMean)'], {}), '(sub_vals - valMean)\n', (5661, 5681), False, 'import torch\n'), ((6139, 6170), 'nnf_share.padLocs', 'padLocs', (['locs', 'nbHalf', '"""extend"""'], {}), "(locs, nbHalf, 'extend')\n", (6146, 6170), False, 'from nnf_share import padBurst, getBlockLabels, tileBurst, padAndTileBatch, padLocs, locs2flow, mode_vals, mode_ndarray\n'), ((7248, 7263), 'nnf_share.locs2flow', 'locs2flow', (['locs'], {}), '(locs)\n', (7257, 7263), False, 'from nnf_share import padBurst, getBlockLabels, tileBurst, padAndTileBatch, padLocs, locs2flow, mode_vals, mode_ndarray\n'), ((7332, 7383), 'einops.rearrange', 'rearrange', (['locs', '"""t i h w k two -> k i (h w) t two"""'], {}), "(locs, 't i h w k two -> k i (h w) t two')\n", (7341, 7383), False, 'from einops import rearrange, repeat\n'), ((2056, 2099), 'torch.zeros', 'torch.zeros', (['(h + 2 * nbHalf)', '(w + 2 * nbHalf)'], {}), '(h + 2 * nbHalf, w + 2 * nbHalf)\n', (2067, 2099), False, 'import torch\n'), ((2937, 2960), 'torch.mean', 'torch.mean', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (2947, 2960), 
False, 'import torch\n'), ((3282, 3322), 'torch.LongTensor', 'torch.LongTensor', (['search_ranges[:, None]'], {}), '(search_ranges[:, None])\n', (3298, 3322), False, 'import torch\n'), ((6375, 6402), 'torch.abs', 'torch.abs', (['(prev_locs - locs)'], {}), '(prev_locs - locs)\n', (6384, 6402), False, 'import torch\n'), ((5870, 5885), 'torch.abs', 'torch.abs', (['locs'], {}), '(locs)\n', (5879, 5885), False, 'import torch\n')] |
from pathlib import Path
from tqdm.notebook import tqdm
from tqdm import trange
import pandas as po
import numpy as np
import warnings
import pickle
import nltk
import math
import os
import random
import re
import torch
import torch.nn as nn
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
BartConfig, BartTokenizer, BartForSequenceClassification,
LongformerConfig, LongformerForSequenceClassification, LongformerTokenizer,
AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer,
ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer,
ReformerConfig, ReformerForSequenceClassification, ReformerTokenizer,
MobileBertConfig, MobileBertForSequenceClassification, MobileBertTokenizer,
DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer,
AutoTokenizer, AutoModel, AutoModelForSequenceClassification,
)
from torch.utils.data import (DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset)
from paths import get_path_q, get_path_df_scores, get_path_predict
from utils import get_df_explanations, get_questions, average_precision_score
from rank import get_ranks, get_preds, ideal_rerank, remove_combo_suffix, format_predict_line, write_preds
SEP = "#" * 100 + "\n"
path_data = Path("data/")
path_tables = path_data.joinpath("raw/tables")
device = 'cuda'
def make_dataset_test(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name='roberta'):
all_input_ids = []
all_token_type_ids = []
all_attention_masks = []
all_labels = []
rerank = []
for i in tqdm(range(len(df))):
uids_pred = uids[ranks[i]]
question = df.iloc[i]['q_reformat']
pred = [uid2text[j] for j in uids_pred][:top_k]
gold = df_exp.loc[df_exp['uid'].isin(df.iloc[i]['exp_uids'])]['text'].tolist()
labels = [1 if i in gold else 0 for i in pred]
row = {}
row['ques'] = question
row['pred'] = pred
row['gold'] = gold
rerank.append(row)
for i, p in enumerate(pred):
label = labels[i]
if model_name in model_with_no_token_types:
encoded_input = tokenizer(question, p, padding='max_length', max_length=100, truncation='longest_first', return_tensors="pt")
input_ids = encoded_input['input_ids'].tolist()
attention_mask = encoded_input['attention_mask'].tolist()
all_input_ids.append(input_ids)
all_attention_masks.append(attention_mask)
all_labels.append(label)
elif model_name=='bert':
encoded_input = tokenizer(question, p, padding='max_length', max_length=100, truncation='longest_first', return_tensors="pt")
input_ids = encoded_input['input_ids'].tolist()
token_type_ids = encoded_input['token_type_ids'].tolist()
attention_mask = encoded_input['attention_mask'].tolist()
all_input_ids.append(input_ids)
all_token_type_ids.append(token_type_ids)
all_attention_masks.append(attention_mask)
all_labels.append(label)
if model_name in model_with_no_token_types:
all_input_ids = torch.tensor(all_input_ids).squeeze()
#all_token_type_ids = torch.tensor(all_token_type_ids).squeeze()
all_attention_masks = torch.tensor(all_attention_masks).squeeze()
all_labels = torch.tensor(all_labels)
dataset = TensorDataset(all_input_ids, all_attention_masks, all_labels)
elif model_name=='bert':
all_input_ids = torch.tensor(all_input_ids).squeeze()
all_token_type_ids = torch.tensor(all_token_type_ids).squeeze()
all_attention_masks = torch.tensor(all_attention_masks).squeeze()
all_labels = torch.tensor(all_labels)
dataset = TensorDataset(all_input_ids,all_token_type_ids, all_attention_masks, all_labels)
return dataset, rerank
def make_dataset_train(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name='roberta'):
all_input_ids = []
all_token_type_ids = []
all_attention_masks = []
all_labels = []
rerank = []
for i in tqdm(range(len(df))):
uids_pred = uids[ranks[i]]
question = df.iloc[i]['q_reformat']
pred_imb = [uid2text[j] for j in uids_pred][:top_k]
gold = df_exp.loc[df_exp['uid'].isin(df.iloc[i]['exp_uids'])]['text'].tolist()
pred_0 = [p for p in pred_imb if p not in gold]
pred_1 = [p for p in pred_imb if p in gold]
if len(pred_1) == 0:
continue
pred = []
pred += (pred_1*(int((top_k/2)/len(pred_1))+1))[:int(top_k/2)]
pred += pred_0[:int(top_k/2)]
pred = random.sample(pred, k=len(pred))
labels = [1 if i in gold else 0 for i in pred]
row = {}
row['ques'] = question
row['pred'] = pred
row['gold'] = gold
rerank.append(row)
for i, p in enumerate(pred):
label = labels[i]
if model_name in model_with_no_token_types:
encoded_input = tokenizer(question, p, padding='max_length', max_length=100, truncation='longest_first', return_tensors="pt")
input_ids = encoded_input['input_ids'].tolist()
attention_mask = encoded_input['attention_mask'].tolist()
all_input_ids.append(input_ids)
all_attention_masks.append(attention_mask)
all_labels.append(label)
else:
encoded_input = tokenizer(question, p, padding='max_length', max_length=100, truncation='longest_first', return_tensors="pt")
input_ids = encoded_input['input_ids'].tolist()
token_type_ids = encoded_input['token_type_ids'].tolist()
attention_mask = encoded_input['attention_mask'].tolist()
all_input_ids.append(input_ids)
all_token_type_ids.append(token_type_ids)
all_attention_masks.append(attention_mask)
all_labels.append(label)
if model_name in model_with_no_token_types:
all_input_ids = torch.tensor(all_input_ids).squeeze()
all_attention_masks = torch.tensor(all_attention_masks).squeeze()
all_labels = torch.tensor(all_labels)
dataset = TensorDataset(all_input_ids, all_attention_masks, all_labels)
else:
all_input_ids = torch.tensor(all_input_ids).squeeze()
all_token_type_ids = torch.tensor(all_token_type_ids).squeeze()
all_attention_masks = torch.tensor(all_attention_masks).squeeze()
all_labels = torch.tensor(all_labels)
dataset = TensorDataset(all_input_ids,all_token_type_ids, all_attention_masks, all_labels)
return dataset, rerank
def train_model(df, df_exp, uids, uid2idx, uid2text, ranks, preds, MODEL_CLASSES, model_with_no_token_types, model_name='roberta', model_type='roberta-base', top_k = 100, num_train_epochs = 1, BATCH_SIZE=32, learning_rate=2e-5, epsilon=1e-8, gradient_accumulation_steps = 1, max_grad_norm = 1, weight_decay = 0, number_of_warmup_steps=0, global_step = 0, tr_loss=0.0, logging_loss = 0.0 ):
config_class, model_classifier, model_tokenizer = MODEL_CLASSES[model_name]
tokenizer = model_tokenizer.from_pretrained(model_type)
model = model_classifier.from_pretrained(model_type)
model.cuda()
model.train()
save_model = model_name+'_t_'+str(top_k)+'_epoc_'+ str(num_train_epochs)+'_lr_'+ str(learning_rate)+'_b_s_'+ str(BATCH_SIZE)
train_dataset, rerank = make_dataset_train(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name=model_name)
train_dataloader = DataLoader(train_dataset, batch_size = BATCH_SIZE)
t_total = len(train_dataloader) // gradient_accumulation_steps * 3
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=epsilon )
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=number_of_warmup_steps, num_training_steps=t_total)
model.zero_grad()
for i in tqdm(range(num_train_epochs)):
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(device) for t in batch)
if model_name in model_with_no_token_types:
inputs = {'input_ids': batch[0],
#'token_type_ids': batch[1],
'attention_mask': batch[1],
'labels': batch[2]}
else:
inputs = {'input_ids': batch[0],
'token_type_ids': batch[1],
'attention_mask': batch[2],
'labels': batch[3]}
ouputs = model(**inputs)
loss = ouputs[0]
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
tr_loss += loss.item()
if (step + 1) % gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
## Dont Forget to save the model
#torch.save(model.state_dict(), './saved_models/'+save_model+'state_dict'+'.pt')
torch.save(model, './saved_models/'+save_model+'.pt')
print("Model is saved as : ",save_model)
print("Use this to load the model")
return save_model
def evaluate_model(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, save_model, model_with_no_token_types, model_name='roberta',model_type='roberta-base', mode='train', top_k = 100 ):
if mode=='train':
train_dataset, rerank = make_dataset_train(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name= model_name)
train_dataloader = DataLoader(train_dataset, batch_size=top_k)
model=torch.load('./saved_models/'+save_model+'.pt')
preds = []
with torch.no_grad():
direct_aps = []
reranked_aps = []
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
model.eval()
batch = tuple(t.to(device) for t in batch)
if model_name in model_with_no_token_types:
inputs = {'input_ids': batch[0],
#'token_type_ids': batch[1],
'attention_mask': batch[1]}
else:
inputs = {'input_ids': batch[0],
'token_type_ids': batch[1],
'attention_mask': batch[2]}
outputs = model(**inputs)
pred = outputs[0]
pred = np.argmax(nn.Softmax(dim=1)(pred).cpu().detach().numpy(), axis=1)
pred = [rerank[step]['pred'][p] for p in range(len(rerank[step]['pred'])) if pred[p] != 0]
direct_score = average_precision_score(rerank[step]['gold'], rerank[step]['pred'])
reranked_score = average_precision_score(rerank[step]['gold'], pred)
direct_aps.append(direct_score)
reranked_aps.append(reranked_score)
print(np.mean(np.array(direct_aps)))
print(np.mean(np.array(reranked_aps)))
if not os.path.exists('results/results_train.csv'):
result_df = po.DataFrame(columns=['Model_train','TFIDF_MAP_train','Reranked_MAP_train'])
else:
result_df = po.read_csv('results/results_train.csv')
results={'Model_train':save_model,'TFIDF_MAP_train':np.mean(np.array(direct_aps)),'Reranked_MAP_train':np.mean(np.array(reranked_aps))}
result_df = result_df.append(results, ignore_index=True)
result_df.to_csv('results/results_train.csv',index=False)
elif mode=='dev':
val_dataset, rerank = make_dataset_test(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name= model_name)
val_dataloader = DataLoader(val_dataset, batch_size=top_k)
model=torch.load('./saved_models/'+save_model+'.pt')
preds = []
with torch.no_grad():
direct_aps = []
reranked_aps = []
epoch_iterator = tqdm(val_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
model.eval()
batch = tuple(t.to(device) for t in batch)
if model_name in model_with_no_token_types:
inputs = {'input_ids': batch[0],
#'token_type_ids': batch[1],
'attention_mask': batch[1]}
else:
inputs = {'input_ids': batch[0],
'token_type_ids': batch[1],
'attention_mask': batch[2]}
outputs = model(**inputs)
pred = outputs[0]
pred = np.argmax(nn.Softmax(dim=1)(pred).cpu().detach().numpy(), axis=1)
pred = [rerank[step]['pred'][p] for p in range(len(rerank[step]['pred'])) if pred[p] != 0]
preds.append(pred)
direct_score = average_precision_score(rerank[step]['gold'], rerank[step]['pred'])
reranked_score = average_precision_score(rerank[step]['gold'], pred)
direct_aps.append(direct_score)
reranked_aps.append(reranked_score)
print(np.mean(np.array(direct_aps)))
print(np.mean(np.array(reranked_aps)))
if not os.path.exists('results/results_dev.csv'):
result_df = po.DataFrame(columns=['Model_dev','TFIDF_MAP_dev','Reranked_MAP_dev'])
else:
result_df = po.read_csv('results/results_dev.csv')
results={'Model_dev':save_model,'TFIDF_MAP_dev':np.mean(np.array(direct_aps)),'Reranked_MAP_dev':np.mean(np.array(reranked_aps))}
result_df = result_df.append(results, ignore_index=True)
result_df.to_csv('results/results_dev.csv',index=False)
def predict_model(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, save_model, model_with_no_token_types, top_k=100, model_name='roberta',model_type='roberta-base'):
test_dataset, rerank = make_dataset_test(tokenizer,df, df_exp, uids, uid2idx, uid2text, ranks, preds, top_k, model_with_no_token_types, model_name= model_name)
test_dataloader = DataLoader(test_dataset, batch_size=top_k)
model = torch.load('./saved_models/'+save_model+'.pt')
with torch.no_grad():
preds = []
epoch_iterator = tqdm(test_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
model.eval()
batch = tuple(t.to(device) for t in batch)
if model_name in model_with_no_token_types:
inputs = {'input_ids': batch[0],
#'token_type_ids': batch[1],
'attention_mask': batch[1]}
else:
inputs = {'input_ids': batch[0],
'token_type_ids': batch[1],
'attention_mask': batch[2]}
outputs = model(**inputs)
pred = outputs[0]
pred = np.argmax(nn.Softmax(dim=1)(pred).cpu().detach().numpy(), axis=1)
pred = [rerank[step]['pred'][p] for p in range(len(rerank[step]['pred'])) if pred[p] != 0]
preds.append(pred)
uids = df_exp.uid.apply(remove_combo_suffix).values
qids = df.QuestionID.tolist()
preds_idx = []
for i in tqdm(range(len(preds))):
question_id = qids[i]
for p in preds[i]:
explanation_uid = df_exp.loc[df_exp['text'] == p, 'uid'].to_list()[0]
preds_idx.append(question_id + "\t" + explanation_uid)
print("The predictions are stored in the file : "+'./predictions/'+save_model+'.txt')
write_preds(preds_idx, './predictions/'+save_model+'.txt')
| [
"pandas.DataFrame",
"utils.average_precision_score",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"rank.write_preds",
"torch.load",
"tqdm.notebook.tqdm",
"os.path.exists",
"torch.save",
"pathlib.Path",
"transformers.get_linear_schedule_with_warmup",
"transformers.AdamW",
"torch.utils.da... | [((1889, 1902), 'pathlib.Path', 'Path', (['"""data/"""'], {}), "('data/')\n", (1893, 1902), False, 'from pathlib import Path\n'), ((8336, 8384), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'BATCH_SIZE'}), '(train_dataset, batch_size=BATCH_SIZE)\n', (8346, 8384), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((8876, 8942), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'learning_rate', 'eps': 'epsilon'}), '(optimizer_grouped_parameters, lr=learning_rate, eps=epsilon)\n', (8881, 8942), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((8960, 9076), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'number_of_warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=\n number_of_warmup_steps, num_training_steps=t_total)\n', (8991, 9076), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((10348, 10405), 'torch.save', 'torch.save', (['model', "('./saved_models/' + save_model + '.pt')"], {}), "(model, './saved_models/' + save_model + '.pt')\n", (10358, 10405), False, 'import torch\n'), ((15414, 15456), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'top_k'}), '(test_dataset, batch_size=top_k)\n', (15424, 15456), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((15470, 15520), 'torch.load', 'torch.load', (["('./saved_models/' + save_model + '.pt')"], {}), "('./saved_models/' + save_model + '.pt')\n", (15480, 15520), False, 'import torch\n'), ((16807, 16869), 'rank.write_preds', 'write_preds', (['preds_idx', "('./predictions/' + save_model + '.txt')"], {}), "(preds_idx, './predictions/' + save_model + '.txt')\n", (16818, 16869), False, 'from rank import 
get_ranks, get_preds, ideal_rerank, remove_combo_suffix, format_predict_line, write_preds\n'), ((4035, 4059), 'torch.tensor', 'torch.tensor', (['all_labels'], {}), '(all_labels)\n', (4047, 4059), False, 'import torch\n'), ((4075, 4136), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_attention_masks', 'all_labels'], {}), '(all_input_ids, all_attention_masks, all_labels)\n', (4088, 4136), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((6901, 6925), 'torch.tensor', 'torch.tensor', (['all_labels'], {}), '(all_labels)\n', (6913, 6925), False, 'import torch\n'), ((6943, 7004), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_attention_masks', 'all_labels'], {}), '(all_input_ids, all_attention_masks, all_labels)\n', (6956, 7004), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((7237, 7261), 'torch.tensor', 'torch.tensor', (['all_labels'], {}), '(all_labels)\n', (7249, 7261), False, 'import torch\n'), ((7282, 7367), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_token_type_ids', 'all_attention_masks', 'all_labels'], {}), '(all_input_ids, all_token_type_ids, all_attention_masks,\n all_labels)\n', (7295, 7367), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((9161, 9201), 'tqdm.notebook.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (9165, 9201), False, 'from tqdm.notebook import tqdm\n'), ((10929, 10972), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'top_k'}), '(train_dataset, batch_size=top_k)\n', (10939, 10972), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((10987, 11037), 'torch.load', 
'torch.load', (["('./saved_models/' + save_model + '.pt')"], {}), "('./saved_models/' + save_model + '.pt')\n", (10997, 11037), False, 'import torch\n'), ((15526, 15541), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15539, 15541), False, 'import torch\n'), ((15584, 15623), 'tqdm.notebook.tqdm', 'tqdm', (['test_dataloader'], {'desc': '"""Iteration"""'}), "(test_dataloader, desc='Iteration')\n", (15588, 15623), False, 'from tqdm.notebook import tqdm\n'), ((4380, 4404), 'torch.tensor', 'torch.tensor', (['all_labels'], {}), '(all_labels)\n', (4392, 4404), False, 'import torch\n'), ((4423, 4508), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_token_type_ids', 'all_attention_masks', 'all_labels'], {}), '(all_input_ids, all_token_type_ids, all_attention_masks,\n all_labels)\n', (4436, 4508), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((11066, 11081), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11079, 11081), False, 'import torch\n'), ((11165, 11205), 'tqdm.notebook.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (11169, 11205), False, 'from tqdm.notebook import tqdm\n'), ((12363, 12406), 'os.path.exists', 'os.path.exists', (['"""results/results_train.csv"""'], {}), "('results/results_train.csv')\n", (12377, 12406), False, 'import os\n'), ((12432, 12510), 'pandas.DataFrame', 'po.DataFrame', ([], {'columns': "['Model_train', 'TFIDF_MAP_train', 'Reranked_MAP_train']"}), "(columns=['Model_train', 'TFIDF_MAP_train', 'Reranked_MAP_train'])\n", (12444, 12510), True, 'import pandas as po\n'), ((12547, 12587), 'pandas.read_csv', 'po.read_csv', (['"""results/results_train.csv"""'], {}), "('results/results_train.csv')\n", (12558, 12587), True, 'import pandas as po\n'), ((13086, 13127), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'top_k'}), '(val_dataset, 
batch_size=top_k)\n', (13096, 13127), False, 'from torch.utils.data import DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset\n'), ((13142, 13192), 'torch.load', 'torch.load', (["('./saved_models/' + save_model + '.pt')"], {}), "('./saved_models/' + save_model + '.pt')\n", (13152, 13192), False, 'import torch\n'), ((3841, 3868), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {}), '(all_input_ids)\n', (3853, 3868), False, 'import torch\n'), ((3974, 4007), 'torch.tensor', 'torch.tensor', (['all_attention_masks'], {}), '(all_attention_masks)\n', (3986, 4007), False, 'import torch\n'), ((6772, 6799), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {}), '(all_input_ids)\n', (6784, 6799), False, 'import torch\n'), ((6838, 6871), 'torch.tensor', 'torch.tensor', (['all_attention_masks'], {}), '(all_attention_masks)\n', (6850, 6871), False, 'import torch\n'), ((7038, 7065), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {}), '(all_input_ids)\n', (7050, 7065), False, 'import torch\n'), ((7103, 7135), 'torch.tensor', 'torch.tensor', (['all_token_type_ids'], {}), '(all_token_type_ids)\n', (7115, 7135), False, 'import torch\n'), ((7174, 7207), 'torch.tensor', 'torch.tensor', (['all_attention_masks'], {}), '(all_attention_masks)\n', (7186, 7207), False, 'import torch\n'), ((12015, 12082), 'utils.average_precision_score', 'average_precision_score', (["rerank[step]['gold']", "rerank[step]['pred']"], {}), "(rerank[step]['gold'], rerank[step]['pred'])\n", (12038, 12082), False, 'from utils import get_df_explanations, get_questions, average_precision_score\n'), ((12112, 12163), 'utils.average_precision_score', 'average_precision_score', (["rerank[step]['gold']", 'pred'], {}), "(rerank[step]['gold'], pred)\n", (12135, 12163), False, 'from utils import get_df_explanations, get_questions, average_precision_score\n'), ((12278, 12298), 'numpy.array', 'np.array', (['direct_aps'], {}), '(direct_aps)\n', (12286, 12298), True, 'import numpy as 
np\n'), ((12323, 12345), 'numpy.array', 'np.array', (['reranked_aps'], {}), '(reranked_aps)\n', (12331, 12345), True, 'import numpy as np\n'), ((12656, 12676), 'numpy.array', 'np.array', (['direct_aps'], {}), '(direct_aps)\n', (12664, 12676), True, 'import numpy as np\n'), ((12707, 12729), 'numpy.array', 'np.array', (['reranked_aps'], {}), '(reranked_aps)\n', (12715, 12729), True, 'import numpy as np\n'), ((13221, 13236), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13234, 13236), False, 'import torch\n'), ((13320, 13358), 'tqdm.notebook.tqdm', 'tqdm', (['val_dataloader'], {'desc': '"""Iteration"""'}), "(val_dataloader, desc='Iteration')\n", (13324, 13358), False, 'from tqdm.notebook import tqdm\n'), ((14559, 14600), 'os.path.exists', 'os.path.exists', (['"""results/results_dev.csv"""'], {}), "('results/results_dev.csv')\n", (14573, 14600), False, 'import os\n'), ((14626, 14698), 'pandas.DataFrame', 'po.DataFrame', ([], {'columns': "['Model_dev', 'TFIDF_MAP_dev', 'Reranked_MAP_dev']"}), "(columns=['Model_dev', 'TFIDF_MAP_dev', 'Reranked_MAP_dev'])\n", (14638, 14698), True, 'import pandas as po\n'), ((14735, 14773), 'pandas.read_csv', 'po.read_csv', (['"""results/results_dev.csv"""'], {}), "('results/results_dev.csv')\n", (14746, 14773), True, 'import pandas as po\n'), ((4187, 4214), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {}), '(all_input_ids)\n', (4199, 4214), False, 'import torch\n'), ((4250, 4282), 'torch.tensor', 'torch.tensor', (['all_token_type_ids'], {}), '(all_token_type_ids)\n', (4262, 4282), False, 'import torch\n'), ((4319, 4352), 'torch.tensor', 'torch.tensor', (['all_attention_masks'], {}), '(all_attention_masks)\n', (4331, 4352), False, 'import torch\n'), ((14211, 14278), 'utils.average_precision_score', 'average_precision_score', (["rerank[step]['gold']", "rerank[step]['pred']"], {}), "(rerank[step]['gold'], rerank[step]['pred'])\n", (14234, 14278), False, 'from utils import get_df_explanations, get_questions, 
average_precision_score\n'), ((14308, 14359), 'utils.average_precision_score', 'average_precision_score', (["rerank[step]['gold']", 'pred'], {}), "(rerank[step]['gold'], pred)\n", (14331, 14359), False, 'from utils import get_df_explanations, get_questions, average_precision_score\n'), ((14474, 14494), 'numpy.array', 'np.array', (['direct_aps'], {}), '(direct_aps)\n', (14482, 14494), True, 'import numpy as np\n'), ((14519, 14541), 'numpy.array', 'np.array', (['reranked_aps'], {}), '(reranked_aps)\n', (14527, 14541), True, 'import numpy as np\n'), ((14838, 14858), 'numpy.array', 'np.array', (['direct_aps'], {}), '(direct_aps)\n', (14846, 14858), True, 'import numpy as np\n'), ((14887, 14909), 'numpy.array', 'np.array', (['reranked_aps'], {}), '(reranked_aps)\n', (14895, 14909), True, 'import numpy as np\n'), ((16190, 16207), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (16200, 16207), True, 'import torch.nn as nn\n'), ((11829, 11846), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11839, 11846), True, 'import torch.nn as nn\n'), ((13994, 14011), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (14004, 14011), True, 'import torch.nn as nn\n')] |
import numpy as np
def persistence(labels):
    """Run-length encode a trajectory of cluster labels.

    Returns an ndarray whose rows are [label, time spent], one row per
    contiguous run of identical labels in the input trajectory.
    """
    runs = []
    active = [labels[0], 0]
    for lab in labels:
        if lab != active[0]:
            # label changed: close out the current run, start a new one
            runs.append(active)
            active = [lab, 1]
        else:
            active[1] += 1
    runs.append(active)
    return np.array(runs)
def commitment(states,t_commit):
    """Return metastable states visited for at least ``t_commit`` steps.

    ``states`` is in [label, time spent] row format (see ``persistence``);
    rows with label -1 (unassigned) are discarded first, then rows whose
    dwell time is below ``t_commit``.
    """
    labelled = states[states[:, 0] > -1]
    return labelled[labelled[:, 1] >= t_commit]
def count_path(states,path):
    """Count how many times ``path`` occurs as a contiguous label sequence.

    Parameters
    ----------
    states : ndarray
        Rows of [label, time spent], e.g. the output of ``persistence``.
    path : sequence of int
        Ordered state labels defining the path of interest.

    Returns
    -------
    int
        Number of sliding windows over ``states[:, 0]`` equal to ``path``.
    """
    edges = np.shape(path)[0]
    n_windows = np.shape(states)[0] - edges + 1
    routes = np.zeros([n_windows, edges])
    for i in range(n_windows):
        # copy the label column directly; the original wrapped this slice
        # in a redundant np.array([...]) before broadcasting it in
        routes[i, :] = states[i:i + edges, 0]
    # a window counts when every position matches the requested path
    return np.shape(routes[(routes == tuple(path)).all(axis=1)])[0]
def find_wells(prob):
    """Count the number of energy wells deeper than ~1 kT.

    The probability distribution is mapped to a free-energy profile
    E_i = -log(prob_i) (empty bins become +inf barriers) and scanned
    left to right; a well is counted each time the profile drops by
    more than 1 following an uphill stretch.

    Parameters
    ----------
    prob : sequence of float
        Probability mass per bin.

    Returns
    -------
    int
        Number of wells detected.
    """
    # free-energy profile; empty bins become infinitely high
    energy = [np.inf if p == 0 else -1 * np.log(p) for p in prob]
    wells = 0
    # running extrema since the last direction flip; the original used
    # 'max'/'min', shadowing the builtins, and kept an unused counter i
    run_max = np.inf
    run_min = np.inf
    direction = 1
    for x in energy:
        if x > run_max:
            run_max = x
            if run_max - run_min > 1:
                # climbed > 1 kT: reset the minimum, now going uphill
                run_min = x
                direction = 1
        elif x < run_min:
            run_min = x
            if run_max - run_min > 1:
                # dropped > 1 kT after an uphill stretch: new well
                if direction == 1:
                    wells = wells + 1
                run_max = x
                direction = -1
    return wells
def find_barriers(prob):
    """Find the indices of energy barriers higher than ~1 kT.

    Mirrors ``find_wells`` but records the index of the highest energy
    point between successive wells. The first recorded peak is an
    artifact of the +inf initialisation and is discarded.

    Parameters
    ----------
    prob : sequence of float
        Probability mass per bin.

    Returns
    -------
    list of int
        Indices of barrier peaks separating wells.
    """
    energy = [np.inf if p == 0 else -1 * np.log(p) for p in prob]
    barriers = []
    # running extrema since the last direction flip; the original used
    # 'max'/'min', shadowing the builtins
    run_max = np.inf
    run_min = np.inf
    direction = 1
    # [index, height] of the tallest point since the last well
    peak = [0, run_max]
    for idx, x in enumerate(energy):
        if x > run_max:
            run_max = x
            if x > peak[1]:
                peak = [idx, x]
            if run_max - run_min > 1:
                run_min = x
                direction = 1
        elif x < run_min:
            run_min = x
            if run_max - run_min > 1:
                if direction == 1:
                    # descending into a new well: the saved peak was a barrier
                    barriers.append(peak[0])
                    peak = [idx, 0]
                run_max = x
                direction = -1
    # drop the artifact entry from the inf initialisation
    return barriers[1:]
| [
"numpy.shape",
"numpy.array",
"numpy.log"
] | [((409, 425), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (417, 425), True, 'import numpy as np\n'), ((977, 991), 'numpy.shape', 'np.shape', (['path'], {}), '(path)\n', (985, 991), True, 'import numpy as np\n'), ((1117, 1151), 'numpy.array', 'np.array', (['[states[i:i + edges, 0]]'], {}), '([states[i:i + edges, 0]])\n', (1125, 1151), True, 'import numpy as np\n'), ((1073, 1089), 'numpy.shape', 'np.shape', (['routes'], {}), '(routes)\n', (1081, 1089), True, 'import numpy as np\n'), ((1395, 1410), 'numpy.log', 'np.log', (['prob[i]'], {}), '(prob[i])\n', (1401, 1410), True, 'import numpy as np\n'), ((2121, 2136), 'numpy.log', 'np.log', (['prob[i]'], {}), '(prob[i])\n', (2127, 2136), True, 'import numpy as np\n'), ((1018, 1034), 'numpy.shape', 'np.shape', (['states'], {}), '(states)\n', (1026, 1034), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 8 15:19:36 2016
@author: virati
This is now the actual code for doing OnTarget/OffTarget LFP Ephys
THIS APPEARS to do the chirp template search
"""
import numpy as np
import pandas as pd
from collections import defaultdict, OrderedDict
import scipy.signal as sig
import matplotlib
import sys
import seaborn as sns
sns.set_context('paper')
sns.set(font_scale=3)
sns.set_style('white')
import matplotlib.pyplot as plt
sys.path.append('/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/archive/MMDBS/')
import TimeSeries as ts
from scipy.interpolate import interp1d
import pdb
import matplotlib.colors as colors
from sklearn.decomposition import PCA
import scipy.stats as stats
import sys
sys.path.append('/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/DBSpace/')
import DBSpace as dbo
from DBSpace import nestdict
import pickle
# Global plotting defaults: bold 20 pt text, text-preserving SVG export,
# and the 'jet' colormap for images.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 20}
matplotlib.rc('font', **font)
# keep text as editable text (not paths) when saving figures as SVG
matplotlib.rcParams['svg.fonttype'] = 'none'
plt.rcParams['image.cmap'] = 'jet'
plt.close('all')
#%% Script Definitions
#Data.view_raw_ts(channs=range(2))
#Data.view_raw_hist(chann=[0,1])
def plot_SG(mI,pI,conditI,chann,SGs,tpts=0):
    """Plot spectrogram windows (in dB) for the selected recordings.

    mI/pI/conditI are iterables of modality, patient and condition keys
    into the SGs nested dict; chann selects the channel key; tpts is a
    (start, end) pair in seconds bounding the plotted window.
    NOTE(review): the default tpts=0 is not indexable -- callers appear
    to always pass an explicit (start, end); confirm before relying on it.
    """
    plt.figure()
    for m in mI:
        for p in pI:
            for condit in conditI:
                plt.figure()
                # spectrogram time bins falling inside the requested window
                t_idxs = np.where(np.logical_and(SGs[m][p][condit]['T'] > tpts[0],SGs[m][p][condit]['T'] < tpts[1]))
                # power in dB: 10*log10(|SG|)
                plt.pcolormesh(SGs[m][p][condit]['T'][t_idxs],SGs[m]['F'],10*np.log10(np.squeeze(np.abs(SGs[m][p][condit]['SG'][chann][:,t_idxs]))))
                plt.title(m + ' ' + p + ' ' + condit)
                #plt.axis('off')
                #plt.tight_layout()
                #plt.autoscale(enable=True,tight=True)
                #plt.ylim((0,50))
                plt.xlabel('Time (sec)')
                plt.ylabel('Frequency (Hz)')
    plt.show()
def plot_PSDs(mI,pI,conditI,chann,SGs):
    """Plot the median-over-time PSD (dB) for each selected recording."""
    for m in mI:
        for p in pI:
            for condit in conditI:
                plt.figure()
                # median across spectrogram time bins -> robust PSD estimate
                plt.plot(SGs[m]['F'],10*np.log10(np.squeeze(np.abs(np.median(SGs[m][p][condit]['SG'][chann][:,:],axis=1)))))
                plt.axis('off')
def plot_ts(mI,pI,conditI,chann, SGs,tpts, filt=True):
    """Plot the raw time series for each selected recording.

    NOTE(review): chann, tpts and filt are currently unused; the
    commented-out code below suggests windowing/filtering was planned
    but never finished, and the function ends on an empty subplot(312).
    """
    for m in mI:
        for p in pI:
            for condit in conditI:
                #tvec = SGs[m][p][condit]['TRaw']
                #sel_tvec = np.logical_and(tvec > tpts[0],tvec < tpts[1])
                #Lchann = stats.zscore(sig.filtfilt(b,a,SGs[m][p][condit]['Raw'][sel_tvec,0]))
                #Rchann = stats.zscore(sig.filtfilt(b,a,SGs[m][p][condit]['Raw'][sel_tvec,1]))
                plt.figure()
                plt.subplot(311)
                plt.plot(SGs[m][p][condit]['TRaw'],SGs[m][p][condit]['Raw'])
                plt.subplot(312)
def plot_phase(mI,pI,conditI,chann,SGs,tpts,filt=True,fileio_out=False):
    """Low-pass filter, z-score and plot/export a time window of both channels.

    For each selected recording, the raw L/R channels inside the tpts
    (start, end) window are filtfilt'ed with a 10th-order Butterworth
    low-pass (cutoff 30/422 of Nyquist, i.e. ~30 Hz at Fs=422) and
    z-scored. With fileio_out=True the raw and filtered window is
    pickled to /tmp/test.pickle; otherwise time series and a phase
    portrait are plotted. Returns the ``chirp`` dict; its 'Raw' key
    holds the last window processed.
    NOTE(review): chann and filt are unused; with multiple recordings
    selected, later iterations overwrite chirp['Raw'].
    """
    # zero-phase low-pass filter coefficients (Fs for LFP is 422 Hz)
    b,a = sig.butter(10,30/422)
    cm = plt.cm.get_cmap('RdYlBu')
    chirp = defaultdict(dict)
    for m in mI:
        for p in pI:
            for condit in conditI:
                tvec = SGs[m][p][condit]['TRaw']
                # boolean mask for samples inside the requested window
                sel_tvec = np.logical_and(tvec > tpts[0],tvec < tpts[1])
                Lchann = stats.zscore(sig.filtfilt(b,a,SGs[m][p][condit]['Raw'][sel_tvec,0]))
                Rchann = stats.zscore(sig.filtfilt(b,a,SGs[m][p][condit]['Raw'][sel_tvec,1]))
                if fileio_out:
                    # export raw + filtered window for offline template work
                    chirp['Raw'] = SGs[m][p][condit]['Raw'][sel_tvec,:]
                    chirp['Filt'] = [Lchann,Rchann]
                    pickle.dump(chirp,open('/tmp/test.pickle',"wb"))
                else:
                    plt.figure()
#                    plt.subplot(311)
#                    plt.plot(SGs[m][p][condit]['TRaw'],SGs[m][p][condit]['Raw'])
#                    plt.axis('off')
                    plt.subplot(312)
                    #filter the two
                    plt.plot(tvec[sel_tvec],Lchann)
                    plt.plot(tvec[sel_tvec],Rchann)
                    plt.subplot(313)
                    #plt.scatter(Lchann,Rchann,c=tvec[sel_tvec],marker='.',cmap=cm,alpha=0.1)
                    plt.xlim((-5,5))
                    plt.ylim((-5,5))
                    plt.title('Phase Portrait')
                chirp['Raw'] = [Lchann,Rchann]
    return chirp
#Get the banded power for each band now
def get_SG_Bands(m,p,condit,SGs,band):
    """Average spectrogram power inside one frequency band, per channel.

    NOTE: this function is defined twice in this file; the later
    definition shadows this one at import time.

    Parameters
    ----------
    m, p, condit : keys selecting modality, patient and condition in SGs.
    SGs : nested dict with SGs[m]['F'] (frequency vector, Hz) and
        SGs[m][p][condit][cc]['SG'] (freq x time spectrogram, channel cc).
    band : dict with 'label' (band name) and 'range' ((lo, hi) in Hz).

    Returns
    -------
    defaultdict mapping band['label'] to a two-element list of 1-D
    band-power time courses, one per channel (0 then 1).
    """
    # Take [0] of the np.where tuple: indexing with the raw tuple adds a
    # spurious leading axis, so the axis-0 mean below would not actually
    # average over the band frequencies.
    band_inds = np.where(np.logical_and(SGs[m]['F'] > band['range'][0],SGs[m]['F'] < band['range'][1]))[0]
    Band_TC = defaultdict(dict)
    # Initialise the list once; the original reset it inside the loop,
    # discarding channel 0's time course.
    Band_TC[band['label']] = []
    for cc in range(2):
        Band_TC[band['label']].append(np.mean(SGs[m][p][condit][cc]['SG'][band_inds,:],0))
    return Band_TC
def get_SG_Bands(m,p,condit,SGs,band):
    """Average spectrogram power inside one frequency band, per channel.

    NOTE: duplicate definition -- this file defines get_SG_Bands twice
    and this later copy is the one that takes effect at import time.

    Parameters
    ----------
    m, p, condit : keys selecting modality, patient and condition in SGs.
    SGs : nested dict with SGs[m]['F'] (frequency vector, Hz) and
        SGs[m][p][condit][cc]['SG'] (freq x time spectrogram, channel cc).
    band : dict with 'label' (band name) and 'range' ((lo, hi) in Hz).

    Returns
    -------
    defaultdict mapping band['label'] to a two-element list of 1-D
    band-power time courses, one per channel (0 then 1).
    """
    # [0] unwraps the np.where tuple so the slice keeps shape
    # (n_band_freqs, n_times) and the axis-0 mean averages the band.
    band_inds = np.where(np.logical_and(SGs[m]['F'] > band['range'][0],SGs[m]['F'] < band['range'][1]))[0]
    Band_TC = defaultdict(dict)
    # Initialise once (the original re-initialised per channel, keeping
    # only the last channel's result).
    Band_TC[band['label']] = []
    for cc in range(2):
        Band_TC[band['label']].append(np.mean(SGs[m][p][condit][cc]['SG'][band_inds,:],0))
    return Band_TC
#%% Main Script
# Ephys: nested dict keyed [modality][patient][condition] holding each
# session's recording filename and hand-picked analysis segments.
Ephys = defaultdict(dict)
modalities = ['LFP']
patients = ['901','903','905','906','907','908']
condits = ['OnTarget','OffTarget']
for mod in modalities:
    Ephys[mod] = defaultdict(dict)
    for pt in patients:
        Ephys[mod][pt] = defaultdict(dict)
        for cnd in condits:
            Ephys[mod][pt][cnd] = defaultdict(dict)
#%%
#Only for TurnOn for now
Phase = 'TurnOn'
# Per-experiment session registry. 'segments' are (start, end) windows in
# seconds; 'Bilat' presumably marks the bilateral-stim epoch and 'PreBilat'
# the preceding baseline -- confirm against the experiment logs. The '6Mo'
# branch is only partially filled in (empty filenames/tuples).
if Phase == 'TurnOn':
    Ephys['LFP']['901']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/901/Session_2014_05_16_Friday/DBS901_2014_05_16_17_10_31__MR_0.txt'
    Ephys['LFP']['901']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/901/Session_2014_05_16_Friday/DBS901_2014_05_16_16_25_07__MR_0.txt'
    Ephys['LFP']['901']['OnTarget']['segments']['Bilat'] = (600,630)
    Ephys['LFP']['901']['OnTarget']['segments']['PreBilat'] = (500,530)
    Ephys['LFP']['901']['OffTarget']['segments']['Bilat'] = (600,630)
    Ephys['LFP']['901']['OffTarget']['segments']['PreBilat'] = (480,510)
    Ephys['LFP']['903']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/903/Session_2014_09_03_Wednesday/DBS903_2014_09_03_14_16_57__MR_0.txt'
    Ephys['LFP']['903']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/903/Session_2014_09_04_Thursday/DBS903_2014_09_04_12_53_09__MR_0.txt'
    Ephys['LFP']['903']['OnTarget']['segments']['Bilat'] = (550,580)
    Ephys['LFP']['903']['OffTarget']['segments']['Bilat'] = (550,580)
    Ephys['LFP']['903']['OnTarget']['segments']['PreBilat'] = (501,531)
    Ephys['LFP']['903']['OffTarget']['segments']['PreBilat'] = (501,531)
    Ephys['LFP']['905']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/905/Session_2015_09_28_Monday/Dbs905_2015_09_28_13_51_42__MR_0.txt'
    Ephys['LFP']['905']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/905/Session_2015_09_29_Tuesday/Dbs905_2015_09_29_12_32_47__MR_0.txt'
    Ephys['LFP']['905']['OnTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['905']['OffTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['905']['OnTarget']['segments']['PreBilat'] = (561,591)
    Ephys['LFP']['905']['OffTarget']['segments']['PreBilat'] = (561,591)
    Ephys['LFP']['906']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/906/Session_2015_08_27_Thursday/DBS906_2015_08_27_15_10_44__MR_0.txt'
    Ephys['LFP']['906']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/906/Session_2015_08_27_Thursday/DBS906_2015_08_27_16_20_23__MR_0.txt'
    Ephys['LFP']['906']['OnTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['906']['OffTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['906']['OnTarget']['segments']['PreBilat'] = (561,591)
    Ephys['LFP']['906']['OffTarget']['segments']['PreBilat'] = (561,591)
    Ephys['LFP']['907']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/907/Session_2015_12_16_Wednesday/DBS907_2015_12_16_12_09_04__MR_0.txt'
    Ephys['LFP']['907']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/907/Session_2015_12_17_Thursday/DBS907_2015_12_17_10_53_08__MR_0.txt'
    Ephys['LFP']['907']['OnTarget']['segments']['Bilat'] = (640,670)
    Ephys['LFP']['907']['OffTarget']['segments']['Bilat'] = (625,655)
    Ephys['LFP']['907']['OnTarget']['segments']['PreBilat'] = (590,620)
    Ephys['LFP']['907']['OffTarget']['segments']['PreBilat'] = (560,590)
    Ephys['LFP']['908']['OnTarget']['Filename'] = '/home/virati/MDD_Data/BR/908/Session_2016_02_10_Wednesday/DBS908_2016_02_10_13_03_10__MR_0.txt'
    Ephys['LFP']['908']['OffTarget']['Filename'] = '/home/virati/MDD_Data/BR/908/Session_2016_02_11_Thursday/DBS908_2016_02_11_12_34_21__MR_0.txt'
    Ephys['LFP']['908']['OnTarget']['segments']['Bilat'] = (611,641)
    Ephys['LFP']['908']['OffTarget']['segments']['Bilat'] = (611,641)
    Ephys['LFP']['908']['OnTarget']['segments']['PreBilat'] = (551,581)
    Ephys['LFP']['908']['OffTarget']['segments']['PreBilat'] = (551,581)
elif Phase == '6Mo':
    #901
    Ephys['LFP']['901']['OnTarget']['Filename'] = '/run/media/virati/Samsung USB/MDD_Data/BR/901/Session_2014_11_14_Friday/DBS901_2014_11_14_16_46_35__MR_0.txt'
    Ephys['LFP']['901']['OffTarget']['Filename'] = '/run/media/virati/Samsung USB/MDD_Data/BR/901/Session_2014_11_14_Friday/DBS901_2014_11_14_17_34_35__MR_0.txt'
    Ephys['LFP']['901']['OnTarget']['segments']['Bilat'] = (670,700)
    Ephys['LFP']['901']['OnTarget']['segments']['PreBilat'] = (620,650)
    Ephys['LFP']['901']['OffTarget']['segments']['Bilat'] = ()
    Ephys['LFP']['901']['OffTarget']['segments']['PreBilat'] = ()
    #903
    Ephys['LFP']['903']['OnTarget']['Filename'] = ''
    Ephys['LFP']['903']['OffTarget']['Filename'] = ''
    Ephys['LFP']['903']['OnTarget']['segments']['PreBilat'] = ()
    Ephys['LFP']['903']['OnTarget']['segments']['Bilat'] = ()
    Ephys['LFP']['903']['OffTarget']['segments']['PreBilat'] = ()
    Ephys['LFP']['903']['OffTarget']['segments']['Bilat'] = ()
    #905
    Ephys['LFP']['905']['OnTarget']['Filename'] = ''
    Ephys['LFP']['905']['OffTarget']['Filename'] = ''
    Ephys['LFP']['905']['OnTarget']['segments']['PreBilat'] = ()
    Ephys['LFP']['905']['OnTarget']['segments']['Bilat'] = ()
    Ephys['LFP']['905']['OffTarget']['segments']['PreBilat'] = ()
    Ephys['LFP']['905']['OffTarget']['segments']['Bilat'] = ()
    #906
    Ephys['LFP']['906']['OnTarget']['Filename'] = ''
    Ephys['LFP']['906']['OffTarget']['Filename'] = ''
    Ephys['LFP']['906']['OnTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['906']['OffTarget']['segments']['Bilat'] = (610,640)
    Ephys['LFP']['906']['OnTarget']['segments']['PreBilat'] = (561,591)
    Ephys['LFP']['906']['OffTarget']['segments']['PreBilat'] = (561,591)
    #907
    Ephys['LFP']['907']['OnTarget']['Filename'] = ''
    Ephys['LFP']['907']['OffTarget']['Filename'] = ''
    Ephys['LFP']['907']['OnTarget']['segments']['Bilat'] = (640,670)
    Ephys['LFP']['907']['OffTarget']['segments']['Bilat'] = (625,655)
    Ephys['LFP']['907']['OnTarget']['segments']['PreBilat'] = (590,620)
    Ephys['LFP']['907']['OffTarget']['segments']['PreBilat'] = (560,590)
    #908
    Ephys['LFP']['908']['OnTarget']['Filename'] = ''
    Ephys['LFP']['908']['OffTarget']['Filename'] = ''
    Ephys['LFP']['908']['OnTarget']['segments']['Bilat'] = (611,641)
    Ephys['LFP']['908']['OffTarget']['segments']['Bilat'] = (611,641)
    Ephys['LFP']['908']['OnTarget']['segments']['PreBilat'] = (551,581)
    Ephys['LFP']['908']['OffTarget']['segments']['PreBilat'] = (551,581)
#%%
#import shutil
#
#for mod in modalities:
# for pt in patients:
#
# for cnd in condits:
# #copy the file to tmp:
# shutil.copyfile(Ephys[mod][pt][cnd]['Filename'],'/tmp/data_upload/' + pt + '_' + mod + '_' + cnd)
#%%
#Load in the files
plt.close('all')
SCC_State = defaultdict(dict)
# Hand-derived "depression state vector" coefficients, one row per channel
# (left, right), one column per oscillatory band. Normalised below by the
# whole-matrix (Frobenius) norm -- NOTE(review): not per-row; confirm intended.
do_DSV = np.array([[-0.00583578, -0.00279751, 0.00131825, 0.01770169, 0.01166687],[-1.06586005e-02, 2.42700023e-05, 7.31445236e-03, 2.68723035e-03,-3.90440108e-06]])
do_DSV = do_DSV / np.linalg.norm(do_DSV)
SGs = nestdict()
#set the Fs for LFPs here
SGs['LFP']['Fs'] = 422
#Loop through the modalities and import the BR recording
# For every patient/condition: load the BrainRadio file, compute the
# time-frequency representation, and cache spectrograms, raw traces,
# band time courses and empty DSV/segment containers in SGs.
for mm, modal in enumerate(['LFP']):
    for pp, pt in enumerate(['901','903','905','906','907','908']):
        SGs[modal][pt] = defaultdict(dict)
        for cc, condit in enumerate(['OnTarget','OffTarget']):
            Data = []
            Data = ts.import_BR(Ephys[modal][pt][condit]['Filename'],snip=(0,0))
            #Data = dbo.load_BR_dict(Ephys[modal][pt][condit]['Filename'],sec_end=0)
            #Compute the TF representation of the above imported data
            F,T,SG,BANDS = Data.compute_tf()
            SG_Dict = dbo.gen_SG(Data.extract_dict(),overlap=False)
            #Fvect = dbo.calc_feats()
            #for iv, interval in enumerate():
            [datatv,dataraw] = Data.raw_ts()
            SGs[modal][pt][condit]['SG'] = {chann:SG_Dict[chann]['SG'] for chann in ['Left','Right']}
            SGs[modal][pt][condit]['Raw'] = dataraw
            SGs[modal][pt][condit]['TRaw'] = datatv
            SGs[modal][pt][condit]['T'] = SG_Dict['Left']['T']
            #pdb.set_trace()
            SGs[modal][pt][condit]['Bands'] = BANDS
            # (n_timebins, 2 channels, 5 bands) container filled later
            SGs[modal][pt][condit]['BandMatrix'] = np.zeros((BANDS[0]['Alpha'].shape[0],2,5))
            SGs[modal][pt][condit]['BandSegments'] = nestdict()
            SGs[modal][pt][condit]['DSV'] = np.zeros((BANDS[0]['Alpha'].shape[0],2,1))
        SGs[modal]['F'] = SG_Dict['F']
#%%
#Segment the data based on prescribed segmentations
#bands = ts.band_structs()
do_bands = dbo.feat_order
# Response_matrix: (patient, condition, channel, band) stim-response in dB.
Response_matrix = np.zeros((6,2,2,5))
for mm, modal in enumerate(['LFP']):
    for pp, pt in enumerate(['901','903','905','906','907','908']):
        for co, condit in enumerate(['OnTarget','OffTarget']):
            #SGs[modal][pt][condit]['BandSegments'] = defaultdict(dict)
            for bb, bands in enumerate(do_bands):
                for cc in range(2):
                    SGs[modal][pt][condit]['BandMatrix'][:,cc,bb] = SGs[modal][pt][condit]['Bands'][cc][bands]
                    # project each timepoint's band vector onto the DSV
                    # NOTE(review): this inner loop reruns identically for
                    # every band bb -- redundant but harmless.
                    for seg in range(SGs[modal][pt][condit]['BandMatrix'].shape[0]):
                        SGs[modal][pt][condit]['DSV'][seg,cc] = np.dot(SGs[modal][pt][condit]['BandMatrix'][seg,cc,:],do_DSV[cc,:])
            for sg, seg in enumerate(Ephys[modal][pt][condit]['segments'].keys()):
                tbounds = [Ephys[modal][pt][condit]['segments'][seg][0],Ephys[modal][pt][condit]['segments'][seg][1]]
                #extract from time vector the actual indices
                t_idxs = np.ceil(np.where(np.logical_and(SGs[modal][pt][condit]['T'] >= tbounds[0],SGs[modal][pt][condit]['T'] <= tbounds[1]))).astype(int)
                #pdb.set_trace()
                SGs[modal][pt][condit]['BandSegments'][seg]=SGs[modal][pt][condit]['BandMatrix'][t_idxs,:,:]
                SGs[modal][pt][condit][seg] = defaultdict(dict)
                SGs[modal][pt][condit][seg]['PCA'] = defaultdict(dict)
            #This is a (2,4) matrix, with channel x band
            # dB change of stim epoch over pre-stim baseline, per channel/band
            SGs[modal][pt][condit]['Response'] = 10* np.log10(np.mean(SGs[modal][pt][condit]['BandSegments']['Bilat'][0,:,:,:],0)) - 10* np.log10(np.mean(SGs[modal][pt][condit]['BandSegments']['PreBilat'][0,:,:,:],0))
            Response_matrix[pp,co,:,:] = SGs[modal][pt][condit]['Response']
#%%
#Plot DSV directions/control theory
# Per patient: z-scored left/right DSV trajectories plus a colored
# left-vs-right scatter with a cubic-spline-smoothed path overlay.
for pt in ['901','903','905','906','907','908']:
    plt.figure()
    for cc,condit in enumerate(['OnTarget','OffTarget']):
        plt.subplot(2,1,cc+1)
        #plt.plot(SGs[modal][pt][condit]['DSV'][:,0],label='Left LFP')
        #plt.plot(SGs[modal][pt][condit]['DSV'][:,1],label='Right LFP')
        llfp = stats.zscore(SGs[modal][pt][condit]['DSV'][:,0]).squeeze()
        rlfp = stats.zscore(SGs[modal][pt][condit]['DSV'][:,1]).squeeze()
        orig_len=len(llfp)
        ti = np.linspace(2,orig_len+1,10*orig_len)
        # pad both ends with wrapped samples so the spline has support
        li = np.concatenate((llfp[-3:-1],llfp,llfp[1:3]))
        ri = np.concatenate((rlfp[-3:-1],rlfp,rlfp[1:3]))
        t = np.arange(li.shape[0])
        lii = interp1d(t,li,kind='cubic')(ti)
        rii = interp1d(t,ri,kind='cubic')(ti)
        plt.scatter(llfp,rlfp,c=np.linspace(0,1,llfp.shape[0]),cmap='cool')
        plt.plot(lii,rii,alpha=0.2)
        #for ii in range(len(lii)):
        #p = plt.plot(lii,rii,color=pl.cm.jet(np.linspace(0,1,len(lii)))[ii])
        #colorline(lii,rii,np.linspace(0,1,len(lii)),cmap=plt.get_cmap('jet'))
        plt.ylim((-0.8,0.8))
        plt.xlim((-0.8,0.8))
        plt.title(condit)
    plt.suptitle(pt)
#%%
#Now, just plot in a boxplot format what we want
# Alpha-band (index 2) response across patients, one panel per channel,
# for a single condition (condit = 0 -> 'OnTarget').
condit = 0
#plt.figure()
ax = plt.subplot(121)
plt.boxplot(Response_matrix[:,condit,0,2])
plt.ylim((-3,30))
plt.axhline(y=0)
plt.xticks([1],['Alpha'])
plt.xlabel('Oscillation')
plt.ylabel('Power Change (dB)')
plt.title('Left LFP Channel')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax = plt.subplot(122)
plt.boxplot(Response_matrix[:,condit,1,2])
plt.ylim((-3,30))
plt.axhline(y=0)
plt.xticks([1],['Alpha'])
plt.xlabel('Oscillation')
plt.ylabel('Power Change (dB)')
plt.title('Right LFP Channel')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.show()
#%%
#Now, plot both conditions next to each other
# All five bands, both conditions side by side (blue = OnTarget,
# green = OffTarget), one panel per channel.
#plt.figure()
bb = 2
#for bb,band in enumerate(do_bands):
plt.figure()
ax = plt.subplot(121)
bp = plt.boxplot(Response_matrix[:,0,0,:],positions=np.linspace(1,6,5),widths=0.25)
edge_color='blue'
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
    plt.setp(bp[element], color=edge_color)
bp = plt.boxplot(Response_matrix[:,1,0,:],positions=np.linspace(1.5,6.5,5),widths=0.25)
edge_color='green'
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
    plt.setp(bp[element], color=edge_color)
plt.xlim((0,7))
#plt.boxplot(2*np.ones(6),Response_matrix[:,1,0,bb])
plt.ylim((-3,40))
plt.axhline(y=0)
plt.xticks(np.linspace(1.25,6.25,5),do_bands,rotation=45)
plt.ylabel('Power Change (dB)')
plt.title('Left LFP Channel')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax = plt.subplot(122)
bp = plt.boxplot(Response_matrix[:,0,1,:],positions=np.linspace(1,6,5),widths=0.25)
edge_color='blue'
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
    plt.setp(bp[element], color=edge_color)
bp = plt.boxplot(Response_matrix[:,1,1,:],positions=np.linspace(1.5,6.5,5),widths=0.25)
edge_color='green'
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
    plt.setp(bp[element], color=edge_color)
plt.xlim((0,7))
plt.ylim((-3,40))
plt.axhline(y=0)
plt.xticks(np.linspace(1.25,6.25,5),do_bands,rotation=45)
plt.ylabel('Power Change (dB)')
plt.title('Right LFP Channel')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
#plt.suptitle(band + ' Power Response to Stim')
plt.show()
#%%
#Do, in a simple way, a PCA on the entire channel space
#Focus on Alpha and Beta for now
# scatter of left-channel alpha (idx 2) vs band idx 3 responses
plt.scatter(Response_matrix[:,condit,0,2],Response_matrix[:,condit,0,3])
#%%
#Do PCA on the band matrices
def Do_PCA():
    """Fit a PCA per patient/condition on the band-power segments.

    With seg_subtr=1 (the only exercised path), PCA is fit on the
    Bilat-minus-PreBilat difference, flattened to
    (n_times, n_channels*n_bands) in Fortran order, and the model plus
    rotated data are stored under SGs[...]['Bilat']['PCA'].
    NOTE(review): the else branch never assigns flattened_matrix, so
    seg_subtr=0 would raise NameError at pca.fit -- dead code as written.
    """
    seg_subtr = 1
    for mm, modal in enumerate(['LFP']):
        for pp, pt in enumerate(['901','903','905','906','907','908']):
            for co, condit in enumerate(['OnTarget','OffTarget']):
                pca = []
                pca = PCA()
                if seg_subtr:
                    start_shape = np.squeeze(SGs[modal][pt][condit]['BandSegments']['Bilat']).shape
                    # (time, channel*band) matrix of baseline-subtracted band power
                    flattened_matrix = np.reshape(np.squeeze(SGs[modal][pt][condit]['BandSegments']['Bilat']) - np.squeeze(SGs[modal][pt][condit]['BandSegments']['PreBilat']),(start_shape[0],start_shape[1]*start_shape[2]),'F')
                    seg = 'Bilat'
                else:
                    for sg, seg in enumerate(Ephys[modal][pt][condit]['segments'].keys()):
                        start_shape = np.squeeze(SGs[modal][pt][condit]['BandSegments'][seg]).shape
                pca.fit(flattened_matrix)
                SGs[modal][pt][condit][seg]['PCA']['PCAModel'] = pca
                SGs[modal][pt][condit][seg]['PCA']['RawBands'] = flattened_matrix
                SGs[modal][pt][condit][seg]['PCA']['RotData'] = pca.transform(flattened_matrix)
                SGs[modal][pt][condit][seg]['PCA']['VComps'] = pca.components_
                SGs[modal][pt][condit][seg]['PCA']['Evals'] = pca.explained_variance_ratio_
#Plot, per patient, the timecourse of the oscillatory band powers
#To be clear, the time dimension is removed, and each is just seen as an independent observation of the underlying process
#plt.figure()
#plt.plot(flattened_matrix)
#plt.suptitle(pt + ' ' + condit)
#%%
def Plot_PCA():
    """Visualise the Do_PCA results for one hard-coded recording.

    Shows the top-3 rotated components, the raw band features, the
    component matrix, and the source spectrogram for DBS901 OffTarget.
    NOTE(review): plot_SG expects iterables for its first three args;
    passing bare strings here would iterate their characters -- confirm.
    """
    from mpl_toolkits.mplot3d import Axes3D
    modal = 'LFP'
    pt = '901'
    condit = 'OffTarget'
    seg = 'Bilat'
    fig = plt.figure()
    ax = fig.add_subplot(412,projection='3d')
    ax.scatter(SGs[modal][pt][condit][seg]['PCA']['RotData'][:,0],SGs[modal][pt][condit][seg]['PCA']['RotData'][:,1],SGs[modal][pt][condit][seg]['PCA']['RotData'][:,2])
    plt.title('Top three components')
    ax=fig.add_subplot(411,projection='3d')
    ax.scatter(SGs[modal][pt][condit][seg]['PCA']['RawBands'][:,0],SGs[modal][pt][condit][seg]['PCA']['RawBands'][:,1],SGs[modal][pt][condit][seg]['PCA']['RawBands'][:,2])
    ax=fig.add_subplot(413)
    ax.imshow(SGs[modal][pt][condit][seg]['PCA']['VComps'],interpolation='none')
    ax=fig.add_subplot(414)
    plot_SG(modal,pt,condit,0,SGs,tpts=Ephys[modal][pt][condit]['segments'][seg])
#%%
#This gets to the meat; it starts looking at what the OffTarget data looks like in the projection of the optimal
# Per patient: project OffTarget band data through the OnTarget-fit PCA
# and collect component matrices/eigenvalue ratios for group plots.
big_comp_matr = np.zeros((8,8,2,6))
eivals = np.zeros((8,2,6))
for pp,pt in enumerate(['901','903','905','906','907','908']):
    fig_2 = plt.figure()
    ax = fig_2.add_subplot(311,projection='3d')
    temp_hold_ont = SGs[modal][pt]['OnTarget'][seg]['PCA']['RotData']
    ax.scatter(temp_hold_ont[:,0],temp_hold_ont[:,1],temp_hold_ont[:,2])
    plt.title('OnTarget Representation in OnTarget PCs')
    temp_hold = SGs[modal][pt]['OnTarget'][seg]['PCA']['PCAModel'].transform(SGs[modal][pt]['OffTarget'][seg]['PCA']['RawBands'])
    ax = fig_2.add_subplot(312,projection='3d')
    ax.scatter(temp_hold[:,0],temp_hold[:,1],temp_hold[:,2])
    plt.title('OffTarget Representation in OnTarget PCs')
    ax = fig_2.add_subplot(325)
    ax.imshow(SGs[modal][pt]['OnTarget'][seg]['PCA']['VComps'],interpolation='none')
    big_comp_matr[:,:,0,pp] = SGs[modal][pt]['OnTarget'][seg]['PCA']['VComps']
    eivals[:,0,pp] = SGs[modal][pt]['OnTarget'][seg]['PCA']['Evals']
    plt.title('OnTarget PCs')
    ax = fig_2.add_subplot(326)
    ax.imshow(SGs[modal][pt]['OffTarget'][seg]['PCA']['VComps'],interpolation='none')
    # NOTE(review): the OffTarget components/evals below also write to
    # index 0, overwriting the OnTarget entries just stored -- index 1
    # was presumably intended; confirm before using big_comp_matr[...,1,:].
    big_comp_matr[:,:,0,pp] = SGs[modal][pt]['OffTarget'][seg]['PCA']['VComps']
    eivals[:,0,pp] = SGs[modal][pt]['OffTarget'][seg]['PCA']['Evals']
    plt.title('OffTarget PCs')
    plt.suptitle('Patient ' + pt + ' PCA Decomp of LFP Bands')
#%%
# Group summary: explained-variance curves and mean component matrix.
plt.figure()
plt.subplot(211)
plt.plot(eivals[:,0,:])
plt.legend(['DBS901','903','905','906','907','908'])
plt.subplot(212)
plt.imshow(np.mean(big_comp_matr,3)[:,:,0],interpolation='none')
plt.title('Left Sided Vectors')
plt.colorbar()
axes_labels = ['L-Delta','L-Theta','L-Alpha','L-Beta*','R-Delta','R-Theta','R-Alpha','R-Beta*']
plt.xticks(range(0,8),axes_labels,rotation='vertical')
plt.yticks(range(0,8),axes_labels,rotation='horizontal')
#%%
#dot products for all component vectors
#%%
#Data saving needs to happen here
#actually display the matplotlib buffer at the end
plt.show()
#%%
#This plots the SGs for each patient individually; use this as a tool to choose segments
def plot_allpt_SGs(pt_list = ['901','903','905','906','907','908'],condit_list = ['OnTarget','OffTarget']):
    """Plot raw traces (570-670 s window) per patient/channel/condition.

    Segment-selection helper: shows each recording's raw time series so
    Bilat/PreBilat windows can be picked by eye. The lower subplot is a
    placeholder (the spectrogram call is commented out).
    NOTE(review): the mutable list defaults are never mutated here, so
    the shared-default pitfall does not bite in practice.
    """
    for pp,pt in enumerate(pt_list):
        #plt.figure()
        for ch in range(2):
            for cd, condit in enumerate(condit_list):
                #plt.subplot(2,2,ch + (2*(cd)+1))
                plt.figure()
                plt.subplot(211)
                plt.plot(SGs['LFP'][pt][condit]['TRaw'],SGs['LFP'][pt][condit]['Raw'])
                #plt.xlim((598,611))
                #plt.xlim((616,630))
                plt.xlim((570,670))
                plt.xlabel('Time (sec)')
                plt.ylabel('Amplitude (uV)')
                plt.subplot(212)
                #plot_SG('LFP',pt,condit,ch,SGs)
                plt.title(condit + ' ' + str(ch))
                #plt.xlim((598,611))
                #plt.xlim((616,630))
                #plt.xlim((570,670))
                plt.axis('off')
        plt.title(pt)
#%%
#plot_SG(disp_modal,disp_pt,disp_condit,0,SGs)
#Plot all the spectrograms here, this should be a script-specific function and should be phased out soon
#plot_allpt_SGs(['907'],['OnTarget'])
# Extract a known stimulation chirp from DBS906 and slide it across a
# voltage-sweep recording, computing an inner product at each lag.
disp_modal=['LFP'];disp_condit = ['OnTarget']
disp_pt = ['906']; timeseg = [370,500];
#disp_pt = ['905'];timeseg = [606,687]; chirp_templ = chirp['Raw'][1][10:30*422]
#plot_SG(disp_modal,disp_pt,disp_condit,'Left',SGs,tpts=timeseg)
#%%
#Extract known chirp for DBS906 and put into a file for template search in (a) voltage sweep LFP and (b) voltage sweep EEG
chirp = plot_phase(disp_modal,disp_pt,disp_condit,1,SGs,tpts=timeseg,fileio_out=False)
pickle.dump(chirp,open('/home/virati/DBS' + disp_pt[0] + '_chirp.pickle',"wb"))
# template = first ~30 s of the filtered right channel (Fs = 422 Hz)
chirp_templ = chirp['Raw'][1][10:30*422]
#%%
#do chirplet transform on the chirp template
pt = disp_pt[0]
#Ignore chirplet transform/analysis and just use the template to search amongst voltage sweep data
#vsweep_fname = '/home/virati/MDD_Data/BR/905/Session_2015_09_02_Wednesday/Dbs905_2015_09_02_10_31_14__MR_0.txt'
targsweep_fname = '/home/extend/MDD_Data/BR/906/Session_2015_08_27_Thursday/DBS906_2015_08_27_16_20_23__MR_0.txt'
vsweep_fname = '/home/extend/MDD_Data/BR/906/Session_2015_08_28_Friday/DBS906_2015_08_28_15_30_07__MR_0.txt'
#fsweep_fname = '/home/extend/MDD_Data/BR/906/Session_2015_08_28_Friday/DBS906_2015_08_28_16_34_45__MR_0.txt' # 906 frequency sweep
#load in the vsweep data
vsweepData = ts.import_BR(vsweep_fname,snip=(0,0))
[vstv,vsraw] = vsweepData.raw_ts()
vsweepData.view_tf(channs=np.arange(2),noverlap=2**9)
#go through vsraw and check for chirp_templ
#How many inner products do we need to compute?
# mean-zero the template so the inner product is covariance-like
chirp_templ = chirp_templ - np.mean(chirp_templ)
n_tot = vsraw.shape[0]
n_templ = chirp_templ.shape[0]
n_sweep = n_tot - n_templ
tl_ip = np.zeros((n_sweep,2))
print(np.max(np.abs(vsraw)))
# stride of 10 samples keeps the sweep tractable; untouched lags stay 0
for tlag in range(0,n_sweep,10):
    print('tlag = ' + str(tlag))
    #mean zero the current
    curr_sig = vsraw[tlag:tlag+n_templ] - np.mean(vsraw[tlag:tlag+n_templ])
    tl_ip[tlag] = np.dot(chirp_templ,curr_sig)
tvect = SGs['LFP'][pt]['OnTarget']['TRaw']
conv_tvect = np.linspace(0,tvect[-1],tl_ip.shape[0])
plt.figure()
plt.plot(conv_tvect,tl_ip)
plt.legend(['Channel 0','Channel 1'])
plt.figure()
plt.plot(chirp_templ)
#%%
#file_writeout_the raw ts, the filtered ts, of left and right
#write_chirp(disp_modal,disp_pt,disp_condit,1,SGs,tpts=timeseg)
#%%
#Do the segmentation
#What times are most important?
| [
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.abs",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.suptitle",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linalg.norm",
"numpy.arange",
"scipy.interpolate.interp1d",
"sys.path.append",
"matplotlib.pyplot.cl... | [((364, 388), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (379, 388), True, 'import seaborn as sns\n'), ((389, 410), 'seaborn.set', 'sns.set', ([], {'font_scale': '(3)'}), '(font_scale=3)\n', (396, 410), True, 'import seaborn as sns\n'), ((411, 433), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (424, 433), True, 'import seaborn as sns\n'), ((468, 559), 'sys.path.append', 'sys.path.append', (['"""/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/archive/MMDBS/"""'], {}), "(\n '/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/archive/MMDBS/')\n", (483, 559), False, 'import sys\n'), ((746, 831), 'sys.path.append', 'sys.path.append', (['"""/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/DBSpace/"""'], {}), "('/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/DBSpace/'\n )\n", (761, 831), False, 'import sys\n'), ((974, 1003), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (987, 1003), False, 'import matplotlib\n'), ((1085, 1101), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1094, 1101), True, 'import matplotlib.pyplot as plt\n'), ((5415, 5432), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5426, 5432), False, 'from collections import defaultdict, OrderedDict\n'), ((12281, 12297), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12290, 12297), True, 'import matplotlib.pyplot as plt\n'), ((12311, 12328), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (12322, 12328), False, 'from collections import defaultdict, OrderedDict\n'), ((12339, 12503), 'numpy.array', 'np.array', (['[[-0.00583578, -0.00279751, 0.00131825, 0.01770169, 0.01166687], [-\n 0.0106586005, 2.42700023e-05, 0.00731445236, 0.00268723035, -\n 3.90440108e-06]]'], {}), '([[-0.00583578, -0.00279751, 0.00131825, 0.01770169, 0.01166687], [\n -0.0106586005, 2.42700023e-05, 
0.00731445236, 0.00268723035, -\n 3.90440108e-06]])\n', (12347, 12503), True, 'import numpy as np\n'), ((12551, 12561), 'DBSpace.nestdict', 'nestdict', ([], {}), '()\n', (12559, 12561), False, 'from DBSpace import nestdict\n'), ((14193, 14215), 'numpy.zeros', 'np.zeros', (['(6, 2, 2, 5)'], {}), '((6, 2, 2, 5))\n', (14201, 14215), True, 'import numpy as np\n'), ((17281, 17297), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (17292, 17297), True, 'import matplotlib.pyplot as plt\n'), ((17298, 17343), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['Response_matrix[:, condit, 0, 2]'], {}), '(Response_matrix[:, condit, 0, 2])\n', (17309, 17343), True, 'import matplotlib.pyplot as plt\n'), ((17341, 17359), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 30)'], {}), '((-3, 30))\n', (17349, 17359), True, 'import matplotlib.pyplot as plt\n'), ((17359, 17375), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)'}), '(y=0)\n', (17370, 17375), True, 'import matplotlib.pyplot as plt\n'), ((17376, 17402), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Alpha']"], {}), "([1], ['Alpha'])\n", (17386, 17402), True, 'import matplotlib.pyplot as plt\n'), ((17402, 17427), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Oscillation"""'], {}), "('Oscillation')\n", (17412, 17427), True, 'import matplotlib.pyplot as plt\n'), ((17428, 17459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Change (dB)"""'], {}), "('Power Change (dB)')\n", (17438, 17459), True, 'import matplotlib.pyplot as plt\n'), ((17460, 17489), 'matplotlib.pyplot.title', 'plt.title', (['"""Left LFP Channel"""'], {}), "('Left LFP Channel')\n", (17469, 17489), True, 'import matplotlib.pyplot as plt\n'), ((17610, 17626), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (17621, 17626), True, 'import matplotlib.pyplot as plt\n'), ((17627, 17672), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['Response_matrix[:, condit, 1, 2]'], {}), '(Response_matrix[:, condit, 
1, 2])\n', (17638, 17672), True, 'import matplotlib.pyplot as plt\n'), ((17670, 17688), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 30)'], {}), '((-3, 30))\n', (17678, 17688), True, 'import matplotlib.pyplot as plt\n'), ((17688, 17704), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)'}), '(y=0)\n', (17699, 17704), True, 'import matplotlib.pyplot as plt\n'), ((17705, 17731), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Alpha']"], {}), "([1], ['Alpha'])\n", (17715, 17731), True, 'import matplotlib.pyplot as plt\n'), ((17731, 17756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Oscillation"""'], {}), "('Oscillation')\n", (17741, 17756), True, 'import matplotlib.pyplot as plt\n'), ((17757, 17788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Change (dB)"""'], {}), "('Power Change (dB)')\n", (17767, 17788), True, 'import matplotlib.pyplot as plt\n'), ((17789, 17819), 'matplotlib.pyplot.title', 'plt.title', (['"""Right LFP Channel"""'], {}), "('Right LFP Channel')\n", (17798, 17819), True, 'import matplotlib.pyplot as plt\n'), ((17934, 17944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17942, 17944), True, 'import matplotlib.pyplot as plt\n'), ((18054, 18066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18064, 18066), True, 'import matplotlib.pyplot as plt\n'), ((18072, 18088), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (18083, 18088), True, 'import matplotlib.pyplot as plt\n'), ((18544, 18560), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 7)'], {}), '((0, 7))\n', (18552, 18560), True, 'import matplotlib.pyplot as plt\n'), ((18613, 18631), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 40)'], {}), '((-3, 40))\n', (18621, 18631), True, 'import matplotlib.pyplot as plt\n'), ((18631, 18647), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)'}), '(y=0)\n', (18642, 18647), True, 'import matplotlib.pyplot as plt\n'), ((18707, 18738), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Power Change (dB)"""'], {}), "('Power Change (dB)')\n", (18717, 18738), True, 'import matplotlib.pyplot as plt\n'), ((18739, 18768), 'matplotlib.pyplot.title', 'plt.title', (['"""Left LFP Channel"""'], {}), "('Left LFP Channel')\n", (18748, 18768), True, 'import matplotlib.pyplot as plt\n'), ((18889, 18905), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (18900, 18905), True, 'import matplotlib.pyplot as plt\n'), ((19357, 19373), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 7)'], {}), '((0, 7))\n', (19365, 19373), True, 'import matplotlib.pyplot as plt\n'), ((19374, 19392), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 40)'], {}), '((-3, 40))\n', (19382, 19392), True, 'import matplotlib.pyplot as plt\n'), ((19392, 19408), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)'}), '(y=0)\n', (19403, 19408), True, 'import matplotlib.pyplot as plt\n'), ((19469, 19500), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Change (dB)"""'], {}), "('Power Change (dB)')\n", (19479, 19500), True, 'import matplotlib.pyplot as plt\n'), ((19501, 19531), 'matplotlib.pyplot.title', 'plt.title', (['"""Right LFP Channel"""'], {}), "('Right LFP Channel')\n", (19510, 19531), True, 'import matplotlib.pyplot as plt\n'), ((19695, 19705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19703, 19705), True, 'import matplotlib.pyplot as plt\n'), ((19800, 19879), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Response_matrix[:, condit, 0, 2]', 'Response_matrix[:, condit, 0, 3]'], {}), '(Response_matrix[:, condit, 0, 2], Response_matrix[:, condit, 0, 3])\n', (19811, 19879), True, 'import matplotlib.pyplot as plt\n'), ((27406, 27445), 'TimeSeries.import_BR', 'ts.import_BR', (['vsweep_fname'], {'snip': '(0, 0)'}), '(vsweep_fname, snip=(0, 0))\n', (27418, 27445), True, 'import TimeSeries as ts\n'), ((27767, 27789), 'numpy.zeros', 'np.zeros', (['(n_sweep, 2)'], {}), '((n_sweep, 2))\n', (27775, 27789), True, 'import numpy as np\n'), ((28094, 28135), 
'numpy.linspace', 'np.linspace', (['(0)', 'tvect[-1]', 'tl_ip.shape[0]'], {}), '(0, tvect[-1], tl_ip.shape[0])\n', (28105, 28135), True, 'import numpy as np\n'), ((28134, 28146), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28144, 28146), True, 'import matplotlib.pyplot as plt\n'), ((28147, 28174), 'matplotlib.pyplot.plot', 'plt.plot', (['conv_tvect', 'tl_ip'], {}), '(conv_tvect, tl_ip)\n', (28155, 28174), True, 'import matplotlib.pyplot as plt\n'), ((28174, 28212), 'matplotlib.pyplot.legend', 'plt.legend', (["['Channel 0', 'Channel 1']"], {}), "(['Channel 0', 'Channel 1'])\n", (28184, 28212), True, 'import matplotlib.pyplot as plt\n'), ((28213, 28225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28223, 28225), True, 'import matplotlib.pyplot as plt\n'), ((28226, 28247), 'matplotlib.pyplot.plot', 'plt.plot', (['chirp_templ'], {}), '(chirp_templ)\n', (28234, 28247), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1270), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1268, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (sec)"""'], {}), "('Time (sec)')\n", (1847, 1861), True, 'import matplotlib.pyplot as plt\n'), ((1866, 1894), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (1876, 1894), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1907, 1909), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3012), 'scipy.signal.butter', 'sig.butter', (['(10)', '(30 / 422)'], {}), '(10, 30 / 422)\n', (2998, 3012), True, 'import scipy.signal as sig\n'), ((3019, 3044), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdYlBu"""'], {}), "('RdYlBu')\n", (3034, 3044), True, 'import matplotlib.pyplot as plt\n'), ((3062, 3079), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3073, 3079), False, 'from collections 
import defaultdict, OrderedDict\n'), ((4832, 4849), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4843, 4849), False, 'from collections import defaultdict, OrderedDict\n'), ((5192, 5209), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5203, 5209), False, 'from collections import defaultdict, OrderedDict\n'), ((5579, 5596), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5590, 5596), False, 'from collections import defaultdict, OrderedDict\n'), ((12521, 12543), 'numpy.linalg.norm', 'np.linalg.norm', (['do_DSV'], {}), '(do_DSV)\n', (12535, 12543), True, 'import numpy as np\n'), ((16035, 16047), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16045, 16047), True, 'import matplotlib.pyplot as plt\n'), ((17181, 17197), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['pt'], {}), '(pt)\n', (17193, 17197), True, 'import matplotlib.pyplot as plt\n'), ((18271, 18310), 'matplotlib.pyplot.setp', 'plt.setp', (['bp[element]'], {'color': 'edge_color'}), '(bp[element], color=edge_color)\n', (18279, 18310), True, 'import matplotlib.pyplot as plt\n'), ((18503, 18542), 'matplotlib.pyplot.setp', 'plt.setp', (['bp[element]'], {'color': 'edge_color'}), '(bp[element], color=edge_color)\n', (18511, 18542), True, 'import matplotlib.pyplot as plt\n'), ((18659, 18685), 'numpy.linspace', 'np.linspace', (['(1.25)', '(6.25)', '(5)'], {}), '(1.25, 6.25, 5)\n', (18670, 18685), True, 'import numpy as np\n'), ((19088, 19127), 'matplotlib.pyplot.setp', 'plt.setp', (['bp[element]'], {'color': 'edge_color'}), '(bp[element], color=edge_color)\n', (19096, 19127), True, 'import matplotlib.pyplot as plt\n'), ((19316, 19355), 'matplotlib.pyplot.setp', 'plt.setp', (['bp[element]'], {'color': 'edge_color'}), '(bp[element], color=edge_color)\n', (19324, 19355), True, 'import matplotlib.pyplot as plt\n'), ((19420, 19446), 'numpy.linspace', 'np.linspace', (['(1.25)', '(6.25)', '(5)'], {}), '(1.25, 6.25, 5)\n', (19431, 19446), 
True, 'import numpy as np\n'), ((21811, 21823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21821, 21823), True, 'import matplotlib.pyplot as plt\n'), ((22043, 22076), 'matplotlib.pyplot.title', 'plt.title', (['"""Top three components"""'], {}), "('Top three components')\n", (22052, 22076), True, 'import matplotlib.pyplot as plt\n'), ((22678, 22700), 'numpy.zeros', 'np.zeros', (['(8, 8, 2, 6)'], {}), '((8, 8, 2, 6))\n', (22686, 22700), True, 'import numpy as np\n'), ((22711, 22730), 'numpy.zeros', 'np.zeros', (['(8, 2, 6)'], {}), '((8, 2, 6))\n', (22719, 22730), True, 'import numpy as np\n'), ((24171, 24183), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24181, 24183), True, 'import matplotlib.pyplot as plt\n'), ((24188, 24204), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (24199, 24204), True, 'import matplotlib.pyplot as plt\n'), ((24209, 24234), 'matplotlib.pyplot.plot', 'plt.plot', (['eivals[:, 0, :]'], {}), '(eivals[:, 0, :])\n', (24217, 24234), True, 'import matplotlib.pyplot as plt\n'), ((24237, 24294), 'matplotlib.pyplot.legend', 'plt.legend', (["['DBS901', '903', '905', '906', '907', '908']"], {}), "(['DBS901', '903', '905', '906', '907', '908'])\n", (24247, 24294), True, 'import matplotlib.pyplot as plt\n'), ((24299, 24315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (24310, 24315), True, 'import matplotlib.pyplot as plt\n'), ((24389, 24420), 'matplotlib.pyplot.title', 'plt.title', (['"""Left Sided Vectors"""'], {}), "('Left Sided Vectors')\n", (24398, 24420), True, 'import matplotlib.pyplot as plt\n'), ((24425, 24439), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24437, 24439), True, 'import matplotlib.pyplot as plt\n'), ((24827, 24837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24835, 24837), True, 'import matplotlib.pyplot as plt\n'), ((27656, 27676), 'numpy.mean', 'np.mean', (['chirp_templ'], {}), '(chirp_templ)\n', (27663, 
27676), True, 'import numpy as np\n'), ((28007, 28036), 'numpy.dot', 'np.dot', (['chirp_templ', 'curr_sig'], {}), '(chirp_templ, curr_sig)\n', (28013, 28036), True, 'import numpy as np\n'), ((4739, 4817), 'numpy.logical_and', 'np.logical_and', (["(SGs[m]['F'] > band['range'][0])", "(SGs[m]['F'] < band['range'][1])"], {}), "(SGs[m]['F'] > band['range'][0], SGs[m]['F'] < band['range'][1])\n", (4753, 4817), True, 'import numpy as np\n'), ((5099, 5177), 'numpy.logical_and', 'np.logical_and', (["(SGs[m]['F'] > band['range'][0])", "(SGs[m]['F'] < band['range'][1])"], {}), "(SGs[m]['F'] > band['range'][0], SGs[m]['F'] < band['range'][1])\n", (5113, 5177), True, 'import numpy as np\n'), ((5646, 5663), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5657, 5663), False, 'from collections import defaultdict, OrderedDict\n'), ((12799, 12816), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (12810, 12816), False, 'from collections import defaultdict, OrderedDict\n'), ((16114, 16139), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(cc + 1)'], {}), '(2, 1, cc + 1)\n', (16125, 16139), True, 'import matplotlib.pyplot as plt\n'), ((16476, 16519), 'numpy.linspace', 'np.linspace', (['(2)', '(orig_len + 1)', '(10 * orig_len)'], {}), '(2, orig_len + 1, 10 * orig_len)\n', (16487, 16519), True, 'import numpy as np\n'), ((16536, 16582), 'numpy.concatenate', 'np.concatenate', (['(llfp[-3:-1], llfp, llfp[1:3])'], {}), '((llfp[-3:-1], llfp, llfp[1:3]))\n', (16550, 16582), True, 'import numpy as np\n'), ((16594, 16640), 'numpy.concatenate', 'np.concatenate', (['(rlfp[-3:-1], rlfp, rlfp[1:3])'], {}), '((rlfp[-3:-1], rlfp, rlfp[1:3]))\n', (16608, 16640), True, 'import numpy as np\n'), ((16651, 16673), 'numpy.arange', 'np.arange', (['li.shape[0]'], {}), '(li.shape[0])\n', (16660, 16673), True, 'import numpy as np\n'), ((16868, 16897), 'matplotlib.pyplot.plot', 'plt.plot', (['lii', 'rii'], {'alpha': '(0.2)'}), '(lii, rii, alpha=0.2)\n', 
(16876, 16897), True, 'import matplotlib.pyplot as plt\n'), ((17101, 17122), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.8, 0.8)'], {}), '((-0.8, 0.8))\n', (17109, 17122), True, 'import matplotlib.pyplot as plt\n'), ((17130, 17151), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.8, 0.8)'], {}), '((-0.8, 0.8))\n', (17138, 17151), True, 'import matplotlib.pyplot as plt\n'), ((17159, 17176), 'matplotlib.pyplot.title', 'plt.title', (['condit'], {}), '(condit)\n', (17168, 17176), True, 'import matplotlib.pyplot as plt\n'), ((18141, 18161), 'numpy.linspace', 'np.linspace', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (18152, 18161), True, 'import numpy as np\n'), ((18368, 18392), 'numpy.linspace', 'np.linspace', (['(1.5)', '(6.5)', '(5)'], {}), '(1.5, 6.5, 5)\n', (18379, 18392), True, 'import numpy as np\n'), ((18958, 18978), 'numpy.linspace', 'np.linspace', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (18969, 18978), True, 'import numpy as np\n'), ((19181, 19205), 'numpy.linspace', 'np.linspace', (['(1.5)', '(6.5)', '(5)'], {}), '(1.5, 6.5, 5)\n', (19192, 19205), True, 'import numpy as np\n'), ((22817, 22829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22827, 22829), True, 'import matplotlib.pyplot as plt\n'), ((23050, 23102), 'matplotlib.pyplot.title', 'plt.title', (['"""OnTarget Representation in OnTarget PCs"""'], {}), "('OnTarget Representation in OnTarget PCs')\n", (23059, 23102), True, 'import matplotlib.pyplot as plt\n'), ((23380, 23433), 'matplotlib.pyplot.title', 'plt.title', (['"""OffTarget Representation in OnTarget PCs"""'], {}), "('OffTarget Representation in OnTarget PCs')\n", (23389, 23433), True, 'import matplotlib.pyplot as plt\n'), ((23732, 23757), 'matplotlib.pyplot.title', 'plt.title', (['"""OnTarget PCs"""'], {}), "('OnTarget PCs')\n", (23741, 23757), True, 'import matplotlib.pyplot as plt\n'), ((24055, 24081), 'matplotlib.pyplot.title', 'plt.title', (['"""OffTarget PCs"""'], {}), "('OffTarget PCs')\n", (24064, 24081), True, 'import 
matplotlib.pyplot as plt\n'), ((24099, 24157), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Patient ' + pt + ' PCA Decomp of LFP Bands')"], {}), "('Patient ' + pt + ' PCA Decomp of LFP Bands')\n", (24111, 24157), True, 'import matplotlib.pyplot as plt\n'), ((25927, 25940), 'matplotlib.pyplot.title', 'plt.title', (['pt'], {}), '(pt)\n', (25936, 25940), True, 'import matplotlib.pyplot as plt\n'), ((27506, 27518), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (27515, 27518), True, 'import numpy as np\n'), ((27803, 27816), 'numpy.abs', 'np.abs', (['vsraw'], {}), '(vsraw)\n', (27809, 27816), True, 'import numpy as np\n'), ((27955, 27990), 'numpy.mean', 'np.mean', (['vsraw[tlag:tlag + n_templ]'], {}), '(vsraw[tlag:tlag + n_templ])\n', (27962, 27990), True, 'import numpy as np\n'), ((4957, 5010), 'numpy.mean', 'np.mean', (["SGs[m][p][condit][cc]['SG'][band_vect, :]", '(0)'], {}), "(SGs[m][p][condit][cc]['SG'][band_vect, :], 0)\n", (4964, 5010), True, 'import numpy as np\n'), ((5317, 5370), 'numpy.mean', 'np.mean', (["SGs[m][p][condit][cc]['SG'][band_vect, :]", '(0)'], {}), "(SGs[m][p][condit][cc]['SG'][band_vect, :], 0)\n", (5324, 5370), True, 'import numpy as np\n'), ((5726, 5743), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5737, 5743), False, 'from collections import defaultdict, OrderedDict\n'), ((12921, 12984), 'TimeSeries.import_BR', 'ts.import_BR', (["Ephys[modal][pt][condit]['Filename']"], {'snip': '(0, 0)'}), "(Ephys[modal][pt][condit]['Filename'], snip=(0, 0))\n", (12933, 12984), True, 'import TimeSeries as ts\n'), ((13805, 13849), 'numpy.zeros', 'np.zeros', (["(BANDS[0]['Alpha'].shape[0], 2, 5)"], {}), "((BANDS[0]['Alpha'].shape[0], 2, 5))\n", (13813, 13849), True, 'import numpy as np\n'), ((13901, 13911), 'DBSpace.nestdict', 'nestdict', ([], {}), '()\n', (13909, 13911), False, 'from DBSpace import nestdict\n'), ((13956, 14000), 'numpy.zeros', 'np.zeros', (["(BANDS[0]['Alpha'].shape[0], 2, 1)"], {}), 
"((BANDS[0]['Alpha'].shape[0], 2, 1))\n", (13964, 14000), True, 'import numpy as np\n'), ((16697, 16726), 'scipy.interpolate.interp1d', 'interp1d', (['t', 'li'], {'kind': '"""cubic"""'}), "(t, li, kind='cubic')\n", (16705, 16726), False, 'from scipy.interpolate import interp1d\n'), ((16743, 16772), 'scipy.interpolate.interp1d', 'interp1d', (['t', 'ri'], {'kind': '"""cubic"""'}), "(t, ri, kind='cubic')\n", (16751, 16772), False, 'from scipy.interpolate import interp1d\n'), ((24331, 24356), 'numpy.mean', 'np.mean', (['big_comp_matr', '(3)'], {}), '(big_comp_matr, 3)\n', (24338, 24356), True, 'import numpy as np\n'), ((1360, 1372), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1370, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1710), 'matplotlib.pyplot.title', 'plt.title', (["(m + ' ' + p + ' ' + condit)"], {}), "(m + ' ' + p + ' ' + condit)\n", (1682, 1710), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2056), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2054, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2213), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2206, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2721, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2740, 2756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (2751, 2756), True, 'import matplotlib.pyplot as plt\n'), ((2773, 2834), 'matplotlib.pyplot.plot', 'plt.plot', (["SGs[m][p][condit]['TRaw']", "SGs[m][p][condit]['Raw']"], {}), "(SGs[m][p][condit]['TRaw'], SGs[m][p][condit]['Raw'])\n", (2781, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2883), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (2878, 2883), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3297), 'numpy.logical_and', 'np.logical_and', (['(tvec > tpts[0])', '(tvec < tpts[1])'], {}), '(tvec > tpts[0], 
tvec < tpts[1])\n', (3265, 3297), True, 'import numpy as np\n'), ((15500, 15517), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15511, 15517), False, 'from collections import defaultdict, OrderedDict\n'), ((15571, 15588), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15582, 15588), False, 'from collections import defaultdict, OrderedDict\n'), ((16303, 16352), 'scipy.stats.zscore', 'stats.zscore', (["SGs[modal][pt][condit]['DSV'][:, 0]"], {}), "(SGs[modal][pt][condit]['DSV'][:, 0])\n", (16315, 16352), True, 'import scipy.stats as stats\n'), ((16377, 16426), 'scipy.stats.zscore', 'stats.zscore', (["SGs[modal][pt][condit]['DSV'][:, 1]"], {}), "(SGs[modal][pt][condit]['DSV'][:, 1])\n", (16389, 16426), True, 'import scipy.stats as stats\n'), ((16816, 16848), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'llfp.shape[0]'], {}), '(0, 1, llfp.shape[0])\n', (16827, 16848), True, 'import numpy as np\n'), ((20184, 20189), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (20187, 20189), False, 'from sklearn.decomposition import PCA\n'), ((25247, 25259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25257, 25259), True, 'import matplotlib.pyplot as plt\n'), ((25276, 25292), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (25287, 25292), True, 'import matplotlib.pyplot as plt\n'), ((25309, 25380), 'matplotlib.pyplot.plot', 'plt.plot', (["SGs['LFP'][pt][condit]['TRaw']", "SGs['LFP'][pt][condit]['Raw']"], {}), "(SGs['LFP'][pt][condit]['TRaw'], SGs['LFP'][pt][condit]['Raw'])\n", (25317, 25380), True, 'import matplotlib.pyplot as plt\n'), ((25470, 25490), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(570, 670)'], {}), '((570, 670))\n', (25478, 25490), True, 'import matplotlib.pyplot as plt\n'), ((25523, 25547), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (sec)"""'], {}), "('Time (sec)')\n", (25533, 25547), True, 'import matplotlib.pyplot as plt\n'), ((25564, 25592), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude (uV)"""'], {}), "('Amplitude (uV)')\n", (25574, 25592), True, 'import matplotlib.pyplot as plt\n'), ((25626, 25642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (25637, 25642), True, 'import matplotlib.pyplot as plt\n'), ((25886, 25901), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (25894, 25901), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1494), 'numpy.logical_and', 'np.logical_and', (["(SGs[m][p][condit]['T'] > tpts[0])", "(SGs[m][p][condit]['T'] < tpts[1])"], {}), "(SGs[m][p][condit]['T'] > tpts[0], SGs[m][p][condit]['T'] <\n tpts[1])\n", (1422, 1494), True, 'import numpy as np\n'), ((3352, 3409), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', 'a', "SGs[m][p][condit]['Raw'][sel_tvec, 0]"], {}), "(b, a, SGs[m][p][condit]['Raw'][sel_tvec, 0])\n", (3364, 3409), True, 'import scipy.signal as sig\n'), ((3446, 3503), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', 'a', "SGs[m][p][condit]['Raw'][sel_tvec, 1]"], {}), "(b, a, SGs[m][p][condit]['Raw'][sel_tvec, 1])\n", (3458, 3503), True, 'import scipy.signal as sig\n'), ((3843, 3855), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3853, 3855), True, 'import matplotlib.pyplot as plt\n'), ((4033, 4049), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (4044, 4049), True, 'import matplotlib.pyplot as plt\n'), ((4106, 4138), 'matplotlib.pyplot.plot', 'plt.plot', (['tvec[sel_tvec]', 'Lchann'], {}), '(tvec[sel_tvec], Lchann)\n', (4114, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4158, 4190), 'matplotlib.pyplot.plot', 'plt.plot', (['tvec[sel_tvec]', 'Rchann'], {}), '(tvec[sel_tvec], Rchann)\n', (4166, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4231, 4247), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (4242, 4247), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4379), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5, 5)'], 
{}), '((-5, 5))\n', (4370, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4416), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5, 5)'], {}), '((-5, 5))\n', (4407, 4416), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4463), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase Portrait"""'], {}), "('Phase Portrait')\n", (4445, 4463), True, 'import matplotlib.pyplot as plt\n'), ((14801, 14872), 'numpy.dot', 'np.dot', (["SGs[modal][pt][condit]['BandMatrix'][seg, cc, :]", 'do_DSV[cc, :]'], {}), "(SGs[modal][pt][condit]['BandMatrix'][seg, cc, :], do_DSV[cc, :])\n", (14807, 14872), True, 'import numpy as np\n'), ((15708, 15779), 'numpy.mean', 'np.mean', (["SGs[modal][pt][condit]['BandSegments']['Bilat'][0, :, :, :]", '(0)'], {}), "(SGs[modal][pt][condit]['BandSegments']['Bilat'][0, :, :, :], 0)\n", (15715, 15779), True, 'import numpy as np\n'), ((15792, 15866), 'numpy.mean', 'np.mean', (["SGs[modal][pt][condit]['BandSegments']['PreBilat'][0, :, :, :]", '(0)'], {}), "(SGs[modal][pt][condit]['BandSegments']['PreBilat'][0, :, :, :], 0)\n", (15799, 15866), True, 'import numpy as np\n'), ((20254, 20313), 'numpy.squeeze', 'np.squeeze', (["SGs[modal][pt][condit]['BandSegments']['Bilat']"], {}), "(SGs[modal][pt][condit]['BandSegments']['Bilat'])\n", (20264, 20313), True, 'import numpy as np\n'), ((20370, 20429), 'numpy.squeeze', 'np.squeeze', (["SGs[modal][pt][condit]['BandSegments']['Bilat']"], {}), "(SGs[modal][pt][condit]['BandSegments']['Bilat'])\n", (20380, 20429), True, 'import numpy as np\n'), ((20432, 20494), 'numpy.squeeze', 'np.squeeze', (["SGs[modal][pt][condit]['BandSegments']['PreBilat']"], {}), "(SGs[modal][pt][condit]['BandSegments']['PreBilat'])\n", (20442, 20494), True, 'import numpy as np\n'), ((20732, 20787), 'numpy.squeeze', 'np.squeeze', (["SGs[modal][pt][condit]['BandSegments'][seg]"], {}), "(SGs[modal][pt][condit]['BandSegments'][seg])\n", (20742, 20787), True, 'import numpy as np\n'), ((1605, 1654), 'numpy.abs', 'np.abs', 
(["SGs[m][p][condit]['SG'][chann][:, t_idxs]"], {}), "(SGs[m][p][condit]['SG'][chann][:, t_idxs])\n", (1611, 1654), True, 'import numpy as np\n'), ((15198, 15303), 'numpy.logical_and', 'np.logical_and', (["(SGs[modal][pt][condit]['T'] >= tbounds[0])", "(SGs[modal][pt][condit]['T'] <= tbounds[1])"], {}), "(SGs[modal][pt][condit]['T'] >= tbounds[0], SGs[modal][pt][\n condit]['T'] <= tbounds[1])\n", (15212, 15303), True, 'import numpy as np\n'), ((2124, 2179), 'numpy.median', 'np.median', (["SGs[m][p][condit]['SG'][chann][:, :]"], {'axis': '(1)'}), "(SGs[m][p][condit]['SG'][chann][:, :], axis=1)\n", (2133, 2179), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import logging, uuid
__version__ = "0.1.0"
from d3m.container.dataset import D3MDatasetLoader, Dataset
from d3m.metadata import base as metadata_base, problem
from d3m.metadata.base import Metadata
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
import problem_pb2 as problem_pb2
import os, json
import pickle
import solutiondescription
import pandas as pd
import numpy as np
def load_problem_doc(problem_doc_uri: str):
    """Read a problemDoc.json file and wrap its contents in a d3m Metadata object.

    Parameters
    ----------
    problem_doc_uri:
        Path to the problemDoc.json file on disk.

    Returns
    -------
    Metadata wrapping the parsed problem document.
    """
    with open(problem_doc_uri) as doc_file:
        raw_doc = json.load(doc_file)
    return Metadata(raw_doc)
def add_target_columns_metadata(dataset: 'Dataset', problem_doc: 'Metadata'):
    """Mark every target column named in the problem document on the dataset metadata.

    For each target listed under problem_doc['inputs'][*]['targets'] (dict-style
    entries with 'resource_id' and 'column_index' keys), ensure the column carries
    the Target and TrueTarget semantic types and drop its Attribute semantic type,
    so downstream primitives treat it as a prediction target rather than a feature.
    Returns the dataset with its (immutable) metadata rebound to the updated copy.
    """
    for data in problem_doc['inputs']:
        targets = data['targets']
        for target in targets:
            # Current semantic types of the target column; copied to a list so we
            # can append before writing the updated set back.
            semantic_types = list(dataset.metadata.query((target['resource_id'], metadata_base.ALL_ELEMENTS, target['column_index'])).get('semantic_types', []))
            if 'https://metadata.datadrivendiscovery.org/types/Target' not in semantic_types:
                semantic_types.append('https://metadata.datadrivendiscovery.org/types/Target')
                dataset.metadata = dataset.metadata.update((target['resource_id'], metadata_base.ALL_ELEMENTS, target['column_index']), {'semantic_types': semantic_types})
            if 'https://metadata.datadrivendiscovery.org/types/TrueTarget' not in semantic_types:
                semantic_types.append('https://metadata.datadrivendiscovery.org/types/TrueTarget')
                dataset.metadata = dataset.metadata.update((target['resource_id'], metadata_base.ALL_ELEMENTS, target['column_index']), {'semantic_types': semantic_types})
            # A column cannot be both a learnable Attribute and a Target.
            dataset.metadata = dataset.metadata.remove_semantic_type((target['resource_id'],
                metadata_base.ALL_ELEMENTS, target['column_index']),'https://metadata.datadrivendiscovery.org/types/Attribute',)
    return dataset
def add_privileged_columns_metadata(dataset: 'Dataset', problem_doc: 'Metadata'):
    """Tag privileged columns (dict-style entries from the problem document)
    with the PrivilegedData semantic type and return the dataset.

    Privileged columns exist only at train time; the semantic type lets
    primitives exclude them when predicting.
    """
    privileged_entries = problem_doc.get('inputs')[0].get('privileged_data', [])
    for entry in privileged_entries:
        selector = (entry['resource_id'], metadata_base.ALL_ELEMENTS, entry['column_index'])
        dataset.metadata = dataset.metadata.add_semantic_type(
            selector,
            'https://metadata.datadrivendiscovery.org/types/PrivilegedData',
        )
    return dataset
def add_target_metadata(dataset, targets):
    """Mark target columns on the dataset metadata, attribute-style variant.

    Same effect as add_target_columns_metadata, but `targets` is an iterable of
    objects exposing `.resource_id` and `.column_index` attributes (e.g. the
    protobuf/problem API form) instead of dicts. Adds Target and TrueTarget
    semantic types, removes Attribute, and returns the dataset with its
    (immutable) metadata rebound to the updated copy.
    """
    for target in targets:
        # Current semantic types of the target column, as a mutable list.
        semantic_types = list(dataset.metadata.query((target.resource_id, metadata_base.ALL_ELEMENTS, target.column_index)).get('semantic_types', []))
        if 'https://metadata.datadrivendiscovery.org/types/Target' not in semantic_types:
            semantic_types.append('https://metadata.datadrivendiscovery.org/types/Target')
            dataset.metadata = dataset.metadata.update((target.resource_id, metadata_base.ALL_ELEMENTS, target.column_index), {'semantic_types': semantic_types})
        if 'https://metadata.datadrivendiscovery.org/types/TrueTarget' not in semantic_types:
            semantic_types.append('https://metadata.datadrivendiscovery.org/types/TrueTarget')
            dataset.metadata = dataset.metadata.update((target.resource_id, metadata_base.ALL_ELEMENTS, target.column_index), {'semantic_types': semantic_types})
        # A column cannot be both a learnable Attribute and a Target.
        dataset.metadata = dataset.metadata.remove_semantic_type((target.resource_id,
            metadata_base.ALL_ELEMENTS, target.column_index),'https://metadata.datadrivendiscovery.org/types/Attribute',)
    return dataset
def add_privileged_metadata(dataset: 'Dataset', privileged_data):
    """Tag privileged columns with the PrivilegedData semantic type.

    Attribute-style variant of add_privileged_columns_metadata: each entry in
    `privileged_data` exposes `.resource_id` and `.column_index` attributes.
    Returns the dataset with its metadata rebound to the updated copy.
    """
    for entry in privileged_data:
        dataset.metadata = dataset.metadata.add_semantic_type(
            (entry.resource_id, metadata_base.ALL_ELEMENTS, entry.column_index),
            'https://metadata.datadrivendiscovery.org/types/PrivilegedData',
        )
    return dataset
def get_task(names):
    """Map a collection of upper-cased task keywords to one canonical task name.

    Keywords are checked against a fixed priority order; the first match wins.
    LINKPREDICTION combined with TIMESERIES is reported as the special
    'LINKPREDICTIONTIMESERIES' task. Returns None when nothing matches.
    """
    priority = ('SEMISUPERVISED', 'OBJECTDETECTION', 'FORECASTING', 'GRAPHMATCHING',
                'VERTEXNOMINATION', 'VERTEXCLASSIFICATION', 'COMMUNITYDETECTION',
                'LINKPREDICTION', 'COLLABORATIVEFILTERING', 'CLUSTERING',
                'CLASSIFICATION', 'REGRESSION')
    for candidate in priority:
        if candidate not in names:
            continue
        if candidate == 'LINKPREDICTION' and 'TIMESERIES' in names:
            return 'LINKPREDICTIONTIMESERIES'
        return candidate
    return None
def get_task_name(keywords):
    """Derive the canonical task name from raw (mixed-case) problem keywords."""
    return get_task(get_task_list(keywords))
def get_task_list(keywords):
    """Upper-case every problem keyword.

    Parameters
    ----------
    keywords:
        Iterable of keyword strings (any case).

    Returns
    -------
    list[str] of the keywords upper-cased, in the original order.
    """
    # Idiomatic comprehension instead of the manual append loop.
    return [keyword.upper() for keyword in keywords]
def load_data_problem(inputdir, problempath):
    """Load the TRAIN dataset and problem description for a D3M problem.

    Parameters: `inputdir` is the problem's input root (only logged here);
    `problempath` is the full path to .../problem_TRAIN/problemDoc.json.

    Returns a tuple (dataset, taskname, problem_description, metric, posLabel,
    keywords): posLabel is only populated when the metric is "f1", and
    keywords carries the optional dataAugmentation hints (None when absent).
    """
    print("Reading ", inputdir)
    print("Reading ", problempath)
    with open(problempath) as file:
        problem_schema = json.load(file)
    #filename = "scores.csv"
    #with open(filename, "a") as g:
    #    g.write(inputdir + "\n")
    # Strip the trailing "problem_TRAIN/problemDoc.json" (29 chars) to get the
    # problem root — assumes the standard D3M directory layout; verify if the
    # layout ever changes.
    datasetId = problempath[:-29]
    dataset_schema = datasetId + "dataset_TRAIN/datasetDoc.json"
    problem_doc_metadata = Metadata(problem_schema)
    dataset_uri = 'file://{dataset_uri}'.format(dataset_uri=dataset_schema)
    dataset = D3MDatasetLoader().load(dataset_uri)
    problem_description = problem.parse_problem_description(problempath)
    # Annotate target and privileged columns so downstream primitives find them.
    dataset = add_target_columns_metadata(dataset, problem_description)
    dataset = add_privileged_columns_metadata(dataset, problem_description)
    taskname = get_task_name(problem_doc_metadata.query(())['about']['taskKeywords'])
    metric = problem_doc_metadata.query(())['inputs']['performanceMetrics'][0]['metric']
    posLabel = None
    if metric == "f1":
        # Binary f1 needs to know which class label counts as "positive".
        posLabel = problem_doc_metadata.query(())['inputs']['performanceMetrics'][0]['posLabel']
    # Read the data augmentation hints, if any.
    keywords = getAugmentation_keywords(problem_doc_metadata)
    return (dataset, taskname, problem_description, metric, posLabel, keywords)
def getAugmentation_keywords(problem_doc_metadata):
    """Return the problem document's dataAugmentation entry, or None if absent."""
    top_level = problem_doc_metadata.query(())
    if "dataAugmentation" in top_level:
        return top_level["dataAugmentation"]
    return None
def get_pipeline(dirname, pipeline_name):
    """Unpickle a previously written solution.

    The solution saved under ``<id>_<rank>`` lives at
    ``dirname/<id>_<rank>/<id>.dump`` (see write_solution).

    Parameters
    ----------
    dirname:
        Directory containing the per-solution subdirectories.
    pipeline_name:
        Subdirectory name of the form ``<id>_<rank>``.

    Returns
    -------
    The unpickled solution object.
    """
    newdirname = dirname + "/" + pipeline_name
    filename = pipeline_name.split("_")[0]
    f = newdirname + "/" + filename + ".dump"
    # Use a context manager so the dump file is closed; the original
    # pickle.load(open(...)) leaked the file handle.
    with open(f, 'rb') as dump_file:
        return pickle.load(dump_file)
def write_solution(solution, dirname):
    """Pickle *solution* to disk as ``dirname/<id>_<rank>/<id>.dump``.

    Creates the per-solution subdirectory if it does not exist yet. The layout
    mirrors what get_pipeline expects when reloading.
    """
    subdir = "%s/%s_%s" % (dirname, solution.id, str(solution.rank))
    if not os.path.exists(subdir):
        os.makedirs(subdir)
    dump_path = subdir + "/" + solution.id + ".dump"
    with open(dump_path, "wb") as dump_file:
        pickle.dump(solution, dump_file)
def initialize_for_search(outputDir):
    """Create the D3M output directory layout expected by the search phase.

    Each standard subdirectory is created under *outputDir* unless it already
    exists; existing directories are left untouched.
    """
    subdirs = ("executables", "predictions", "pipelines_searched",
               "pipelines_scored", "pipelines_ranked", "pipeline_runs",
               "subpipelines", "additional_inputs")
    for sub in subdirs:
        path = outputDir + "/" + sub
        if not os.path.exists(path):
            os.makedirs(path)
def write_predictions(predictions, dirname, request_id):
    """Write a predictions DataFrame to ``dirname/<request_id>/predictions.csv``.

    The per-request directory is created if missing. The CSV keeps the header
    row and drops the index. Returns the path of the written file.
    """
    target_dir = "%s/%s" % (dirname, str(request_id))
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    out_path = target_dir + "/predictions.csv"
    with open(out_path, 'w') as out_file:
        predictions.to_csv(out_file, header=True, index=False)
    return out_path
def write_pipeline_json(solution, primitives, solution_dict, dirName, subpipeline_dirName, rank=None):
    """Serialize a searched pipeline to D3M pipeline JSON.

    Thin pass-through to the solution object's own write_pipeline_json; see
    solutiondescription for the serialization details. `subpipeline_dirName`
    receives any nested sub-pipelines; `rank` is optional and forwarded as-is.
    """
    solution.write_pipeline_json(primitives, solution_dict, dirName, subpipeline_dirName, rank)
def write_rank_file(solution, rank, dirName):
    """Write the pipeline's rank to ``dirName/<solution.id>.rank``.

    The file contains only the stringified rank, per the D3M TA2 convention.
    """
    rank_path = "%s/%s.rank" % (dirName, solution.id)
    with open(rank_path, 'w') as rank_file:
        rank_file.write(str(rank))
def write_pipeline_yaml(solution, dirname, dataset, problem_description):
    """Record a pipeline run as ``dirname/<random-uuid>.yaml``.

    A fresh UUID names the file; the solution object performs the actual
    serialization via its write_pipeline_run method.
    """
    yaml_path = "%s/%s.yaml" % (dirname, str(uuid.uuid4()))
    solution.write_pipeline_run(problem_description, dataset, yaml_path)
def write_pipeline_executable(solution, dirname):
    """Emit an executable shell wrapper ``dirname/<id>_<rank>.sh``.

    The script re-runs the fitted pipeline via ``src/main.py test``; it is
    chmod'ed to 0o755 so the D3M runner can execute it directly.
    """
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    script_body = '#!/bin/bash\n python ./src/main.py test\n'
    script_path = "%s/%s_%s.sh" % (dirname, solution.id, str(solution.rank))
    with open(script_path, 'w') as script_file:
        script_file.write(script_body)
    os.chmod(script_path, 0o755)
def invert_metric(metric_type):
    """Return True if *metric_type* is minimized (lower is better).

    Error/loss metrics are minimized, so their scores must be inverted when
    ranking pipelines where "higher is better" is assumed.
    """
    # Set literal + membership test instead of the original repeated add() calls.
    min_metrics = {
        "MEAN_SQUARED_ERROR",
        "ROOT_MEAN_SQUARED_ERROR",
        "MEAN_ABSOLUTE_ERROR",
        "LOSS",
        "HAMMING_LOSS",
    }
    return metric_type in min_metrics
def get_distil_metric_name(metric_type):
    """Translate a D3M metric name into the name the Distil primitives expect.

    Fixes two bugs in the original implementation:
    * MEAN_ABSOLUTE_ERROR was swallowed by the first branch and mapped to
      'meanSquaredError'; its dedicated (unreachable) elif clearly intended
      'meanAbsoluteError'.
    * The F1 branch read ``metric_type == 'f1Macro'`` — a no-op comparison
      where an assignment was intended — so every F1 variant silently fell
      back to 'accuracy'.

    Unknown metrics (and ACCURACY) map to 'accuracy'.
    """
    if metric_type in ("MEAN_SQUARED_ERROR", "ROOT_MEAN_SQUARED_ERROR"):
        return 'meanSquaredError'
    if metric_type == "MEAN_ABSOLUTE_ERROR":
        return 'meanAbsoluteError'
    if metric_type in ("F1_MACRO", "F1_MICRO", "F1"):
        return 'f1Macro'
    return 'accuracy'
def search_all_related(dataset, keywords, min_size = 5):
    """Query the NYU Datamart for datasets related to *dataset*.

    Arguments:
        dataset {Dataset} -- the supplied dataset to augment
        keywords {iterable of DataAugmentation} -- each entry exposes a
            ``keywords`` attribute with the search terms
        min_size {int} -- if keyword search yields this many results or fewer,
            an additional unconstrained search is run

    Raises Exception when no candidate datasets are found at all.

    Returns the candidate datasets, deduplicated by name, widest tables
    (>= 20 columns) removed, sorted by datamart score (best first).
    """
    # Imported lazily so a missing datamart install only breaks this call path
    # and the caller can catch the ImportError.
    import datamart, datamart_nyu

    # Datamart endpoint comes from the environment, e.g.
    # 'https://datamart.d3m.vida-nyu.org'.
    client = datamart_nyu.RESTDatamart(os.environ['DATAMART_URL_NYU'])

    def run_query(data, search_terms):
        """Run one keyword query against the datamart; return the first page."""
        cursor = client.search_with_data(
            query = datamart.DatamartQuery(keywords = search_terms),
            supplied_data = data,
        )
        return cursor.get_next_page()

    candidates = []
    for entry in keywords:
        try:
            page = run_query(dataset, [term for term in entry.keywords])
            if page:
                candidates.extend(page)
        except Exception as err:
            # Best effort: a single failed keyword search must not abort the rest.
            print("Search - Datamart crashed ...", err)

    # Too few hits: fall back to an unconstrained search.
    if len(candidates) <= min_size:
        fallback = run_query(dataset, None)
        if fallback:
            candidates.extend(fallback)

    if len(candidates) == 0:
        raise Exception("No interesting datasets to use")

    # Drop overly wide tables.
    candidates = [c for c in candidates
                  if len(c.get_json_metadata()['metadata']['columns']) < 20]

    # Keep a single entry per dataset name (np.unique keeps the first occurrence).
    _, keep_idx = np.unique([c.get_json_metadata()['metadata']['name'] for c in candidates],
                            return_index=True)
    candidates = np.array(candidates)[keep_idx].tolist()

    # Highest datamart score first.
    candidates = sorted(candidates, key = lambda c: c.get_json_metadata()['score'], reverse = True)
    return candidates
| [
"pickle.dump",
"os.chmod",
"json.load",
"os.makedirs",
"uuid.uuid4",
"d3m.metadata.problem.parse_problem_description",
"os.path.exists",
"datamart_nyu.RESTDatamart",
"d3m.metadata.base.Metadata",
"d3m.container.dataset.D3MDatasetLoader",
"numpy.array",
"datamart.DatamartQuery"
] | [((751, 772), 'd3m.metadata.base.Metadata', 'Metadata', (['problem_doc'], {}), '(problem_doc)\n', (759, 772), False, 'from d3m.metadata.base import Metadata\n'), ((5116, 5140), 'd3m.metadata.base.Metadata', 'Metadata', (['problem_schema'], {}), '(problem_schema)\n', (5124, 5140), False, 'from d3m.metadata.base import Metadata\n'), ((5295, 5341), 'd3m.metadata.problem.parse_problem_description', 'problem.parse_problem_description', (['problempath'], {}), '(problempath)\n', (5328, 5341), False, 'from d3m.metadata import base as metadata_base, problem\n'), ((6746, 6775), 'pickle.dump', 'pickle.dump', (['solution', 'output'], {}), '(solution, output)\n', (6757, 6775), False, 'import pickle\n'), ((8502, 8525), 'os.chmod', 'os.chmod', (['filename', '(493)'], {}), '(filename, 493)\n', (8510, 8525), False, 'import os, json\n'), ((9913, 9970), 'datamart_nyu.RESTDatamart', 'datamart_nyu.RESTDatamart', (["os.environ['DATAMART_URL_NYU']"], {}), "(os.environ['DATAMART_URL_NYU'])\n", (9938, 9970), False, 'import datamart, datamart_nyu\n'), ((708, 723), 'json.load', 'json.load', (['file'], {}), '(file)\n', (717, 723), False, 'import os, json\n'), ((4872, 4887), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4881, 4887), False, 'import os, json\n'), ((6598, 6632), 'os.path.exists', 'os.path.exists', (['supporting_dirname'], {}), '(supporting_dirname)\n', (6612, 6632), False, 'import os, json\n'), ((6642, 6673), 'os.makedirs', 'os.makedirs', (['supporting_dirname'], {}), '(supporting_dirname)\n', (6653, 6673), False, 'import os, json\n'), ((7298, 7323), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (7312, 7323), False, 'import os, json\n'), ((7333, 7355), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7344, 7355), False, 'import os, json\n'), ((8042, 8054), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8052, 8054), False, 'import logging, uuid\n'), ((8238, 8261), 'os.path.exists', 'os.path.exists', (['dirname'], {}), 
'(dirname)\n', (8252, 8261), False, 'import os, json\n'), ((8271, 8291), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (8282, 8291), False, 'import os, json\n'), ((10188, 10229), 'datamart.DatamartQuery', 'datamart.DatamartQuery', ([], {'keywords': 'keywords'}), '(keywords=keywords)\n', (10210, 10229), False, 'import datamart, datamart_nyu\n'), ((5231, 5249), 'd3m.container.dataset.D3MDatasetLoader', 'D3MDatasetLoader', ([], {}), '()\n', (5247, 5249), False, 'from d3m.container.dataset import D3MDatasetLoader, Dataset\n'), ((7130, 7150), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (7144, 7150), False, 'import os, json\n'), ((7163, 7180), 'os.makedirs', 'os.makedirs', (['name'], {}), '(name)\n', (7174, 7180), False, 'import os, json\n'), ((11204, 11222), 'numpy.array', 'np.array', (['datasets'], {}), '(datasets)\n', (11212, 11222), True, 'import numpy as np\n')] |
"""
This module is for normalization and data clipping as we described in Methods section.
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from collections import Counter
def random_partition(train_df, test_df, n_genes=978, annotation_col='nc_label', seed=0, validation_ratio=0.3):
    """
    Build train / validation / test dataset dicts for model training.

    The train frame is normalized on itself and the test frame against the
    train frame; then a random `validation_ratio` fraction of the training
    rows is carved out as the validation set.

    Returns:
        (train, valid, test, valid_pos): the first three are dataset dicts
        (see preprocess_df); valid_pos holds the validation row indices.
    """
    train, test = normalize_train_and_test(train_df, test_df, annot_col=annotation_col, n_genes=n_genes)
    print("[Preprocessing] Creating validation set...")
    np.random.seed(seed)
    n_samples = train['value'].shape[0]
    valid_pos = np.random.choice(range(n_samples), int(validation_ratio * n_samples),
                                 replace=False)
    train_pos = list(set(range(n_samples)) - set(valid_pos))

    def _subset(rows):
        # Slice every field down to *rows* and renormalize the sampler weights.
        weights = train['p_sampler'][rows]
        return {'value': train['value'][rows], 'full_label': train['full_label'].iloc[rows],
                'class_annot': train['class_annot'][rows],
                'p_sampler': weights / sum(weights)}

    valid = _subset(valid_pos)
    train = _subset(train_pos)
    return train, valid, test, valid_pos
def normalize_train_and_test(train_df, test_df, annot_col='class label', n_genes=978):
    """
    Normalize the train and test frames into dataset dicts.

    The training set is scaled on its own statistics, while the test set
    reuses the training frame as its scaling reference so both share one
    normalization.

    Args:
        train_df (pandas.DataFrame): training data loaded from disk
        test_df (pandas.DataFrame): test data loaded from disk
        annot_col (str): column holding the classification label
        n_genes (int): feature-vector length (e.g. 978 for L1000 genes)

    Returns:
        (dict, dict): processed train and test instances, each carrying
        'value', 'full_label', 'class_annot' and 'p_sampler'
        (see preprocess_df for the exact contents).
    """
    processed_train = preprocess_df(train_df, annot_col=annot_col, task='train_set', n_genes=n_genes)
    processed_test = preprocess_df(test_df, annot_col=annot_col, task='test_set',
                                 normalize_on=train_df, n_genes=n_genes)
    return processed_train, processed_test
def preprocess_df(df, annot_col='class label', task='untitled_set', normalize_on=None, n_genes=978):
    """
    Core preprocessing: split a raw frame into features + labels, one-hot
    encode the class annotation, scale/clip the features and compute
    class-balanced sampling probabilities.

    Args:
        df (pandas.DataFrame): frame to preprocess; the last *n_genes*
            columns hold the expression values, everything before is metadata.
        annot_col (str): metadata column holding the integer class label.
        task (str): name used in log messages for this dataset instance.
        normalize_on (pandas.DataFrame or None): reference frame whose last
            *n_genes* columns provide the scaling statistics; None scales
            the data on itself.
        n_genes (int): feature-vector length (e.g. 978 for L1000 genes).

    Returns:
        dict with keys 'value' (scaled feature matrix), 'full_label'
        (metadata frame), 'class_annot' (one-hot labels, float32) and
        'p_sampler' (per-sample probabilities balancing the classes).
    """
    print("[Preprocessing] Processing dataset {}...".format(task.upper()))
    assert df.shape[1] > n_genes # L1000 genes
    # Last n_genes columns are features; the leading columns are metadata.
    data = df.values[:, -n_genes:].astype('float')
    labels = df.iloc[:, :-n_genes]
    label_to_classify = df[annot_col].values.flatten().astype('int')
    # One-hot encode; assumes class ids are contiguous 0..max — TODO confirm.
    n_class = np.max(label_to_classify) + 1
    label_to_classify = np.eye(n_class, dtype=np.float32)[label_to_classify]
    if data.max() == 1 and data.min() == -1:
        # Data already spans exactly [-1, 1], treated as previously clipped.
        print("[Preprocessing] Input data {} detected has a range of (-1, 1), skipping data preprocessing..."
              .format(task.upper()))
    elif normalize_on is not None:
        print("[Preprocessing] Scalers are detected for input data {} skipping data preprocessing..."
              .format(task.upper()))
        data = rescale_and_clip(data, scale_on=normalize_on.iloc[:, -n_genes:])
    else:
        data = rescale_and_clip(data)
    # Inverse-frequency weights give every class equal total sampling mass.
    p_class = 1./np.mean(label_to_classify, axis=0)
    p_sampler = np.sum(label_to_classify * p_class, axis=1) / np.sum(label_to_classify * p_class)
    return {'value': data, 'full_label': labels, 'class_annot': label_to_classify, 'p_sampler': p_sampler}
def assert_scale(x):
    """Return True when *x* already looks standardized (mean ~ 0, sd ~ 1).

    BUG FIX: the previous checks ``x.mean() < 1e-10`` and ``x.std() < 1e-10``
    accepted any array with a large *negative* mean and demanded a zero
    standard deviation, contradicting the "mean=0 and sd=1" condition the
    caller (rescale_and_clip) reports; compare absolute deviations instead.
    """
    return abs(x.mean()) < 1e-10 and abs(x.std() - 1.0) < 1e-10
def rescale_and_clip(data, scale_on=None):
    """Standardize *data* per gene and per cell, then clip to [-1, 1].

    Args:
        data (numpy.ndarray): samples x genes matrix to transform.
        scale_on (numpy.ndarray or None): reference matrix used to fit the
            per-gene scaler; None fits on *data* itself.

    Returns:
        numpy.ndarray: scaled matrix clipped to [-1, 1].
    """
    # Skip scaling when every row and every column already passes assert_scale.
    if np.all([assert_scale(row) for row in data]) and np.all([assert_scale(col) for col in data.transpose()]):
        print("[Preprocessing] Input data detected has mean=0 and sd=1, skipping data scaling...")
    else:
        print("[Preprocessing] Scaling...")
        if scale_on is None:
            scale_on = data
        scaler = StandardScaler() # rescale each gene: fit on the reference, apply to data
        scaler.fit(scale_on)
        data = scaler.transform(data)
        scaler = StandardScaler() # rescale each cell: fit+apply on the transposed matrix
        data = np.transpose(scaler.fit_transform(np.transpose(data)))
    print("[Preprocessing] Clipping...")
    clipping_thre = 1.
    data = np.clip(data, -clipping_thre, clipping_thre)/clipping_thre
    print("[Preprocessing] Dataset is ready for training DeepD...")
    return data
| [
"numpy.random.seed",
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"numpy.transpose",
"numpy.clip",
"numpy.max",
"numpy.mean",
"numpy.eye"
] | [((562, 582), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (576, 582), True, 'import numpy as np\n'), ((3381, 3406), 'numpy.max', 'np.max', (['label_to_classify'], {}), '(label_to_classify)\n', (3387, 3406), True, 'import numpy as np\n'), ((3435, 3468), 'numpy.eye', 'np.eye', (['n_class'], {'dtype': 'np.float32'}), '(n_class, dtype=np.float32)\n', (3441, 3468), True, 'import numpy as np\n'), ((4001, 4035), 'numpy.mean', 'np.mean', (['label_to_classify'], {'axis': '(0)'}), '(label_to_classify, axis=0)\n', (4008, 4035), True, 'import numpy as np\n'), ((4052, 4095), 'numpy.sum', 'np.sum', (['(label_to_classify * p_class)'], {'axis': '(1)'}), '(label_to_classify * p_class, axis=1)\n', (4058, 4095), True, 'import numpy as np\n'), ((4098, 4133), 'numpy.sum', 'np.sum', (['(label_to_classify * p_class)'], {}), '(label_to_classify * p_class)\n', (4104, 4133), True, 'import numpy as np\n'), ((4697, 4713), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4711, 4713), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4819, 4835), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4833, 4835), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5003, 5047), 'numpy.clip', 'np.clip', (['data', '(-clipping_thre)', 'clipping_thre'], {}), '(data, -clipping_thre, clipping_thre)\n', (5010, 5047), True, 'import numpy as np\n'), ((4906, 4924), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (4918, 4924), True, 'import numpy as np\n')] |
import numpy as np
class Individual:
    def __init__(self, bounds):
        """
        A single population member of the optimizer.

        Parameters
        ----------
        bounds : dict
            Parameter names mapped to (lower, upper) bound pairs.

        Attributes
        ----------
        _pnames : list
            Parameter names taken from *bounds*.
        lb, ub : np.ndarray
            Per-parameter lower / upper bounds.
        position : np.ndarray
            Current position, drawn uniformly inside the bounds.
        fitness : float or None
            Fitness of the current position (None until evaluated).
        """
        if not isinstance(bounds, dict):
            raise TypeError('bounds must be dict.')
        self._pnames = list(bounds.keys())
        limits = np.asarray(list(bounds.values()), dtype=np.float64)
        self.lb, self.ub = limits[:, 0], limits[:, 1]
        self.position = np.random.uniform(self.lb, self.ub)
        self.fitness = None

    def __str__(self):
        rows = [f'\t{name:<15}{value}\n'
                for name, value in zip(self._pnames, self.position)]
        return 'Position = {\n' + ''.join(rows) + '}\n' + f'\nFitness = {self.fitness}'
| [
"numpy.random.uniform"
] | [((852, 887), 'numpy.random.uniform', 'np.random.uniform', (['self.lb', 'self.ub'], {}), '(self.lb, self.ub)\n', (869, 887), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from collections.abc import MutableSequence
import numpy as np
from .dependent_variable import DependentVariable
from .dimension import Dimension
from .dimension import LabeledDimension
from .dimension import LinearDimension
from .dimension import MonotonicDimension
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["DimensionList", "DependentVariableList"]
__dimensions_list__ = (Dimension, LinearDimension, MonotonicDimension, LabeledDimension)
class AbstractList(MutableSequence):
    """A thin, type-checked wrapper around a plain Python list.

    Subclasses override :meth:`check_object` to validate/convert items
    before they are stored.  Deleting items is deliberately disallowed.
    """

    def __init__(self, data=()):
        # BUG FIX: the default used to be the mutable literal ``[]``; an
        # immutable tuple avoids the shared-mutable-default pitfall while
        # keeping the "empty by default" behavior (data is copied anyway).
        super().__init__()
        self._list = list(data)

    def __repr__(self):
        """String representation"""
        return self._list.__repr__()

    def __str__(self):
        """String representation"""
        string = ",\n".join([item.__repr__() for item in self._list])
        return f"[{string}]"

    def __len__(self):
        """List length"""
        return len(self._list)

    def __getitem__(self, index):
        """Get a list item"""
        return self._list[index]

    def __delitem__(self, index):
        """Deletion is not part of this container's contract."""
        raise LookupError("Deleting items is not allowed.")

    def check_object(self, item):
        """Validate/convert *item* before storage; subclasses override.

        BUG FIX: the base implementation used to fall through and return
        ``None`` implicitly, so ``append``/``insert``/``__setitem__`` stored
        ``None`` instead of the item when the class was used directly.
        """
        return item

    def insert(self, index: int, item: object):
        """Insert a list item"""
        item = self.check_object(item)
        self._list.insert(index, item)

    def append(self, item):
        """Append a list item"""
        item = self.check_object(item)
        self._list.append(item)

    def __setitem__(self, index, item):
        """Set item at index"""
        item = self.check_object(item)
        # if self._list[index].count != item.count:
        #     raise IndexError("Index out of range")
        self._list[index] = item

    def __eq__(self, other):
        """Item-by-item equality against another list of the same type."""
        if not isinstance(other, self.__class__):
            return False
        if len(self._list) != len(other._list):
            return False
        check = [self_i == other_i for self_i, other_i in zip(self._list, other._list)]
        return np.all(check)
class DimensionList(AbstractList):
    """A list restricted to Dimension-like items; plain dicts are coerced."""

    def check_object(self, obj):
        """Coerce dicts to Dimension and reject anything not Dimension-like."""
        item = Dimension(**obj) if isinstance(obj, dict) else obj
        if isinstance(item, __dimensions_list__):
            return item
        name = item.__class__.__name__
        raise ValueError(f"Expecting a Dimension object, found {name}")
class DependentVariableList(AbstractList):
    """A list that only accepts DependentVariable items; dicts are coerced."""

    def check_object(self, obj):
        """Coerce dicts to DependentVariable and reject any other type."""
        if isinstance(obj, dict):
            obj = DependentVariable(**obj)
        if not isinstance(obj, DependentVariable):
            # BUG FIX: ``obj.__class__.name__`` raised AttributeError before
            # the intended ValueError could be built; classes expose __name__.
            name = obj.__class__.__name__
            raise ValueError(f"Expecting a DependentVariable object, found {name}")
        return obj
| [
"numpy.all"
] | [((2073, 2086), 'numpy.all', 'np.all', (['check'], {}), '(check)\n', (2079, 2086), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from pyrecorder.recorders.file import File
from pyrecorder.video import Video
from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
from pymoo.algorithms.nsga2 import RankAndCrowdingSurvival
from pymoo.algorithms.so_genetic_algorithm import GA
from pymoo.docs import parse_doc_string
from pymoo.model.mating import Mating
from pymoo.model.population import Population
from pymoo.model.survival import Survival
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.sampling.random_sampling import FloatRandomSampling
from pymoo.operators.selection.random_selection import RandomSelection
from pymoo.operators.selection.tournament_selection import TournamentSelection, compare
from pymoo.optimize import minimize
from pymoo.problems.single.multimodal import MultiModalSimple1, curve, MultiModalSimple2
from pymoo.util.display import SingleObjectiveDisplay
from pymoo.util.misc import vectorized_cdist, norm_eucl_dist
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.util.normalization import normalize
from pymoo.util.termination.default import SingleObjectiveDefaultTermination
from pymoo.visualization.scatter import Scatter
# =========================================================================================================
# Implementation
# =========================================================================================================
def comp_by_rank(pop, P, **kwargs):
    """Binary-tournament comparator on the precomputed "rank" attribute.

    Args:
        pop: population whose individuals carry a "rank" value.
        P: (n, 2) array of index pairs to compare.

    Returns:
        (n, 1) integer array holding the winning index of each pair
        (lower rank wins; ties are broken randomly).
    """
    S = np.full(P.shape[0], np.nan)
    for i in range(P.shape[0]):
        a, b = P[i, 0], P[i, 1]
        S[i] = compare(a, pop[a].get("rank"), b, pop[b].get("rank"),
                       method='smaller_is_better', return_random_if_equal=True)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    return S[:, None].astype(int)
class MMGA(GeneticAlgorithm):
    """Multi-modal genetic algorithm.

    A standard GA whose survival step is fixed to :class:`NichingSurvival`,
    which rewards both fitness and niche crowding so several optima can be
    maintained in one population.
    """

    def __init__(self,
                 pop_size=100,
                 sampling=FloatRandomSampling(),
                 selection=RandomSelection(),
                 crossover=SimulatedBinaryCrossover(prob=0.9, eta=3),
                 mutation=PolynomialMutation(prob=None, eta=5),
                 eliminate_duplicates=True,
                 n_offsprings=None,
                 display=SingleObjectiveDisplay(),
                 **kwargs):
        """
        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}
        selection : {selection}
        crossover : {crossover}
        mutation : {mutation}
        eliminate_duplicates : {eliminate_duplicates}
        n_offsprings : {n_offsprings}
        """
        # All operators are pass-through to the generic GeneticAlgorithm
        # except survival, which is pinned to NichingSurvival.
        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         selection=selection,
                         crossover=crossover,
                         mutation=mutation,
                         survival=NichingSurvival(),
                         eliminate_duplicates=eliminate_duplicates,
                         n_offsprings=n_offsprings,
                         display=display,
                         **kwargs)
        # Experimental neighborhood-biased mating, currently disabled.
        # self.mating = NeighborBiasedMating(selection,
        #                                    crossover,
        #                                    mutation,
        #                                    repair=self.mating.repair,
        #                                    eliminate_duplicates=self.mating.eliminate_duplicates,
        #                                    n_max_iterations=self.mating.n_max_iterations)
        self.default_termination = SingleObjectiveDefaultTermination()
class NichingSurvival(Survival):
    """Survival operator trading off fitness against niche crowding.

    Each individual keeps a persistent set of nearest neighbors across
    generations; the mean distance to them is stored as "crowding" and
    appended (negated) as a second objective so that NSGA-II's
    rank-and-crowding survival favors both good fitness and solutions that
    sit in sparse regions of the design space.
    """

    def __init__(self) -> None:
        # True -> filter infeasible individuals first (Survival base behavior).
        super().__init__(True)

    def _do(self, problem, pop, n_survive, out=None, algorithm=None, **kwargs):
        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only used for single objective single!")
        # number of closest neighbors each individual keeps track of
        n_neighbors = 5
        # calculate the normalized euclidean distances from each solution to another
        D = norm_eucl_dist(problem, X, X, fill_diag_with_inf=True)
        # set the neighborhood for each individual
        for k, individual in enumerate(pop):
            # the neighbors in the current population
            neighbors = pop[D[k].argsort()[:n_neighbors]]
            # merge with the neighbors-of-neighbors remembered from previous
            # generations, de-duplicated while preserving encounter order
            N = individual.get("neighbors")
            if N is not None:
                rec = []
                h = set()
                for n in N:
                    for entry in n.get("neighbors"):
                        if entry not in h:
                            rec.append(entry)
                            h.add(entry)
                neighbors = Population.merge(neighbors, rec)
            # keep only the closest solutions to the individual
            _D = norm_eucl_dist(problem, individual.X[None, :], neighbors.get("X"))[0]
            # find only the closest neighbors
            closest = _D.argsort()[:n_neighbors]
            individual.set("crowding", _D[closest].mean())
            individual.set("neighbors", neighbors[closest])
        best = F[:, 0].argmin()
        # NOTE(review): leftover debug print of the incumbent — consider removing.
        print(F[best], pop[best].get("crowding"))
        # plt.scatter(F[:, 0], pop.get("crowding"))
        # plt.show()
        # Temporarily make the problem bi-objective (fitness, -crowding), run
        # NSGA-II survival, then restore the original objective values.
        pop.set("_F", pop.get("F"))
        pop.set("F", np.column_stack([F, -pop.get("crowding")]))
        pop = RankAndCrowdingSurvival().do(problem, pop, n_survive)
        pop.set("F", pop.get("_F"))
        return pop
class NeighborBiasedMating(Mating):
    """Mating operator that biases part of the offspring toward neighborhoods.

    With probability *bias* an offspring is produced by crossing a
    tournament-selected individual with entries of its stored "neighbors"
    set; the remaining offspring come from the default mating pipeline.
    """
    def __init__(self, selection, crossover, mutation, bias=0.7, **kwargs):
        # bias: fraction of offspring produced from neighborhood parent groups.
        super().__init__(selection, crossover, mutation, **kwargs)
        self.bias = bias
    def _do(self, problem, pop, n_offsprings, parents=None, **kwargs):
        # Split the offspring budget: neighborhood-biased vs default mating.
        rnd = np.random.random(n_offsprings)
        n_neighbors = (rnd <= self.bias).sum()
        other = super()._do(problem, pop, n_offsprings - n_neighbors, parents, **kwargs)
        # Pick first parents by rank tournament, then fill each parent group
        # with randomly chosen entries from that parent's stored neighborhood.
        N = []
        cand = TournamentSelection(comp_by_rank).do(pop, n_neighbors, n_parents=1)[:, 0]
        for k in cand:
            N.append(pop[k])
            n_cand_neighbors = pop[k].get("neighbors")
            rnd = np.random.permutation(len(n_cand_neighbors))[:self.crossover.n_parents - 1]
            [N.append(e) for e in n_cand_neighbors[rnd]]
        parents = np.reshape(np.arange(len(N)), (-1, self.crossover.n_parents))
        N = Population.create(*N)
        bias = super()._do(problem, N, n_neighbors, parents, **kwargs)
        return Population.merge(bias, other)
# Substitute the {param} placeholders in MMGA.__init__'s docstring.
parse_doc_string(MMGA.__init__)
if __name__ == '__main__':
    # Demo: optimize a simple multi-modal test problem with MMGA, then
    # visualize every generation and record the frames to "mm.mp4".
    problem = MultiModalSimple2()
    algorithm = MMGA(
        pop_size=20,
        eliminate_duplicates=True)
    ret = minimize(problem,
                   algorithm,
                   termination=('n_gen', 100),
                   seed=1,
                   save_history=True,
                   verbose=False)
    def plot(algorithm):
        # Plot the objective curve with the current population on top of it.
        pop = algorithm.pop
        sc = Scatter(title=algorithm.n_gen)
        sc.add(curve(algorithm.problem), plot_type="line", color="black")
        sc.add(np.column_stack([pop.get("X"), pop.get("F")]), color="red")
        sc.do()
    # Show the final population interactively ...
    plot(ret.algorithm)
    plt.show()
    # ... and record one frame per generation from the saved history.
    with Video(File("mm.mp4")) as vid:
        for entry in ret.history:
            plot(entry)
            vid.record()
| [
"pyrecorder.recorders.file.File",
"pymoo.model.population.Population.merge",
"pymoo.optimize.minimize",
"pymoo.util.termination.default.SingleObjectiveDefaultTermination",
"numpy.full",
"pymoo.visualization.scatter.Scatter",
"pymoo.docs.parse_doc_string",
"pymoo.operators.crossover.simulated_binary_cr... | [((6676, 6707), 'pymoo.docs.parse_doc_string', 'parse_doc_string', (['MMGA.__init__'], {}), '(MMGA.__init__)\n', (6692, 6707), False, 'from pymoo.docs import parse_doc_string\n'), ((1632, 1659), 'numpy.full', 'np.full', (['P.shape[0]', 'np.nan'], {}), '(P.shape[0], np.nan)\n', (1639, 1659), True, 'import numpy as np\n'), ((6750, 6769), 'pymoo.problems.single.multimodal.MultiModalSimple2', 'MultiModalSimple2', ([], {}), '()\n', (6767, 6769), False, 'from pymoo.problems.single.multimodal import MultiModalSimple1, curve, MultiModalSimple2\n'), ((6860, 6962), 'pymoo.optimize.minimize', 'minimize', (['problem', 'algorithm'], {'termination': "('n_gen', 100)", 'seed': '(1)', 'save_history': '(True)', 'verbose': '(False)'}), "(problem, algorithm, termination=('n_gen', 100), seed=1,\n save_history=True, verbose=False)\n", (6868, 6962), False, 'from pymoo.optimize import minimize\n'), ((7348, 7358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7356, 7358), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2046), 'pymoo.operators.sampling.random_sampling.FloatRandomSampling', 'FloatRandomSampling', ([], {}), '()\n', (2044, 2046), False, 'from pymoo.operators.sampling.random_sampling import FloatRandomSampling\n'), ((2075, 2092), 'pymoo.operators.selection.random_selection.RandomSelection', 'RandomSelection', ([], {}), '()\n', (2090, 2092), False, 'from pymoo.operators.selection.random_selection import RandomSelection\n'), ((2121, 2162), 'pymoo.operators.crossover.simulated_binary_crossover.SimulatedBinaryCrossover', 'SimulatedBinaryCrossover', ([], {'prob': '(0.9)', 'eta': '(3)'}), '(prob=0.9, eta=3)\n', (2145, 2162), False, 'from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover\n'), ((2190, 2226), 'pymoo.operators.mutation.polynomial_mutation.PolynomialMutation', 'PolynomialMutation', ([], {'prob': 'None', 'eta': '(5)'}), '(prob=None, eta=5)\n', (2208, 2226), False, 'from 
pymoo.operators.mutation.polynomial_mutation import PolynomialMutation\n'), ((2333, 2357), 'pymoo.util.display.SingleObjectiveDisplay', 'SingleObjectiveDisplay', ([], {}), '()\n', (2355, 2357), False, 'from pymoo.util.display import SingleObjectiveDisplay\n'), ((3640, 3675), 'pymoo.util.termination.default.SingleObjectiveDefaultTermination', 'SingleObjectiveDefaultTermination', ([], {}), '()\n', (3673, 3675), False, 'from pymoo.util.termination.default import SingleObjectiveDefaultTermination\n'), ((4131, 4185), 'pymoo.util.misc.norm_eucl_dist', 'norm_eucl_dist', (['problem', 'X', 'X'], {'fill_diag_with_inf': '(True)'}), '(problem, X, X, fill_diag_with_inf=True)\n', (4145, 4185), False, 'from pymoo.util.misc import vectorized_cdist, norm_eucl_dist\n'), ((5908, 5938), 'numpy.random.random', 'np.random.random', (['n_offsprings'], {}), '(n_offsprings)\n', (5924, 5938), True, 'import numpy as np\n'), ((6534, 6555), 'pymoo.model.population.Population.create', 'Population.create', (['*N'], {}), '(*N)\n', (6551, 6555), False, 'from pymoo.model.population import Population\n'), ((6644, 6673), 'pymoo.model.population.Population.merge', 'Population.merge', (['bias', 'other'], {}), '(bias, other)\n', (6660, 6673), False, 'from pymoo.model.population import Population\n'), ((7122, 7152), 'pymoo.visualization.scatter.Scatter', 'Scatter', ([], {'title': 'algorithm.n_gen'}), '(title=algorithm.n_gen)\n', (7129, 7152), False, 'from pymoo.visualization.scatter import Scatter\n'), ((7168, 7192), 'pymoo.problems.single.multimodal.curve', 'curve', (['algorithm.problem'], {}), '(algorithm.problem)\n', (7173, 7192), False, 'from pymoo.problems.single.multimodal import MultiModalSimple1, curve, MultiModalSimple2\n'), ((7375, 7389), 'pyrecorder.recorders.file.File', 'File', (['"""mm.mp4"""'], {}), "('mm.mp4')\n", (7379, 7389), False, 'from pyrecorder.recorders.file import File\n'), ((4830, 4862), 'pymoo.model.population.Population.merge', 'Population.merge', (['neighbors', 'rec'], {}), 
'(neighbors, rec)\n', (4846, 4862), False, 'from pymoo.model.population import Population\n'), ((5505, 5530), 'pymoo.algorithms.nsga2.RankAndCrowdingSurvival', 'RankAndCrowdingSurvival', ([], {}), '()\n', (5528, 5530), False, 'from pymoo.algorithms.nsga2 import RankAndCrowdingSurvival\n'), ((6108, 6141), 'pymoo.operators.selection.tournament_selection.TournamentSelection', 'TournamentSelection', (['comp_by_rank'], {}), '(comp_by_rank)\n', (6127, 6141), False, 'from pymoo.operators.selection.tournament_selection import TournamentSelection, compare\n')] |
import numpy as np
import pandas as pd
# Load the test cases from CSV into a DataFrame.
root_square_cases = pd.read_csv('Test_root_square.csv')
# Convert the DataFrame to a plain NumPy array for numeric processing.
root_square_cases = np.array(root_square_cases)
"pandas.read_csv",
"numpy.array"
] | [((80, 115), 'pandas.read_csv', 'pd.read_csv', (['"""Test_root_square.csv"""'], {}), "('Test_root_square.csv')\n", (91, 115), True, 'import pandas as pd\n'), ((160, 187), 'numpy.array', 'np.array', (['root_square_cases'], {}), '(root_square_cases)\n', (168, 187), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 11:57:04 2021
Parses the statistics from main_moabb_pipeline.py
"""
import regex
import numpy as np
# Pattern for two whitespace-separated integer columns (currently unused,
# kept for parity with the doubles pattern below).
# FIX: patterns are raw strings now — '\s' in a plain string is an invalid
# escape sequence (SyntaxWarning on modern Python).
integers = regex.compile(r'^([0-9]*)' + r'\s*' + r'([0-9]*)$')
# Pattern for a float (group 1) followed by an integer (group 2).
doubles = regex.compile(r'^([0-9]*\.*[0-9]*)' + r'\s*' + r'([0-9]*)$')
with open('./path/to/file') as f:
    lines = f.readlines()
first = np.zeros(len(lines))
second = np.zeros(len(lines))
# Parse both columns of every line (match each line once, not twice).
for i, line in enumerate(lines):
    m = doubles.match(line)
    first[i] = float(m.group(1))
    second[i] = float(m.group(2))
print(first.shape)
print(second.shape)
# Overall percentage of non-SPD matrices across all parsed entries.
avg = np.sum(first)/np.sum(second)*100
print("The percentage of non-spd matrices is {}".format(np.round(avg, 2)))
| [
"regex.compile",
"numpy.round",
"numpy.sum"
] | [((172, 221), 'regex.compile', 'regex.compile', (["('^([0-9]*)' + '\\\\s*' + '([0-9]*)$')"], {}), "('^([0-9]*)' + '\\\\s*' + '([0-9]*)$')\n", (185, 221), False, 'import regex\n'), ((230, 289), 'regex.compile', 'regex.compile', (["('^([0-9]*\\\\.*[0-9]*)' + '\\\\s*' + '([0-9]*)$')"], {}), "('^([0-9]*\\\\.*[0-9]*)' + '\\\\s*' + '([0-9]*)$')\n", (243, 289), False, 'import regex\n'), ((609, 622), 'numpy.sum', 'np.sum', (['first'], {}), '(first)\n', (615, 622), True, 'import numpy as np\n'), ((623, 637), 'numpy.sum', 'np.sum', (['second'], {}), '(second)\n', (629, 637), True, 'import numpy as np\n'), ((699, 715), 'numpy.round', 'np.round', (['avg', '(2)'], {}), '(avg, 2)\n', (707, 715), True, 'import numpy as np\n')] |
from PIL import Image, ImageDraw
from facenet_pytorch import MTCNN, InceptionResnetV1
import numpy as np
import os
import time
import torch
# Directory holding the reference face images.
TARGET_DIR = 'imgs/'
files = [f for f in os.listdir(TARGET_DIR)]
# Known-face database persisted by _get_all_encoding():
# encodings, display names and student ids, aligned by row.
npz = np.load('all2.npz')
#global known_face_encodings, known_face_names, sids
known_face_encodings = npz['encode']
known_face_names = npz['names']
sids = npz['sids']
# Worker count: 0 on Windows ('nt'), 4 elsewhere.
workers = 0 if os.name == 'nt' else 4
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
# Face detector (MTCNN) producing 160x160 aligned crops.
mtcnn = MTCNN(
    image_size=160, margin=0, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
    device=device
)
# Embedding network pre-trained on VGGFace2, in eval mode on `device`.
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
def _load_image_file(file, mode='RGB'):
    """
    Read an image file from disk and return its pixel data as a numpy array.

    :param file: image file name or file object to load
    :param mode: Pillow mode to convert to — only 'RGB' (8-bit RGB, 3
        channels) and 'L' (black and white) are supported; falsy skips
        conversion.
    :return: image contents as a numpy array
    """
    image = Image.open(file)
    return np.array(image.convert(mode) if mode else image)
def _get_all_encoding():
    """Sync the in-memory known-face database with any pending updates.

    If 'new_data2.npz' exists, each of its rows either replaces the entry of
    an already-known student id or is appended as a brand-new entry; the
    file is then deleted and the merged database persisted to 'all2.npz'.

    BUG FIXES vs the previous version:
      * the matched flag is now reset per incoming row (it used to stick
        after the first match, dropping every later new entry);
      * updates/appends use row ``j`` instead of always row ``0``;
      * an unmatched row appends only itself, not the whole incoming array.

    :return: (known_face_encodings, sids, known_face_names)
    """
    global known_face_encodings, known_face_names, sids
    if os.path.exists('new_data2.npz'):
        new_npz = np.load('new_data2.npz')
        new_encodings = new_npz['encode']
        new_names = new_npz['names']
        new_sids = new_npz['sids']
        for j in range(new_encodings.shape[0]):
            matched = False
            for i in range(known_face_encodings.shape[0]):
                if str(sids[i]) == new_sids[j]:
                    # Known student: overwrite the stored entry in place.
                    known_face_encodings[i] = new_encodings[j]
                    known_face_names[i] = new_names[j]
                    sids[i] = new_sids[j]
                    matched = True
                    break
            if not matched:
                # New student: append this single row to the database.
                known_face_encodings = np.vstack((known_face_encodings, new_encodings[j]))
                sids = np.hstack((sids, new_sids[j]))
                known_face_names = np.hstack((known_face_names, new_names[j]))
        os.remove('new_data2.npz')
    print(known_face_names)
    np.savez('all2.npz', encode=known_face_encodings, sids=sids, names=known_face_names)
    return known_face_encodings, sids, known_face_names
def _face_distance(known_face_encoding_list, face_encoding_to_check):
"""
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
:param faces: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
"""
if len(known_face_encoding_list) == 0:
return np.empty((0))
# return (face_encodings - face_to_compare).norm().item()
# encodes_ = known_face_encoding_list - face_encoding_to_check[:, np.newaxis]
return np.linalg.norm(known_face_encoding_list - face_encoding_to_check, axis=1)
def _compare_faces(known_face_encoding_list, face_encoding_to_check, tolerance=0.8):
    """
    Flag which known encodings match a candidate encoding.

    :param known_face_encoding_list: list of known face encodings
    :param face_encoding_to_check: single encoding to compare against
    :param tolerance: maximum distance that still counts as a match
        (lower is stricter)
    :return: list of True/False values, one per known encoding
    """
    distances = _face_distance(known_face_encoding_list, face_encoding_to_check)
    return list(distances <= tolerance)
def recognition_name(img="lwx.jpg"):
    """Identify the face in *img*, draw the matched name onto the image and
    save the annotated copy to static/assets/img/aaa.jpg.

    :param img: path of the image to identify
    :return: path of the saved annotated image
    """
    start = time.time()
    known_face_encodings, _, known_face_names = _get_all_encoding()
    unknown_image = _load_image_file(img)
    unknown_face_encodings = face_encoding(img)
    pil_image = Image.fromarray(unknown_image)
    name = "Unknown"
    draw = ImageDraw.Draw(pil_image)
    # Compare the probe encoding against every known face.
    matches = _compare_faces(known_face_encodings, unknown_face_encodings, tolerance=0.8)
    face_distances = _face_distance(known_face_encodings, unknown_face_encodings)
    print('face_distances: ', face_distances)
    best_match_index = np.argmin(face_distances)
    if matches[best_match_index]:
        name = known_face_names[best_match_index]
    print('name:',name)
    # Burn the recognized name into the image at a fixed position.
    draw.text((100, 120), str(name), fill=(255, 255, 255, 255))
    end = time.time()
    print('耗时:', end-start)  # 耗时 = elapsed time
    t_d = 'static/assets/img'
    up_path = os.path.join(t_d, 'aaa.jpg')
    pil_image.save(up_path, 'jpeg')
    return up_path
def _face_encodings(obj_img):
    """Detect the face in *obj_img* and return its embedding.

    :param obj_img: image pixels as a numpy array
    :return: 1-D numpy embedding of the detected face, or [] when MTCNN
        finds no face
    """
    x_aligned, prob = mtcnn(obj_img, return_prob=True)
    if x_aligned is None:
        return []
    # Batch of one aligned crop -> embedding computed on `device`.
    aligned = torch.stack([x_aligned]).to(device)
    face_encodings = resnet(aligned).detach().cpu()
    return face_encodings[0].numpy()
def face_encoding(img):
    """Load the image at *img* and return the embedding of the face it
    contains (or [] when no face is detected)."""
    return _face_encodings(_load_image_file(img))
def identification_face(img="lwx.jpg"):
    """
    Return the name of the closest matching known face for *img*,
    or "Unknown" when no known encoding is within tolerance.
    """
    known_encodings, _, known_names = _get_all_encoding()
    t0 = time.time()
    candidate = face_encoding(img)
    name = "Unknown"
    hits = _compare_faces(known_encodings, candidate, tolerance=0.8)
    distances = _face_distance(known_encodings, candidate)
    print('face_distances: ', distances)
    closest = np.argmin(distances)
    if hits[closest]:
        name = known_names[closest]
    print('耗时:', time.time() - t0)
    return name
# Demo entry point: runs recognition on a sample image at import time.
recognition_name(img='imgs/liuwenxiu.jpg')
| [
"numpy.load",
"os.remove",
"numpy.empty",
"numpy.argmin",
"numpy.linalg.norm",
"os.path.join",
"os.path.exists",
"facenet_pytorch.MTCNN",
"PIL.ImageDraw.Draw",
"numpy.hstack",
"torch.cuda.is_available",
"facenet_pytorch.InceptionResnetV1",
"numpy.savez",
"os.listdir",
"numpy.vstack",
"... | [((213, 232), 'numpy.load', 'np.load', (['"""all2.npz"""'], {}), "('all2.npz')\n", (220, 232), True, 'import numpy as np\n'), ((540, 670), 'facenet_pytorch.MTCNN', 'MTCNN', ([], {'image_size': '(160)', 'margin': '(0)', 'min_face_size': '(20)', 'thresholds': '[0.6, 0.7, 0.7]', 'factor': '(0.709)', 'post_process': '(True)', 'device': 'device'}), '(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7\n ], factor=0.709, post_process=True, device=device)\n', (545, 670), False, 'from facenet_pytorch import MTCNN, InceptionResnetV1\n'), ((1100, 1116), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1110, 1116), False, 'from PIL import Image, ImageDraw\n'), ((1171, 1183), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1179, 1183), True, 'import numpy as np\n'), ((1274, 1305), 'os.path.exists', 'os.path.exists', (['"""new_data2.npz"""'], {}), "('new_data2.npz')\n", (1288, 1305), False, 'import os\n'), ((2361, 2450), 'numpy.savez', 'np.savez', (['"""all2.npz"""'], {'encode': 'known_face_encodings', 'sids': 'sids', 'names': 'known_face_names'}), "('all2.npz', encode=known_face_encodings, sids=sids, names=\n known_face_names)\n", (2369, 2450), True, 'import numpy as np\n'), ((3215, 3288), 'numpy.linalg.norm', 'np.linalg.norm', (['(known_face_encoding_list - face_encoding_to_check)'], {'axis': '(1)'}), '(known_face_encoding_list - face_encoding_to_check, axis=1)\n', (3229, 3288), True, 'import numpy as np\n'), ((4021, 4032), 'time.time', 'time.time', ([], {}), '()\n', (4030, 4032), False, 'import time\n'), ((4208, 4238), 'PIL.Image.fromarray', 'Image.fromarray', (['unknown_image'], {}), '(unknown_image)\n', (4223, 4238), False, 'from PIL import Image, ImageDraw\n'), ((4271, 4296), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pil_image'], {}), '(pil_image)\n', (4285, 4296), False, 'from PIL import Image, ImageDraw\n'), ((4538, 4563), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (4547, 4563), True, 'import 
numpy as np\n'), ((4747, 4758), 'time.time', 'time.time', ([], {}), '()\n', (4756, 4758), False, 'import time\n'), ((4831, 4859), 'os.path.join', 'os.path.join', (['t_d', '"""aaa.jpg"""'], {}), "(t_d, 'aaa.jpg')\n", (4843, 4859), False, 'import os\n'), ((5447, 5458), 'time.time', 'time.time', ([], {}), '()\n', (5456, 5458), False, 'import time\n'), ((5745, 5770), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (5754, 5770), True, 'import numpy as np\n'), ((5866, 5877), 'time.time', 'time.time', ([], {}), '()\n', (5875, 5877), False, 'import time\n'), ((183, 205), 'os.listdir', 'os.listdir', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (193, 205), False, 'import os\n'), ((447, 472), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (470, 472), False, 'import torch\n'), ((1325, 1349), 'numpy.load', 'np.load', (['"""new_data2.npz"""'], {}), "('new_data2.npz')\n", (1332, 1349), True, 'import numpy as np\n'), ((2302, 2328), 'os.remove', 'os.remove', (['"""new_data2.npz"""'], {}), "('new_data2.npz')\n", (2311, 2328), False, 'import os\n'), ((3046, 3057), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3054, 3057), True, 'import numpy as np\n'), ((2126, 2174), 'numpy.vstack', 'np.vstack', (['(known_face_encodings, new_encodings)'], {}), '((known_face_encodings, new_encodings))\n', (2135, 2174), True, 'import numpy as np\n'), ((2194, 2221), 'numpy.hstack', 'np.hstack', (['(sids, new_sids)'], {}), '((sids, new_sids))\n', (2203, 2221), True, 'import numpy as np\n'), ((2253, 2293), 'numpy.hstack', 'np.hstack', (['(known_face_names, new_names)'], {}), '((known_face_names, new_names))\n', (2262, 2293), True, 'import numpy as np\n'), ((5060, 5084), 'torch.stack', 'torch.stack', (['[x_aligned]'], {}), '([x_aligned])\n', (5071, 5084), False, 'import torch\n'), ((689, 729), 'facenet_pytorch.InceptionResnetV1', 'InceptionResnetV1', ([], {'pretrained': '"""vggface2"""'}), "(pretrained='vggface2')\n", (706, 729), False, 'from 
facenet_pytorch import MTCNN, InceptionResnetV1\n')] |
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D
from keras.layers.core import Flatten, Dense, Dropout
from keras.applications.mobilenet import preprocess_input, decode_predictions
from keras import backend as K
from keras.utils.conv_utils import convert_kernel
import tensorflow as tf
import numpy as np
# Model definition
def get_model(first_layer):
    """
    Build a VGG16-style convolutional feature extractor.

    The caller supplies ``first_layer`` (typically a ZeroPadding2D carrying
    the input shape). Five convolutional blocks follow, each terminated by
    a 2x2 max pooling; layers are named conv<block>_<index> so pre-trained
    weights can be loaded by name.
    """
    # (filters, number of convs) per block. A ZeroPadding2D precedes every
    # conv except the very first, which is padded by ``first_layer``.
    block_plan = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    model = Sequential()
    model.add(first_layer)
    needs_padding = False
    for block_no, (filters, conv_count) in enumerate(block_plan, start=1):
        for conv_no in range(1, conv_count + 1):
            if needs_padding:
                model.add(ZeroPadding2D((1, 1)))
            needs_padding = True
            model.add(Conv2D(filters, 3, 3, activation='relu',
                             name='conv%d_%d' % (block_no, conv_no)))
        model.add(MaxPool2D((2, 2), strides=(2, 2)))
    return model
def load_model_weights(model, weights_path):
    """
    Load pre-trained weights into *model* by layer name and convert
    convolution kernels from Theano to TensorFlow ordering.

    Returns the same model instance with weights loaded and converted.
    """
    print('\nLoading model.')
    model.load_weights(weights_path, by_name=True)
    # Flip every conv kernel in place (Theano -> TensorFlow ordering);
    # whether the flip is required depends on how the weights were saved.
    assign_ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Conv2D']:
            kernel = K.get_value(layer.kernel)
            assign_ops.append(
                tf.assign(layer.kernel, convert_kernel(kernel)).op)
    K.get_session().run(assign_ops)
    print('\nModel loaded.')
    return model
# Return output of specified layer
def get_output_layer(model, layer_name):
    """Return the output tensor of the layer named *layer_name*."""
    layers_by_name = {layer.name: layer for layer in model.layers}
    return layers_by_name[layer_name].output
# Load trained model - for occlusion experiment
def load_trained_model(weights_path):
    """Return a trained model for the occlusion experiment.

    NOTE(review): ``weights_path`` is currently unused — the function
    returns an ImageNet-pretrained MobileNet instead of rebuilding the
    VGG model and loading custom weights (see the commented-out lines
    below). Confirm this substitution is intentional before relying on
    *weights_path*.
    """
    # first_layer = ZeroPadding2D((1, 1), input_shape=(img_width, img_height, 3))
    # model = get_model(first_layer) # must have FC and output layer for class prediction
    # model.load_weights(weights_path, by_name=True)
    from keras.applications.mobilenet import MobileNet
    model = MobileNet(weights='imagenet')
    return model
# Predict probabilities for given test image using trained model
def pred_prob_list(model, test_image):
    """
    Run *model* on a single image and return the raw prediction
    probabilities. The image is wrapped in a batch dimension and passed
    through MobileNet's ``preprocess_input`` first.
    """
    batch = preprocess_input(np.expand_dims(test_image, axis=0))
    return model.predict(batch)
| [
"keras.utils.conv_utils.convert_kernel",
"keras.backend.get_session",
"keras.layers.MaxPool2D",
"keras.applications.mobilenet.preprocess_input",
"keras.applications.mobilenet.MobileNet",
"numpy.expand_dims",
"keras.backend.get_value",
"tensorflow.assign",
"keras.layers.Conv2D",
"keras.layers.ZeroP... | [((521, 533), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (531, 533), False, 'from keras.models import Sequential\n'), ((3809, 3838), 'keras.applications.mobilenet.MobileNet', 'MobileNet', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (3818, 3838), False, 'from keras.applications.mobilenet import MobileNet\n'), ((3980, 4014), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (3994, 4014), True, 'import numpy as np\n'), ((4032, 4060), 'keras.applications.mobilenet.preprocess_input', 'preprocess_input', (['test_image'], {}), '(test_image)\n', (4048, 4060), False, 'from keras.applications.mobilenet import preprocess_input, decode_predictions\n'), ((576, 627), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv1_1"""'}), "(64, 3, 3, activation='relu', name='conv1_1')\n", (582, 627), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((643, 664), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (656, 664), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((680, 731), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv1_2"""'}), "(64, 3, 3, activation='relu', name='conv1_2')\n", (686, 731), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((747, 780), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (756, 780), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((797, 818), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (810, 818), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((834, 886), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv2_1"""'}), "(128, 3, 3, activation='relu', 
name='conv2_1')\n", (840, 886), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((902, 923), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (915, 923), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((939, 991), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv2_2"""'}), "(128, 3, 3, activation='relu', name='conv2_2')\n", (945, 991), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1007, 1040), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1016, 1040), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1057, 1078), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1070, 1078), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1094, 1146), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv3_1"""'}), "(256, 3, 3, activation='relu', name='conv3_1')\n", (1100, 1146), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1162, 1183), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1175, 1183), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1199, 1251), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv3_2"""'}), "(256, 3, 3, activation='relu', name='conv3_2')\n", (1205, 1251), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1267, 1288), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1280, 1288), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1304, 1356), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv3_3"""'}), "(256, 3, 3, activation='relu', name='conv3_3')\n", 
(1310, 1356), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1372, 1405), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1381, 1405), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1422, 1443), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1435, 1443), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1459, 1511), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv4_1"""'}), "(512, 3, 3, activation='relu', name='conv4_1')\n", (1465, 1511), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1527, 1548), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1540, 1548), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1564, 1616), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv4_2"""'}), "(512, 3, 3, activation='relu', name='conv4_2')\n", (1570, 1616), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1632, 1653), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1645, 1653), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1669, 1721), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv4_3"""'}), "(512, 3, 3, activation='relu', name='conv4_3')\n", (1675, 1721), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1737, 1770), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1746, 1770), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1787, 1808), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1800, 1808), False, 'from keras.layers import Conv2D, MaxPool2D, 
ZeroPadding2D\n'), ((1824, 1876), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv5_1"""'}), "(512, 3, 3, activation='relu', name='conv5_1')\n", (1830, 1876), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1892, 1913), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1905, 1913), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1929, 1981), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv5_2"""'}), "(512, 3, 3, activation='relu', name='conv5_2')\n", (1935, 1981), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1997, 2018), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2010, 2018), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((2034, 2086), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""conv5_3"""'}), "(512, 3, 3, activation='relu', name='conv5_3')\n", (2040, 2086), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((2102, 2135), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (2111, 2135), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((2543, 2568), 'keras.backend.get_value', 'K.get_value', (['layer.kernel'], {}), '(layer.kernel)\n', (2554, 2568), True, 'from keras import backend as K\n'), ((2595, 2621), 'keras.utils.conv_utils.convert_kernel', 'convert_kernel', (['original_w'], {}), '(original_w)\n', (2609, 2621), False, 'from keras.utils.conv_utils import convert_kernel\n'), ((2690, 2705), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2703, 2705), True, 'from keras import backend as K\n'), ((2645, 2681), 'tensorflow.assign', 'tf.assign', (['layer.kernel', 'converted_w'], {}), '(layer.kernel, converted_w)\n', (2654, 
2681), True, 'import tensorflow as tf\n')] |
"""
Implement L2 regularization of a fully connected neural network.
"""
import matplotlib.pyplot as plt
import numpy as np
from coding_neural_network_from_scratch import (initialize_parameters,
L_model_forward,
compute_cost,
relu_gradient,
sigmoid_gradient,
tanh_gradient,
update_parameters,
accuracy)
from gradient_checking import dictionary_to_vector
def compute_cost_reg(AL, y, parameters, lambd=0):
    """
    Cross-entropy cost with an L2 penalty over all parameters.

    Arguments
    ---------
    AL : 2d-array
        probability vector of shape 1 x training_examples.
    y : 2d-array
        true "label" vector.
    parameters : dict
        all weight matrices and bias vectors, keyed by layer.
    lambd : float
        regularization hyperparameter.

    Returns
    -------
    cost : float
        regularized binary cross-entropy cost.
    """
    m = y.shape[1]
    # plain (unregularized) cross-entropy part
    data_cost = compute_cost(AL, y)
    # L2 penalty: flatten every parameter into one vector, sum of squares
    flat_params = dictionary_to_vector(parameters)
    penalty = (lambd / (2 * m)) * np.sum(np.square(flat_params))
    return data_cost + penalty
def linear_backword_reg(dZ, cache, lambd=0):
    """
    Back-propagate through the linear part of layer l with L2 weight decay.

    Arguments
    ---------
    dZ : 2d-array
        gradient of the cost w.r.t. the linear output of the current layer.
    cache : tuple
        (A_prev, W, b) stored during the forward pass of this layer.
    lambd : float
        regularization hyperparameter; adds (lambd / m) * W to dW.

    Returns
    -------
    dA_prev : 2d-array
        gradient w.r.t. the previous layer's activation.
    dW : 2d-array
        gradient w.r.t. W (includes the L2 term).
    db : 2d-array
        gradient w.r.t. b.
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    dW = np.dot(dZ, A_prev.T) / m + (lambd / m) * W
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape

    return dA_prev, dW, db
def linear_activation_backward_reg(dA, cache, activation_fn="relu", lambd=0):
    """
    Back-propagate through one (linear -> activation) layer with L2 decay.

    Arguments
    ---------
    dA : 2d-array
        post-activation gradient for current layer l.
    cache : tuple
        values of (linear_cache, activation_cache).
    activation_fn : str
        activation used in this layer: "sigmoid", "tanh", or "relu".
    lambd : float
        regularization hyperparameter.

    Returns
    -------
    dA_prev : 2d-array
        gradient of the cost w.r.t. the activation (of previous layer l-1),
        same shape as A_prev.
    dW : 2d-array
        gradient of the cost w.r.t. W (current layer l), same shape as W.
    db : 2d-array
        gradient of the cost w.r.t. b (current layer l), same shape as b.

    Raises
    ------
    ValueError
        if ``activation_fn`` is not one of the supported activations.
    """
    linear_cache, activation_cache = cache

    # Dispatch on the activation name instead of duplicating the backward
    # call in every branch. Previously an unrecognised name fell through
    # every branch and crashed later with an opaque UnboundLocalError.
    gradient_fns = {
        "sigmoid": sigmoid_gradient,
        "tanh": tanh_gradient,
        "relu": relu_gradient,
    }
    if activation_fn not in gradient_fns:
        raise ValueError(
            "Unsupported activation_fn: {!r}; expected 'sigmoid', 'tanh' "
            "or 'relu'.".format(activation_fn))

    dZ = gradient_fns[activation_fn](dA, activation_cache)
    return linear_backword_reg(dZ, linear_cache, lambd)
def L_model_backward_reg(AL, y, caches, hidden_layers_activation_fn="relu",
                         lambd=0):
    """
    Back-propagate through every layer, starting from the output layer.

    Arguments
    ---------
    AL : 2d-array
        probability vector produced by the forward pass
        (L_model_forward()).
    y : 2d-array
        true "label" vector (containing 0 if non-cat, 1 if cat).
    caches : list
        per-layer caches recorded during the forward pass.
    hidden_layers_activation_fn : str
        activation used on the hidden layers: "tanh" or "relu".
    lambd : float
        regularization hyperparameter.

    Returns
    -------
    grads : dict
        gradients keyed as "dA<l>", "dW<l>", "db<l>".
    """
    y = y.reshape(AL.shape)
    num_layers = len(caches)
    grads = {}

    # derivative of the binary cross-entropy w.r.t. the final activation
    dAL = np.divide(AL - y, np.multiply(AL, 1 - AL))

    # output layer always uses a sigmoid activation
    (grads["dA" + str(num_layers - 1)],
     grads["dW" + str(num_layers)],
     grads["db" + str(num_layers)]) = linear_activation_backward_reg(
        dAL, caches[num_layers - 1], "sigmoid", lambd)

    # hidden layers, walked in reverse topological order
    for layer in reversed(range(1, num_layers)):
        (grads["dA" + str(layer - 1)],
         grads["dW" + str(layer)],
         grads["db" + str(layer)]) = linear_activation_backward_reg(
            grads["dA" + str(layer)], caches[layer - 1],
            hidden_layers_activation_fn, lambd)

    return grads
def model_with_regularization(
        X, y, layers_dims, learning_rate=0.01, num_epochs=3000,
        print_cost=False, hidden_layers_activation_fn="relu", lambd=0):
    """
    Train an L-layer neural network with L2 regularization.

    Arguments
    ---------
    X : 2d-array
        data, shape: number of examples x num_px * num_px * 3.
    y : 2d-array
        true "label" vector, shape: 1 x number of examples.
    layers_dims : list
        input size and size of each layer, length: number of layers + 1.
    learning_rate : float
        learning rate of the gradient descent update rule.
    num_epochs : int
        number of passes over the training data.
    print_cost : bool
        if True, print the cost every 100 iterations.
    hidden_layers_activation_fn : str
        activation for the hidden layers: "tanh" or "relu".
    lambd : float
        regularization hyperparameter.

    Returns
    -------
    parameters : dict
        parameters learnt by the model; usable to predict test examples.
    """
    np.random.seed(1)  # reproducible initialization
    parameters = initialize_parameters(layers_dims)
    cost_list = []

    # plain batch gradient descent
    for epoch in range(num_epochs):
        AL, caches = L_model_forward(
            X, parameters, hidden_layers_activation_fn)
        reg_cost = compute_cost_reg(AL, y, parameters, lambd)
        grads = L_model_backward_reg(
            AL, y, caches, hidden_layers_activation_fn, lambd)
        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and (epoch + 1) % 100 == 0:
            print("The cost after {} iterations: {}".format(
                epoch + 1, reg_cost))
        if epoch % 100 == 0:
            cost_list.append(reg_cost)

    # plot the training-cost curve (one point per hundred iterations)
    plt.plot(cost_list)
    plt.xlabel("Iterations (per hundreds)")
    plt.ylabel("Cost")
    plt.title("Cost curve for the learning rate = {}".format(learning_rate))

    return parameters
| [
"coding_neural_network_from_scratch.sigmoid_gradient",
"coding_neural_network_from_scratch.initialize_parameters",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.multiply",
"coding_neural_network_from_scratch.relu_gradient",
"numpy.square",
"matplotlib.pyplot.ylabel",
"gradient... | [((1370, 1389), 'coding_neural_network_from_scratch.compute_cost', 'compute_cost', (['AL', 'y'], {}), '(AL, y)\n', (1382, 1389), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((1464, 1496), 'gradient_checking.dictionary_to_vector', 'dictionary_to_vector', (['parameters'], {}), '(parameters)\n', (1484, 1496), False, 'from gradient_checking import dictionary_to_vector\n'), ((2715, 2730), 'numpy.dot', 'np.dot', (['W.T', 'dZ'], {}), '(W.T, dZ)\n', (2721, 2730), True, 'import numpy as np\n'), ((6773, 6790), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6787, 6790), True, 'import numpy as np\n'), ((6840, 6874), 'coding_neural_network_from_scratch.initialize_parameters', 'initialize_parameters', (['layers_dims'], {}), '(layers_dims)\n', (6861, 6874), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((7763, 7782), 'matplotlib.pyplot.plot', 'plt.plot', (['cost_list'], {}), '(cost_list)\n', (7771, 7782), True, 'import matplotlib.pyplot as plt\n'), ((7788, 7827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations (per hundreds)"""'], {}), "('Iterations (per hundreds)')\n", (7798, 7827), True, 'import matplotlib.pyplot as plt\n'), ((7833, 7851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (7843, 7851), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2699), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (2672, 2699), True, 'import numpy as np\n'), ((3747, 3785), 'coding_neural_network_from_scratch.sigmoid_gradient', 'sigmoid_gradient', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (3763, 3785), False, 'from coding_neural_network_from_scratch import 
initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((5088, 5111), 'numpy.multiply', 'np.multiply', (['AL', '(1 - AL)'], {}), '(AL, 1 - AL)\n', (5099, 5111), True, 'import numpy as np\n'), ((7054, 7113), 'coding_neural_network_from_scratch.L_model_forward', 'L_model_forward', (['X', 'parameters', 'hidden_layers_activation_fn'], {}), '(X, parameters, hidden_layers_activation_fn)\n', (7069, 7113), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((7416, 7467), 'coding_neural_network_from_scratch.update_parameters', 'update_parameters', (['parameters', 'grads', 'learning_rate'], {}), '(parameters, grads, learning_rate)\n', (7433, 7467), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((1611, 1639), 'numpy.square', 'np.square', (['parameters_vector'], {}), '(parameters_vector)\n', (1620, 1639), True, 'import numpy as np\n'), ((2607, 2627), 'numpy.dot', 'np.dot', (['dZ', 'A_prev.T'], {}), '(dZ, A_prev.T)\n', (2613, 2627), True, 'import numpy as np\n'), ((3909, 3944), 'coding_neural_network_from_scratch.tanh_gradient', 'tanh_gradient', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (3922, 3944), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, update_parameters, accuracy\n'), ((4068, 4103), 'coding_neural_network_from_scratch.relu_gradient', 'relu_gradient', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (4081, 4103), False, 'from coding_neural_network_from_scratch import initialize_parameters, L_model_forward, compute_cost, relu_gradient, sigmoid_gradient, tanh_gradient, 
update_parameters, accuracy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.